VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGst.h @ 17531

Last change on this file since 17531 was 17215, checked in by vboxsync, 16 years ago

Split up the definitions and the guest code. Otherwise we'll end up using e.g. wrong masks in Bth code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 35.0 KB
1/* $Id: PGMAllGst.h 17215 2009-02-27 16:33:19Z vboxsync $ */
2/** @file
3 * VBox - Page Manager, Guest Paging Template - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Internal Functions *
25*******************************************************************************/
26__BEGIN_DECLS
27PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
28PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
29PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPDE);
30#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
31PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
32PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM);
33#endif
34PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4);
35#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
36# ifndef IN_RING3
37PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
38# if PGM_GST_TYPE == PGM_TYPE_PAE \
39 || PGM_GST_TYPE == PGM_TYPE_AMD64
40PGM_GST_DECL(int, PAEWriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
41# endif
42# endif
43#endif
44__END_DECLS
45
46
47
48/**
49 * Gets effective Guest OS page information.
50 *
51 * When GCPtr is in a big page, the function will return as if it were a normal
52 * 4KB page. If distinguishing between big and normal pages becomes necessary
53 * at a later point, a PGMGstGetPageEx() will be created for that
54 * purpose.
55 *
56 * @returns VBox status.
57 * @param pVM VM Handle.
58 * @param GCPtr Guest Context virtual address of the page. Page aligned!
59 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
60 * @param pGCPhys Where to store the GC physical address of the page.
61 * This is page aligned.
62 */
63PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
64{
65#if PGM_GST_TYPE == PGM_TYPE_REAL \
66 || PGM_GST_TYPE == PGM_TYPE_PROT
67 /*
68 * Fake it.
69 */
70 if (pfFlags)
71 *pfFlags = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
72 if (pGCPhys)
73 *pGCPhys = GCPtr & PAGE_BASE_GC_MASK;
74 return VINF_SUCCESS;
75
76#elif PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
77
78 /*
79 * Get the PDE.
80 */
81# if PGM_GST_TYPE == PGM_TYPE_32BIT
82 X86PDE Pde = pgmGstGet32bitPDE(&pVM->pgm.s, GCPtr);
83
84#elif PGM_GST_TYPE == PGM_TYPE_PAE
85 /* pgmGstGetPaePDE will return 0 if the PDPTE is marked as not present.
86 * All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx). */
87 X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
88 bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
89
90#elif PGM_GST_TYPE == PGM_TYPE_AMD64
91 PX86PML4E pPml4e;
92 X86PDPE Pdpe;
93 X86PDEPAE Pde = pgmGstGetLongModePDEEx(&pVM->pgm.s, GCPtr, &pPml4e, &Pdpe);
94 bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
95
96 Assert(pPml4e);
97 if (!(pPml4e->n.u1Present & Pdpe.n.u1Present))
98 return VERR_PAGE_TABLE_NOT_PRESENT;
99
100 /* Merge accessed, write, user and no-execute bits into the PDE. */
101 Pde.n.u1Accessed &= pPml4e->n.u1Accessed & Pdpe.lm.u1Accessed;
102 Pde.n.u1Write &= pPml4e->n.u1Write & Pdpe.lm.u1Write;
103 Pde.n.u1User &= pPml4e->n.u1User & Pdpe.lm.u1User;
104 Pde.n.u1NoExecute &= pPml4e->n.u1NoExecute & Pdpe.lm.u1NoExecute;
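            /* (Added note, not part of the original file:) after this merge the PDE
             * carries the accessed/write/user/no-execute state combined from the
             * PML4E, the PDPE and the PDE itself, so the common walk below only has
             * to look at the PDE and the PTE. */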
105# endif
106
107 /*
108 * Lookup the page.
109 */
110 if (!Pde.n.u1Present)
111 return VERR_PAGE_TABLE_NOT_PRESENT;
112
113 if ( !Pde.b.u1Size
114# if PGM_GST_TYPE != PGM_TYPE_AMD64
115 || !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE)
116# endif
117 )
118 {
119 PGSTPT pPT;
120 int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
121 if (RT_FAILURE(rc))
122 return rc;
123
124 /*
125 * Get PT entry and check presence.
126 */
127 const GSTPTE Pte = pPT->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
128 if (!Pte.n.u1Present)
129 return VERR_PAGE_NOT_PRESENT;
130
131 /*
132 * Store the result.
133 * RW and US flags depend on all levels (bitwise AND) - except for legacy PAE
134 * where the PDPE is simplified.
135 */
136 if (pfFlags)
137 {
138 *pfFlags = (Pte.u & ~GST_PTE_PG_MASK)
139 & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));
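            /* (Added note, not part of the original file:) the right-hand operand has
             * every bit set except RW and US, which are taken from the PDE. ANDing it
             * with the PTE flags therefore leaves all other bits untouched while RW
             * and US become PTE & PDE, i.e. writable/user only if both levels allow it. */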
140# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
141 /* The NX bit is determined by a bitwise OR between the PT and PD */
142 if (fNoExecuteBitValid)
143 *pfFlags |= (Pte.u & Pde.u & X86_PTE_PAE_NX);
144# endif
145 }
146 if (pGCPhys)
147 *pGCPhys = Pte.u & GST_PTE_PG_MASK;
148 }
149 else
150 {
151 /*
152 * Map big to 4k PTE and store the result
153 */
154 if (pfFlags)
155 {
156 *pfFlags = (Pde.u & ~(GST_PTE_PG_MASK | X86_PTE_PAT))
157 | ((Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT);
158# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
159 /* The NX bit comes from the PDE alone here; big pages have no PT level. */
160 if (fNoExecuteBitValid)
161 *pfFlags |= (Pde.u & X86_PTE_PAE_NX);
162# endif
163 }
164 if (pGCPhys)
165 *pGCPhys = GST_GET_PDE_BIG_PG_GCPHYS(Pde) | (GCPtr & (~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK));
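            /* (Added note, not part of the original file:) for 32-bit guests
             * ~GST_PDE_BIG_PG_MASK covers bits 0-21 and ~GST_PTE_PG_MASK bits 0-11,
             * so their XOR selects bits 12-21, i.e. the 4KB-page index within the 4MB
             * page. ORing that onto the big-page base yields the physical address of
             * the containing 4KB page (PAE/AMD64 work the same with 2MB pages). */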
166 }
167 return VINF_SUCCESS;
168#else
169# error "shouldn't be here!"
170 /* something else... */
171 return VERR_NOT_SUPPORTED;
172#endif
173}
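/*
 * (Editorial sketch, not part of the original file.) Callers normally reach this
 * template through the mode-dispatching PGMGstGetPage() wrapper; assuming that
 * wrapper mirrors this template's signature, a query might look like:
 *
 *     uint64_t fFlags;
 *     RTGCPHYS GCPhys;
 *     int rc = PGMGstGetPage(pVM, GCPtr, &fFlags, &GCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *         Log(("%RGv is writable, backed by %RGp\n", GCPtr, GCPhys));
 */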
174
175
176/**
177 * Modify page flags for a range of pages in the guest's tables
178 *
179 * The existing flags are ANDed with the fMask and ORed with the fFlags.
180 *
181 * @returns VBox status code.
182 * @param pVM VM handle.
183 * @param GCPtr Virtual address of the first page in the range. Page aligned!
184 * @param cb Size (in bytes) of the page range to apply the modification to. Page aligned!
185 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
186 * @param fMask The AND mask - page flags X86_PTE_*.
187 */
188PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
189{
190#if PGM_GST_TYPE == PGM_TYPE_32BIT \
191 || PGM_GST_TYPE == PGM_TYPE_PAE \
192 || PGM_GST_TYPE == PGM_TYPE_AMD64
193
194 for (;;)
195 {
196 /*
197 * Get the PD entry.
198 */
199# if PGM_GST_TYPE == PGM_TYPE_32BIT
200 PX86PDE pPde = pgmGstGet32bitPDEPtr(&pVM->pgm.s, GCPtr);
201
202# elif PGM_GST_TYPE == PGM_TYPE_PAE
203 /* pgmGstGetPaePDEPtr will return 0 if the PDPTE is marked as not present.
204 * All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx).
205 */
206 PX86PDEPAE pPde = pgmGstGetPaePDEPtr(&pVM->pgm.s, GCPtr);
207 Assert(pPde);
208 if (!pPde)
209 return VERR_PAGE_TABLE_NOT_PRESENT;
210# elif PGM_GST_TYPE == PGM_TYPE_AMD64
211 /** @todo Setting the r/w, u/s & nx bits might have no effect depending on the pdpte & pml4 values */
212 PX86PDEPAE pPde = pgmGstGetLongModePDEPtr(&pVM->pgm.s, GCPtr);
213 Assert(pPde);
214 if (!pPde)
215 return VERR_PAGE_TABLE_NOT_PRESENT;
216# endif
217 GSTPDE Pde = *pPde;
218 Assert(Pde.n.u1Present);
219 if (!Pde.n.u1Present)
220 return VERR_PAGE_TABLE_NOT_PRESENT;
221
222 if ( !Pde.b.u1Size
223# if PGM_GST_TYPE != PGM_TYPE_AMD64
224 || !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE)
225# endif
226 )
227 {
228 /*
229 * 4KB Page table
230 *
231 * Walk page tables and pages till we're done.
232 */
233 PGSTPT pPT;
234 int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
235 if (RT_FAILURE(rc))
236 return rc;
237
238 unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
239 while (iPTE < RT_ELEMENTS(pPT->a))
240 {
241 GSTPTE Pte = pPT->a[iPTE];
242 Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
243 | (fFlags & ~GST_PTE_PG_MASK);
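                /* (Added note, not part of the original file:) ORing X86_PTE_PAE_PG_MASK
                 * into fMask keeps the physical address bits no matter what the caller
                 * passed, and masking fFlags with ~GST_PTE_PG_MASK prevents fFlags from
                 * writing into the address field. */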
244 pPT->a[iPTE] = Pte;
245
246 /* next page */
247 cb -= PAGE_SIZE;
248 if (!cb)
249 return VINF_SUCCESS;
250 GCPtr += PAGE_SIZE;
251 iPTE++;
252 }
253 }
254 else
255 {
256 /*
257 * 4MB Page table
258 */
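            /* (Added note, not part of the original file:) fFlags/fMask use the 4KB PTE
             * layout, where PAT is bit 7. In a 4MB PDE bit 7 is PS and PAT sits at
             * bit 12, so the PAT bit is shifted up by X86_PDE4M_PAT_SHIFT below while
             * the PS bit and the big-page address bits are always preserved. */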
259# if PGM_GST_TYPE == PGM_TYPE_32BIT
260 Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
261# else
262 Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
263# endif
264 | (fFlags & ~GST_PTE_PG_MASK)
265 | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
266 *pPde = Pde;
267
268 /* advance */
269 const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
270 if (cbDone >= cb)
271 return VINF_SUCCESS;
272 cb -= cbDone;
273 GCPtr += cbDone;
274 }
275 }
276
277#else
278 /* real / protected mode: ignore. */
279 return VINF_SUCCESS;
280#endif
281}
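/*
 * (Editorial sketch, not part of the original file.) The fFlags/fMask pair works as
 * "new = (old & fMask) | fFlags". Assuming the mode-dispatching PGMGstModifyPage()
 * wrapper mirrors this template's signature, write-protecting a two-page range might
 * look like:
 *
 *     rc = PGMGstModifyPage(pVM, GCPtr, 2 * PAGE_SIZE,
 *                           0,                      // nothing to set
 *                           ~(uint64_t)X86_PTE_RW); // clear the RW bit
 */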
282
283
284/**
285 * Retrieve guest PDE information
286 *
287 * @returns VBox status code.
288 * @param pVM The virtual machine.
289 * @param GCPtr Guest context pointer
290 * @param pPDE Pointer to guest PDE structure
291 */
292PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPDE)
293{
294#if PGM_GST_TYPE == PGM_TYPE_32BIT \
295 || PGM_GST_TYPE == PGM_TYPE_PAE \
296 || PGM_GST_TYPE == PGM_TYPE_AMD64
297
298# if PGM_GST_TYPE == PGM_TYPE_32BIT
299 X86PDE Pde = pgmGstGet32bitPDE(&pVM->pgm.s, GCPtr);
300# elif PGM_GST_TYPE == PGM_TYPE_PAE
301 X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
302# elif PGM_GST_TYPE == PGM_TYPE_AMD64
303 X86PDEPAE Pde = pgmGstGetLongModePDE(&pVM->pgm.s, GCPtr);
304# endif
305
306 pPDE->u = (X86PGPAEUINT)Pde.u;
307 return VINF_SUCCESS;
308#else
309 AssertFailed();
310 return VERR_NOT_IMPLEMENTED;
311#endif
312}
313
314
315#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
316
317#undef LOG_GROUP
318#define LOG_GROUP LOG_GROUP_PGM_POOL
319
320/**
321 * Registers physical page monitors for the necessary paging
322 * structures to detect conflicts with our guest mappings.
323 *
324 * This is always called after mapping CR3.
325 * This is never called with fixed mappings.
326 *
327 * @returns VBox status, no specials.
328 * @param pVM VM handle.
329 * @param GCPhysCR3 The physical address in the CR3 register.
330 */
331PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
332{
333 Assert(!pVM->pgm.s.fMappingsFixed);
334 int rc = VINF_SUCCESS;
335
336 /*
337 * Register/Modify write phys handler for guest's CR3 if it changed.
338 */
339#if PGM_GST_TYPE == PGM_TYPE_32BIT
340
341 if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
342 {
343# ifndef PGMPOOL_WITH_MIXED_PT_CR3
344 const unsigned cbCR3Stuff = PGM_GST_TYPE == PGM_TYPE_PAE ? 32 : PAGE_SIZE;
345 if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
346 rc = PGMHandlerPhysicalModify(pVM, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1);
347 else
348 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1,
349 pVM->pgm.s.pfnR3GstWriteHandlerCR3, 0,
350 pVM->pgm.s.pfnR0GstWriteHandlerCR3, 0,
351 pVM->pgm.s.pfnRCGstWriteHandlerCR3, 0,
352 pVM->pgm.s.pszR3GstWriteHandlerCR3);
353# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
354 rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool),
355 pVM->pgm.s.enmShadowMode == PGMMODE_PAE
356 || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
357 ? PGMPOOL_IDX_PAE_PD
358 : PGMPOOL_IDX_PD,
359 GCPhysCR3);
360# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
361 if (RT_FAILURE(rc))
362 {
363 AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
364 rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
365 return rc;
366 }
367 pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
368 }
369
370#elif PGM_GST_TYPE == PGM_TYPE_PAE
371 /* Monitor the PDPT page */
372 /*
373 * Register/Modify write phys handler for guest's CR3 if it changed.
374 */
375# ifndef PGMPOOL_WITH_MIXED_PT_CR3
376 AssertFailed();
377# endif
378 if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
379 {
380 rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT, GCPhysCR3);
381 if (RT_FAILURE(rc))
382 {
383 AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
384 rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
385 return rc;
386 }
387 pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
388 }
389
390 /*
391 * Do the 4 PDs.
392 */
393 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
394 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
395 {
396 if (pGuestPDPT->a[i].n.u1Present)
397 {
398 RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;
399 if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != GCPhys)
400 {
401 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);
402
403 rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i, GCPhys);
404 }
405
406 if (RT_FAILURE(rc))
407 {
408 AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
409 rc, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i], GCPhys));
410 return rc;
411 }
412 pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = GCPhys;
413 }
414 else if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
415 {
416 rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
417 AssertRC(rc);
418 pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
419 }
420 }
421
422#else
423 /* prot/real/amd64 mode stub */
424
425#endif
426 return rc;
427}
428
429/**
430 * Deregisters any physical page monitors installed by MonitorCR3.
431 *
432 * @returns VBox status code, no specials.
433 * @param pVM The VM handle.
434 */
435PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM)
436{
437 int rc = VINF_SUCCESS;
438
439 /*
440 * Deregister the access handlers.
441 *
442 * PGMSyncCR3 will reinstall them if required, and PGMSyncCR3 will be executed
443 * before we enter GC again.
444 */
445#if PGM_GST_TYPE == PGM_TYPE_32BIT
446 if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
447 {
448# ifndef PGMPOOL_WITH_MIXED_PT_CR3
449 rc = PGMHandlerPhysicalDeregister(pVM, pVM->pgm.s.GCPhysGstCR3Monitored);
450 AssertRCReturn(rc, rc);
451# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
452 rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool),
453 pVM->pgm.s.enmShadowMode == PGMMODE_PAE
454 || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
455 ? PGMPOOL_IDX_PAE_PD
456 : PGMPOOL_IDX_PD);
457 AssertRCReturn(rc, rc);
458# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
459 pVM->pgm.s.GCPhysGstCR3Monitored = NIL_RTGCPHYS;
460 }
461
462#elif PGM_GST_TYPE == PGM_TYPE_PAE
463 /* The PDPT page */
464# ifndef PGMPOOL_WITH_MIXED_PT_CR3
465 AssertFailed();
466# endif
467
468 if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
469 {
470 rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT);
471 AssertRC(rc);
472 }
473
474 /* The 4 PDs. */
475 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
476 {
477 if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
478 {
479 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);
480 int rc2 = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
481 AssertRC(rc2);
482 if (RT_FAILURE(rc2))
483 rc = rc2;
484 pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
485 }
486 }
487#else
488 /* prot/real/amd64 mode stub */
489#endif
490 return rc;
491
492}
493
494#undef LOG_GROUP
495#define LOG_GROUP LOG_GROUP_PGM
496
497#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
498
499
500#if PGM_GST_TYPE == PGM_TYPE_32BIT \
501 || PGM_GST_TYPE == PGM_TYPE_PAE \
502 || PGM_GST_TYPE == PGM_TYPE_AMD64
503/**
504 * Updates one virtual handler range.
505 *
506 * @returns 0
507 * @param pNode Pointer to a PGMVIRTHANDLER.
508 * @param pvUser Pointer to a PGMVHUARGS structure (see PGM.cpp).
509 */
510static DECLCALLBACK(int) PGM_GST_NAME(VirtHandlerUpdateOne)(PAVLROGCPTRNODECORE pNode, void *pvUser)
511{
512 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
513 PPGMHVUSTATE pState = (PPGMHVUSTATE)pvUser;
514 Assert(pCur->enmType != PGMVIRTHANDLERTYPE_HYPERVISOR);
515
516#if PGM_GST_TYPE == PGM_TYPE_32BIT
517 PX86PD pPDSrc = pgmGstGet32bitPDPtr(&pState->pVM->pgm.s);
518#endif
519
520 RTGCPTR GCPtr = pCur->Core.Key;
521#if PGM_GST_TYPE != PGM_TYPE_AMD64
522 /* skip all stuff above 4GB if not AMD64 mode. */
523 if (GCPtr >= _4GB)
524 return 0;
525#endif
526
527 unsigned offPage = GCPtr & PAGE_OFFSET_MASK;
528 unsigned iPage = 0;
529 while (iPage < pCur->cPages)
530 {
531#if PGM_GST_TYPE == PGM_TYPE_32BIT
532 X86PDE Pde = pPDSrc->a[GCPtr >> X86_PD_SHIFT];
533#elif PGM_GST_TYPE == PGM_TYPE_PAE
534 X86PDEPAE Pde = pgmGstGetPaePDE(&pState->pVM->pgm.s, GCPtr);
535#elif PGM_GST_TYPE == PGM_TYPE_AMD64
536 X86PDEPAE Pde = pgmGstGetLongModePDE(&pState->pVM->pgm.s, GCPtr);
537#endif
538 if (Pde.n.u1Present)
539 {
540 if ( !Pde.b.u1Size
541# if PGM_GST_TYPE != PGM_TYPE_AMD64
542 || !(pState->cr4 & X86_CR4_PSE)
543# endif
544 )
545 {
546 /*
547 * Normal page table.
548 */
549 PGSTPT pPT;
550 int rc = PGM_GCPHYS_2_PTR(pState->pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
551 if (RT_SUCCESS(rc))
552 {
553 for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
554 iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
555 iPTE++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
556 {
557 GSTPTE Pte = pPT->a[iPTE];
558 RTGCPHYS GCPhysNew;
559 if (Pte.n.u1Present)
560 GCPhysNew = (RTGCPHYS)(pPT->a[iPTE].u & GST_PTE_PG_MASK) + offPage;
561 else
562 GCPhysNew = NIL_RTGCPHYS;
563 if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
564 {
565 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
566 pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
567#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
568 AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
569 ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%RGp\n",
570 pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
571 pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
572#endif
573 pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
574 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
575 }
576 }
577 }
578 else
579 {
580 /* not-present. */
581 offPage = 0;
582 AssertRC(rc);
583 for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
584 iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
585 iPTE++, iPage++, GCPtr += PAGE_SIZE)
586 {
587 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
588 {
589 pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
590#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
591 AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
592 ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
593 pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
594 pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias));
595#endif
596 pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
597 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
598 }
599 }
600 }
601 }
602 else
603 {
604 /*
605 * 2/4MB page.
606 */
607 RTGCPHYS GCPhys = (RTGCPHYS)(Pde.u & GST_PDE_PG_MASK);
608 for (unsigned i4KB = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
609 i4KB < PAGE_SIZE / sizeof(GSTPDE) && iPage < pCur->cPages;
610 i4KB++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
611 {
612 RTGCPHYS GCPhysNew = GCPhys + (i4KB << PAGE_SHIFT) + offPage;
613 if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
614 {
615 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
616 pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
617#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
618 AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
619 ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%RGp\n",
620 pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
621 pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
622#endif
623 pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
624 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
625 }
626 }
627 } /* pde type */
628 }
629 else
630 {
631 /* not-present. */
632 for (unsigned cPages = (GST_PT_MASK + 1) - ((GCPtr >> GST_PT_SHIFT) & GST_PT_MASK);
633 cPages && iPage < pCur->cPages;
634 iPage++, GCPtr += PAGE_SIZE)
635 {
636 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
637 {
638 pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
639 pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
640 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
641 }
642 }
643 offPage = 0;
644 }
645 } /* for pages in virtual mapping. */
646
647 return 0;
648}
649#endif /* 32BIT, PAE and AMD64 */
650
651
652/**
653 * Updates the virtual page access handlers.
654 *
655 * @returns true if bits were flushed.
656 * @returns false if bits weren't flushed.
657 * @param pVM VM handle.
658 * @param pPDSrc The page directory.
659 * @param cr4 The cr4 register value.
660 */
661PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4)
662{
663#if PGM_GST_TYPE == PGM_TYPE_32BIT \
664 || PGM_GST_TYPE == PGM_TYPE_PAE \
665 || PGM_GST_TYPE == PGM_TYPE_AMD64
666
667 /** @todo
668 * In theory this is not sufficient: the guest can change a single page in a range with invlpg
669 */
670
671 /*
672 * Resolve any virtual address based access handlers to GC physical addresses.
673 * This should be fairly quick.
674 */
675 PGMHVUSTATE State;
676
677 pgmLock(pVM);
678 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualUpdate), a);
679 State.pVM = pVM;
680 State.fTodo = pVM->pgm.s.fSyncFlags;
681 State.cr4 = cr4;
682 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, PGM_GST_NAME(VirtHandlerUpdateOne), &State);
683 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualUpdate), a);
684
685
686 /*
687 * Set / reset bits?
688 */
689 if (State.fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL)
690 {
691 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualReset), b);
692 Log(("pgmR3VirtualHandlersUpdate: resets bits\n"));
693 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualResetOne, pVM);
694 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
695 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualReset), b);
696 }
697 pgmUnlock(pVM);
698
699 return !!(State.fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL);
700
701#else /* real / protected */
702 return false;
703#endif
704}
705
706#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
707
708#if PGM_GST_TYPE == PGM_TYPE_32BIT && !defined(IN_RING3)
709
710/**
711 * Write access handler for the Guest CR3 page in 32-bit mode.
712 *
713 * This will try to interpret the instruction; on failure it falls back to the recompiler.
714 * Check if the changed PDEs are marked present and conflict with our
715 * mappings. If they conflict, we'll switch to the host context and resolve it there.
716 *
717 * @returns VBox status code (appropriate for trap handling and GC return).
718 * @param pVM VM Handle.
719 * @param uErrorCode CPU Error code.
720 * @param pRegFrame Trap register frame.
721 * @param pvFault The fault address (cr2).
722 * @param GCPhysFault The GC physical address corresponding to pvFault.
723 * @param pvUser User argument.
724 */
725PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
726{
727 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
728
729 /*
730 * Try interpret the instruction.
731 */
732 uint32_t cb;
733 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
734 if (RT_SUCCESS(rc) && cb)
735 {
736 /*
737 * Check if the modified PDEs are present and mappings.
738 */
739 const RTGCPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;
740 const unsigned iPD1 = offPD / sizeof(X86PDE);
741 const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDE);
742
743 Assert(cb > 0 && cb <= 8);
744 Assert(iPD1 < X86_PG_ENTRIES);
745 Assert(iPD2 < X86_PG_ENTRIES);
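            /* (Added note, not part of the original file:) e.g. a 4-byte write at CR3
             * offset 0x804 gives iPD1 = iPD2 = 0x201, i.e. the PDE covering the 4MB
             * guest range starting at 0x201 << X86_PD_SHIFT = 0x80400000. */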
746
747#ifdef DEBUG
748 Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%x\n", iPD1, iPD1 << X86_PD_SHIFT));
749 if (iPD1 != iPD2)
750 Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%x\n", iPD2, iPD2 << X86_PD_SHIFT));
751#endif
752
753 if (!pVM->pgm.s.fMappingsFixed)
754 {
755 PX86PD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
756 if ( ( pPDSrc->a[iPD1].n.u1Present
757 && pgmGetMapping(pVM, (RTGCPTR)(iPD1 << X86_PD_SHIFT)) )
758 || ( iPD1 != iPD2
759 && pPDSrc->a[iPD2].n.u1Present
760 && pgmGetMapping(pVM, (RTGCPTR)(iPD2 << X86_PD_SHIFT)) )
761 )
762 {
763 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteConflict);
764 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
765 if (rc == VINF_SUCCESS)
766 rc = VINF_PGM_SYNC_CR3;
767 Log(("pgmXXGst32BitWriteHandlerCR3: detected conflict iPD1=%#x iPD2=%#x - returns %Rrc\n", iPD1, iPD2, rc));
768 return rc;
769 }
770 }
771
772 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
773 }
774 else
775 {
776 Assert(RT_FAILURE(rc));
777 if (rc == VERR_EM_INTERPRETER)
778 rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
779 Log(("pgmXXGst32BitWriteHandlerCR3: returns %Rrc\n", rc));
780 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
781 }
782 return rc;
783}
784
785#endif /* PGM_TYPE_32BIT && !IN_RING3 */
786#if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(IN_RING3)
787
788/**
789 * Write access handler for the Guest CR3 page in PAE mode.
790 *
791 * This will try to interpret the instruction; on failure it falls back to the recompiler.
792 * Check if the changed PDEs are marked present and conflict with our
793 * mappings. If they conflict, we'll switch to the host context and resolve it there.
794 *
795 * @returns VBox status code (appropriate for trap handling and GC return).
796 * @param pVM VM Handle.
797 * @param uErrorCode CPU Error code.
798 * @param pRegFrame Trap register frame.
799 * @param pvFault The fault address (cr2).
800 * @param GCPhysFault The GC physical address corresponding to pvFault.
801 * @param pvUser User argument.
802 */
803PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
804{
805 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
806
807 /*
808 * Try interpret the instruction.
809 */
810 uint32_t cb;
811 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
812 if (RT_SUCCESS(rc) && cb)
813 {
814 /*
815 * Check if any of the PDs have changed.
816 * We'll simply check all of them instead of figuring out which one/two to check.
817 */
818 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
819 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
820 {
821 if ( pGuestPDPT->a[i].n.u1Present
822 && (pGuestPDPT->a[i].u & X86_PDPE_PG_MASK)
823 != pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
824 {
825 /*
826 * The PDPE has changed.
827 * We will schedule a monitoring update for the next TLB Flush,
828 * InvalidatePage or SyncCR3.
829 *
830 * This isn't perfect, because a lazy page sync might be dealing with a half-
831 * updated PDPE. However, we assume that the guest OS is disabling interrupts
832 * and being extremely careful (cmpxchg8b) when updating a PDPE where it's
833 * executing.
834 */
835 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
836 Log(("pgmXXGstPaeWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%RGp\n",
837 i, pGuestPDPT->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
838 }
839 }
840
841 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
842 }
843 else
844 {
845 Assert(RT_FAILURE(rc));
846 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
847 if (rc == VERR_EM_INTERPRETER)
848 rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
849 }
850 Log(("pgmXXGstPaeWriteHandlerCR3: returns %Rrc\n", rc));
851 return rc;
852}
853
854
855/**
856 * Write access handler for the Guest PDs in PAE mode.
857 *
858 * This will try to interpret the instruction; on failure it falls back to the recompiler.
859 * Check if the changed PDEs are marked present and conflict with our
860 * mappings. If they conflict, we'll switch to the host context and resolve it there.
861 *
862 * @returns VBox status code (appropriate for trap handling and GC return).
863 * @param pVM VM Handle.
864 * @param uErrorCode CPU Error code.
865 * @param pRegFrame Trap register frame.
866 * @param pvFault The fault address (cr2).
867 * @param GCPhysFault The GC physical address corresponding to pvFault.
868 * @param pvUser User argument.
869 */
870PGM_GST_DECL(int, WriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
871{
872 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
873
874 /*
875 * Try interpret the instruction.
876 */
877 uint32_t cb;
878 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
879 if (RT_SUCCESS(rc) && cb)
880 {
881 /*
882 * Figure out which of the 4 PDs this is.
883 */
884 RTGCPTR i;
885 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
886 for (i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
887 if (pGuestPDPT->a[i].u == (GCPhysFault & X86_PTE_PAE_PG_MASK))
888 {
889 PX86PDPAE pPDSrc = pgmGstGetPaePD(&pVM->pgm.s, i << X86_PDPT_SHIFT);
890 const RTGCPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;
891 const unsigned iPD1 = offPD / sizeof(X86PDEPAE);
892 const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDEPAE);
893
894 Assert(cb > 0 && cb <= 8);
895 Assert(iPD1 < X86_PG_PAE_ENTRIES);
896 Assert(iPD2 < X86_PG_PAE_ENTRIES);
897
898# ifdef LOG_ENABLED
899 Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD1=%#05x (%x)\n",
900 i, iPD1, (i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)));
901 if (iPD1 != iPD2)
902 Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD2=%#05x (%x)\n",
903 i, iPD2, (i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)));
904# endif
905
906 if (!pVM->pgm.s.fMappingsFixed)
907 {
908 if ( ( pPDSrc->a[iPD1].n.u1Present
909 && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT))) )
910 || ( iPD1 != iPD2
911 && pPDSrc->a[iPD2].n.u1Present
912 && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT))) )
913 )
914 {
915 Log(("pgmXXGstPaeWriteHandlerPD: detected conflict iPD1=%#x iPD2=%#x\n", iPD1, iPD2));
916 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteConflict);
917 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
918 return VINF_PGM_SYNC_CR3;
919 }
920 }
921 break; /* ASSUMES no duplicate entries... */
922 }
923 Assert(i < 4);
924
925 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
926 }
927 else
928 {
929 Assert(RT_FAILURE(rc));
930 if (rc == VERR_EM_INTERPRETER)
931 rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
932 else
933 Log(("pgmXXGst32BitWriteHandlerCR3: returns %Rrc\n", rc));
934 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
935 }
936 return rc;
937}
938
939#endif /* PGM_TYPE_PAE && !IN_RING3 */
940
941#endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */