VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp@ 18645

Last change on this file since 18645 was 18291, checked in by vboxsync, 16 years ago

PGM: Map PGMRAMRANGES above 4GB outside HMA (see defect). Changed PGMR3MapPT to take a flag indicating whether PGMR3UnmapPT will be used; this way we can select a more optimal allocation function for the ram ranges. PGMMapResolveConflicts: Walk the list correctly after reloc. pgmMapClearShadowPDEs: Don't clear PGM_PLXFLAGS_MAPPING when we shouldn't (odd PAE cases).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 30.6 KB
/* $Id: PGMAllMap.cpp 18291 2009-03-26 05:11:07Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <VBox/err.h>


/**
 * Maps a range of physical pages at a given virtual address
 * in the guest context.
 *
 * The GC virtual address range must be within an existing mapping.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   GCPtr       Where to map the page(s). Must be page aligned.
 * @param   HCPhys      Start of the range of physical pages. Must be page aligned.
 * @param   cbPages     Number of bytes to map. Must be page aligned.
 * @param   fFlags      Page flags (X86_PTE_*).
 */
VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
{
    AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));

    /*
     * Validate input.
     */
    AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
    AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
    AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));

    /* hypervisor defaults */
    if (!fFlags)
        fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        if (GCPtr - pCur->GCPtr < pCur->cb)
        {
            if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
            {
                AssertMsgFailed(("Invalid range!!\n"));
                return VERR_INVALID_PARAMETER;
            }

            /*
             * Setup PTE.
             */
            X86PTEPAE Pte;
            Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);

            /*
             * Update the page tables.
             */
            for (;;)
            {
                RTGCUINTPTR off = GCPtr - pCur->GCPtr;
                const unsigned iPT = off >> X86_PD_SHIFT;
                const unsigned iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;

                /* 32-bit */
                pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u;  /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */

                /* pae */
                pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512].u = Pte.u;

                /* next */
                cbPages -= PAGE_SIZE;
                if (!cbPages)
                    break;
                GCPtr += PAGE_SIZE;
                Pte.u += PAGE_SIZE;
            }

            return VINF_SUCCESS;
        }

        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("GCPtr=%#x was not found in any mapping ranges!\n", GCPtr));
    return VERR_INVALID_PARAMETER;
}

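/*
 * Illustrative usage sketch, not part of the original file: maps a single host
 * page into an already established hypervisor mapping with the default flags
 * (P|A|D).  The names pgmExampleMapOnePage, GCPtrHyperMap and HCPhysPage are
 * placeholders invented for this example.
 */
#if 0 /* example only */
static int pgmExampleMapOnePage(PVM pVM, RTGCUINTPTR GCPtrHyperMap, RTHCPHYS HCPhysPage)
{
    /* Both addresses must be page aligned and the GC range must lie entirely
       within an existing PGMMAPPING; passing fFlags=0 selects the hypervisor
       defaults (X86_PTE_P | X86_PTE_A | X86_PTE_D). */
    return PGMMap(pVM, GCPtrHyperMap, HCPhysPage, PAGE_SIZE, 0 /* fFlags */);
}
#endif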

/**
 * Sets (replaces) the page flags for a range of pages in a mapping.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags)
{
    return PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in a mapping.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#x\n", fFlags));
    Assert(cb);

    /*
     * Align the input.
     */
    cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
        if (off < pCur->cb)
        {
            AssertMsgReturn(off + cb <= pCur->cb,
                            ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
                             GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
                            VERR_INVALID_PARAMETER);

            /*
             * Perform the requested operation.
             */
            while (cb > 0)
            {
                unsigned iPT  = off >> X86_PD_SHIFT;
                unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
                while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
                {
                    /* 32-Bit */
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u &= fMask | X86_PTE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u |= fFlags & ~X86_PTE_PG_MASK;

                    /* PAE */
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u &= fMask | X86_PTE_PAE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u |= fFlags & ~X86_PTE_PAE_PG_MASK;

                    /* invalidate tlb */
                    PGM_INVL_PG((RTGCUINTPTR)pCur->GCPtr + off);

                    /* next */
                    iPTE++;
                    cb -= PAGE_SIZE;
                    off += PAGE_SIZE;
                }
            }

            return VINF_SUCCESS;
        }
        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("Page range %#x LB%#x not found\n", GCPtr, cb));
    return VERR_INVALID_PARAMETER;
}

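/*
 * Illustrative usage sketch, not part of the original file: write protects one
 * page of a hypervisor mapping by clearing X86_PTE_RW, leaving all other flags
 * and the physical address alone.  pgmExampleWriteProtectPage and GCPtrPage
 * are placeholders invented for this example.
 */
#if 0 /* example only */
static int pgmExampleWriteProtectPage(PVM pVM, RTGCPTR GCPtrPage)
{
    /* fFlags is ORed in (nothing to set here) and fMask is ANDed with the
       existing entry; the physical address bits are preserved by
       PGMMapModifyPage itself, so the masks only need to cover flag bits. */
    return PGMMapModifyPage(pVM, GCPtrPage, PAGE_SIZE, 0 /* fFlags */, ~(uint64_t)X86_PTE_RW /* fMask */);
}
#endif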

#ifndef IN_RING0
/**
 * Sets all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pMap        Pointer to the mapping in question.
 * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
{
    Log4(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    if (!pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        return;    /* too early */

    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
    Assert(enmShadowMode <= PGMMODE_PAE_NX);

    /*
     * Init the page tables and insert them into the page directories.
     */
    unsigned i = pMap->cPTs;
    iNewPDE += i;
    while (i-- > 0)
    {
        iNewPDE--;

        switch(enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
                AssertFatal(pShw32BitPd);
#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShw32BitPd);
#endif
                if (    pShw32BitPd->a[iNewPDE].n.u1Present
                    &&  !(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING))
                {
                    pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);
                }

                X86PDE Pde;
                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
                Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
                pShw32BitPd->a[iNewPDE] = Pde;
#ifdef IN_RC
                /* Unlock dynamic mappings again. */
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShw32BitPd);
#endif
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const unsigned iPdPt = iNewPDE / 256;
                unsigned iPDE = iNewPDE * 2 % 512;
                PX86PDPT pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
                Assert(pShwPdpt);
#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmShwSyncPaePDPtr. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShwPdpt);
#endif
                PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
                if (!pShwPaePd)
                {
                    X86PDPE GstPdpe;

                    if (PGMGetGuestMode(pVM) < PGMMODE_PAE)
                    {
                        /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
                        GstPdpe.u = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
                    }
                    else
                    {
                        PX86PDPE pGstPdpe;
                        pGstPdpe = pgmGstGetPaePDPEPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
                        if (pGstPdpe)
                            GstPdpe = *pGstPdpe;
                        else
                            GstPdpe.u = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
                    }
                    int rc = pgmShwSyncPaePDPtr(pVM, (iPdPt << X86_PDPT_SHIFT), &GstPdpe, &pShwPaePd);
                    AssertFatal(RT_SUCCESS(rc));
                }
                Assert(pShwPaePd);
#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShwPaePd);
#endif
                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);

                if (!pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                {
                    /* Mark the page as locked; disallow flushing. */
                    pgmPoolLockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
                }
#ifdef VBOX_STRICT
                else
                if (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING)
                {
                    Assert(PGMGetGuestMode(pVM) >= PGMMODE_PAE);
                    AssertFatalMsg((pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0, ("%RX64 vs %RX64\n", pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT0));
                    Assert(pShwPaePd->a[iPDE+1].u & PGM_PDFLAGS_MAPPING);
                    AssertFatalMsg((pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1, ("%RX64 vs %RX64\n", pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1));
                }
#endif
                if (    pShwPaePd->a[iPDE].n.u1Present
                    &&  !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
                {
                    Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPDE);
                }

                X86PDEPAE PdePae0;
                PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
                pShwPaePd->a[iPDE] = PdePae0;

                /* 2nd 2 MB PDE of the 4 MB region */
                iPDE++;
                AssertFatal(iPDE < 512);

                if (    pShwPaePd->a[iPDE].n.u1Present
                    &&  !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
                {
                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPDE);
                }
                X86PDEPAE PdePae1;
                PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
                pShwPaePd->a[iPDE] = PdePae1;

                /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
                pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;

#ifdef IN_RC
                /* Unlock dynamic mappings again. */
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPaePd);
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPdpt);
#endif
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}

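/*
 * Worked example, not part of the original file: how a 32-bit PDE index is
 * translated to PAE indexes in pgmMapSetShadowPDEs above.  Each 32-bit PDE
 * covers 4 MB, i.e. two 2 MB PAE PDEs, so index 300 (an arbitrary value picked
 * for the example) lands in PDPT entry 300 / 256 = 1 and in PAE PD entries
 * 600 % 512 = 88 and 89.  pgmExamplePaeIndexes is a name invented here.
 */
#if 0 /* example only */
static void pgmExamplePaeIndexes(unsigned iNewPDE)
{
    const unsigned iPdPt = iNewPDE / 256;       /* 4 PDPT entries, 256 4MB PDEs each. */
    unsigned       iPDE  = iNewPDE * 2 % 512;   /* first of the two 2MB PAE PDEs. */
    Log(("32-bit PDE %u -> PDPT entry %u, PAE PDEs %u and %u\n", iNewPDE, iPdPt, iPDE, iPDE + 1));
}
#endif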

/**
 * Clears all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM             The VM handle.
 * @param   pShwPageCR3     CR3 root page
 * @param   pMap            Pointer to the mapping in question.
 * @param   iOldPDE         The index of the 32-bit PDE corresponding to the base of the mapping.
 * @param   fDeactivateCR3  Set if it's pgmMapDeactivateCR3 calling.
 */
void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3)
{
    Log(("pgmMapClearShadowPDEs: old pde %x (cPTs=%x) (mappings enabled %d) fDeactivateCR3=%RTbool\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled(&pVM->pgm.s), fDeactivateCR3));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    Assert(pShwPageCR3);
# ifdef IN_RC
    Assert(pShwPageCR3 != pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

    PX86PDPT pCurrentShwPdpt = NULL;

    if (    PGMGetGuestMode(pVM) >= PGMMODE_PAE
        &&  pShwPageCR3 != pVM->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        pCurrentShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    }

    unsigned i = pMap->cPTs;
    PGMMODE  enmShadowMode = PGMGetShadowMode(pVM);

    iOldPDE += i;
    while (i-- > 0)
    {
        iOldPDE--;

        switch(enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                AssertFatal(pShw32BitPd);

                Assert(!pShw32BitPd->a[iOldPDE].n.u1Present || (pShw32BitPd->a[iOldPDE].u & PGM_PDFLAGS_MAPPING));
                pShw32BitPd->a[iOldPDE].u = 0;
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const unsigned iPdpt = iOldPDE / 256;       /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
                unsigned iPDE = iOldPDE * 2 % 512;
                PX86PDPT pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));

                /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
                if (fDeactivateCR3)
                    pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
                else if (pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING)
                {
                    /* See if there are any other mappings here. This is suboptimal code. */
                    pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
                    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
                        if (    pCur != pMap
                            &&  (   (pCur->GCPtr >> X86_PDPT_SHIFT) == iPdpt
                                 || (pCur->GCPtrLast >> X86_PDPT_SHIFT) == iPdpt))
                        {
                            pShwPdpt->a[iPdpt].u |= PGM_PLXFLAGS_MAPPING;
                            break;
                        }
                }
                if (pCurrentShwPdpt)
                {
                    /* If the page directory of the old CR3 is reused in the new one, then don't clear the hypervisor mappings. */
                    if ((pCurrentShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) == (pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK))
                    {
                        LogFlow(("pgmMapClearShadowPDEs: Pdpe %d reused -> don't clear hypervisor mappings!\n", iPdpt));
                        break;
                    }
                }
                AssertFatal(pShwPaePd);

                Assert(!pShwPaePd->a[iPDE].n.u1Present || (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPDE].u = 0;

                iPDE++;
                AssertFatal(iPDE < 512);

                Assert(!pShwPaePd->a[iPDE].n.u1Present || (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPDE].u = 0;

                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);

                if (pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                {
                    /* Mark the page as unlocked; allow flushing again. */
                    pgmPoolUnlockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
                }
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}
#endif /* !IN_RING0 */

#if defined(VBOX_STRICT) && !defined(IN_RING0)
/**
 * Checks all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pShwPageCR3 CR3 root page
 * @param   pMap        Pointer to the mapping in question.
 * @param   iPDE        The index of the 32-bit PDE corresponding to the base of the mapping.
 */
static void pgmMapCheckShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
{
    Assert(pShwPageCR3);

    uint32_t i = pMap->cPTs;
    PGMMODE  enmShadowMode = PGMGetShadowMode(pVM);

    iPDE += i;
    while (i-- > 0)
    {
        iPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                AssertFatal(pShw32BitPd);

                AssertMsg(pShw32BitPd->a[iPDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
                          ("Expected %x vs %x; iPDE=%#x %RGv %s\n",
                           pShw32BitPd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
                           iPDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const unsigned iPD = iPDE / 256;        /* iPDE * 2 / 512; iPDE is in 4 MB pages */
                unsigned iPaePDE = iPDE * 2 % 512;
                PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pPdpt, (iPD << X86_PDPT_SHIFT));
                AssertFatal(pShwPaePd);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
                          ("Expected %RX64 vs %RX64; iPDE=%#x iPD=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
                           iPDE, iPD, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));

                iPaePDE++;
                AssertFatal(iPaePDE < 512);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
                          ("Expected %RX64 vs %RX64; iPDE=%#x iPD=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
                           iPDE, iPD, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));

                AssertMsg(pPdpt->a[iPD].u & PGM_PLXFLAGS_MAPPING,
                          ("%RX64; iPD=%#x iPDE=%#x iPaePDE=%#x %RGv %s\n",
                           pPdpt->a[iPD].u,
                           iPDE, iPD, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}


/**
 * Check the hypervisor mappings in the active CR3.
 *
 * @param   pVM         The virtual machine.
 */
VMMDECL(void) PGMMapCheck(PVM pVM)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;

        pgmMapCheckShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
    }
}
#endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */

#ifndef IN_RING0

/**
 * Apply the hypervisor mappings to the active CR3.
 *
 * @returns VBox status.
 * @param   pVM         The virtual machine.
 * @param   pShwPageCR3 CR3 root page
 */
int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return VINF_SUCCESS;

    /* Note. A log flush (in RC) can cause problems when called from MapCR3 (inconsistent state will trigger assertions). */
    Log4(("pgmMapActivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));

    Assert(pShwPageCR3 && pShwPageCR3 == pVM->pgm.s.CTX_SUFF(pShwPageCR3));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapSetShadowPDEs(pVM, pCur, iPDE);
    }
    return VINF_SUCCESS;
}


/**
 * Remove the hypervisor mappings from the specified CR3
 *
 * @returns VBox status.
 * @param   pVM         The virtual machine.
 * @param   pShwPageCR3 CR3 root page
 */
int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return VINF_SUCCESS;

    Assert(pShwPageCR3);
    Log4(("pgmMapDeactivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE, true /*fDeactivateCR3*/);
    }
    return VINF_SUCCESS;
}

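/*
 * Illustrative sketch, not part of the original file: the intended pairing of
 * the two helpers above when the shadow CR3 root is switched.  The function
 * name and the pOldShwPageCR3/pNewShwPageCR3 parameters are placeholders
 * invented for this example; it only shows the ordering, not the real call
 * sites.
 */
#if 0 /* example only */
static void pgmExampleSwitchShadowCR3(PVM pVM, PPGMPOOLPAGE pOldShwPageCR3, PPGMPOOLPAGE pNewShwPageCR3)
{
    /* Remove the hypervisor mappings from the outgoing root ... */
    pgmMapDeactivateCR3(pVM, pOldShwPageCR3);
    /* ... make pNewShwPageCR3 the active pVM->pgm.s.CTX_SUFF(pShwPageCR3) here
       (pgmMapActivateCR3 asserts that it is the active one) ... */
    /* ... and re-apply the hypervisor mappings to the new root. */
    pgmMapActivateCR3(pVM, pNewShwPageCR3);
}
#endif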

/**
 * Checks guest PD for conflicts with VMM GC mappings.
 *
 * @returns true if conflict detected.
 * @returns false if not.
 * @param   pVM         The virtual machine.
 */
VMMDECL(bool) PGMMapHasConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return false;

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    /*
     * Iterate mappings.
     */
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT = pCur->cPTs;
            while (iPT-- > 0)
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#endif
                    return true;
                }
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR GCPtr = pCur->GCPtr;

            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);

                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
#endif
                    return true;
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
        }
    }
    else
        AssertFailed();

    return false;
}

/**
 * Checks and resolves (ring 3 only) guest conflicts with VMM GC mappings.
 *
 * @returns VBox status.
 * @param   pVM         The virtual machine.
 */
VMMDECL(int) PGMMapResolveConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
        Assert(pPD);

        /*
         * Iterate mappings.
         */
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; )
        {
            PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT = pCur->cPTs;
            while (iPT-- > 0)
            {
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (   pVM->fRawR0Enabled
                         || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT);
                    AssertRCReturn(rc, rc);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
            }
            pCur = pNext;
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        /*
         * Iterate mappings.
         */
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur;)
        {
            PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
            RTGCPTR GCPtr = pCur->GCPtr;
            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);

                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
                    int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, pCur->GCPtr);
                    AssertRCReturn(rc, rc);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
            pCur = pNext;
        }
    }
    else
        AssertFailed();

    Assert(!PGMMapHasConflicts(pVM));
    return VINF_SUCCESS;
}

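/*
 * Illustrative sketch, not part of the original file: how a ring-3 caller
 * might combine the two conflict APIs above while the mappings are still
 * floating.  pgmExampleCheckAndResolveConflicts is a name invented for this
 * example; in practice this work is driven from the CR3 sync paths.
 */
#if 0 /* example only */
static int pgmExampleCheckAndResolveConflicts(PVM pVM)
{
    if (!PGMMapHasConflicts(pVM))
        return VINF_SUCCESS;
    /* In ring-3 this relocates the conflicting mappings; in RC it returns
       VINF_PGM_SYNC_CR3 so the caller can defer the work to ring-3. */
    return PGMMapResolveConflicts(pVM);
}
#endif
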
#endif /* !IN_RING0 */
