VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp@17595

Last change on this file since 17595 was 17593, checked in by vboxsync, 16 years ago

Backed out 44052

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 28.6 KB
/* $Id: PGMAllMap.cpp 17593 2009-03-09 17:11:35Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <VBox/err.h>


/**
 * Maps a range of physical pages at a given virtual address
 * in the guest context.
 *
 * The GC virtual address range must be within an existing mapping.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   GCPtr       Where to map the page(s). Must be page aligned.
 * @param   HCPhys      Start of the range of physical pages. Must be page aligned.
 * @param   cbPages     Number of bytes to map. Must be page aligned.
 * @param   fFlags      Page flags (X86_PTE_*).
 */
VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
{
    AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));

    /*
     * Validate input.
     */
    AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
    AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
    AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));

    /* hypervisor defaults */
    if (!fFlags)
        fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
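        /* Unsigned wrap-around turns the membership test below into a single
           compare: when GCPtr is below pCur->GCPtr the subtraction wraps to a
           huge value, so "< pCur->cb" only holds for addresses inside
           [pCur->GCPtr, pCur->GCPtr + pCur->cb). */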
        if (GCPtr - pCur->GCPtr < pCur->cb)
        {
            if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
            {
                AssertMsgFailed(("Invalid range!!\n"));
                return VERR_INVALID_PARAMETER;
            }

            /*
             * Setup PTE.
             */
            X86PTEPAE Pte;
            Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);

            /*
             * Update the page tables.
             */
            for (;;)
            {
                RTGCUINTPTR    off     = GCPtr - pCur->GCPtr;
                const unsigned iPT     = off >> X86_PD_SHIFT;
                const unsigned iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;
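                /* iPT selects the 4 MB slot within the mapping; iPageNo is the
                   page index into that slot's 1024-entry 32-bit page table.
                   The same index addresses the pair of 512-entry PAE tables
                   below via iPageNo / 512 and iPageNo % 512. */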

                /* 32-bit */
                pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u;  /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */

                /* pae */
                pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512].u = Pte.u;

                /* next */
                cbPages -= PAGE_SIZE;
                if (!cbPages)
                    break;
                GCPtr += PAGE_SIZE;
                Pte.u += PAGE_SIZE;
            }

            return VINF_SUCCESS;
        }

        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("GCPtr=%#x was not found in any mapping ranges!\n", GCPtr));
    return VERR_INVALID_PARAMETER;
}
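
/* Example (illustrative, not from this file): mapping a single page with the
   hypervisor default flags selected by passing fFlags = 0 above. GCPtrMap and
   HCPhysPage are hypothetical values owned by the caller; the target address
   must lie inside an existing mapping.

       int rc = PGMMap(pVM, GCPtrMap, HCPhysPage, PAGE_SIZE, 0);
       AssertRC(rc);
*/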


/**
 * Sets (replaces) the page flags for a range of pages in a mapping.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags)
{
    return PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in a mapping.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#x\n", fFlags));
    Assert(cb);

    /*
     * Align the input.
     */
    cb     += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb      = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr   = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);
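    /* E.g. GCPtr = 0xa0000123, cb = 0x2000 yields GCPtr = 0xa0000000 and
       cb = 0x3000: the partially covered first and last pages are widened
       to whole pages. */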

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
        if (off < pCur->cb)
        {
            AssertMsgReturn(off + cb <= pCur->cb,
                            ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
                             GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
                            VERR_INVALID_PARAMETER);

            /*
             * Perform the requested operation.
             */
            while (cb > 0)
            {
                unsigned iPT  = off >> X86_PD_SHIFT;
                unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
                while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
                {
                    /* 32-Bit */
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u &= fMask | X86_PTE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u |= fFlags & ~X86_PTE_PG_MASK;

                    /* PAE */
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u &= fMask | X86_PTE_PAE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u |= fFlags & ~X86_PTE_PAE_PG_MASK;

                    /* invalidate TLB entry */
                    PGM_INVL_PG((RTGCUINTPTR)pCur->GCPtr + off);

                    /* next */
                    iPTE++;
                    cb  -= PAGE_SIZE;
                    off += PAGE_SIZE;
                }
            }

            return VINF_SUCCESS;
        }
        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("Page range %#x LB%#x not found\n", GCPtr, cb));
    return VERR_INVALID_PARAMETER;
}
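
/* Example (illustrative, not from this file): write-protecting one page of a
   mapping by clearing X86_PTE_RW and ORing in nothing, per the AND/OR
   semantics described above; GCPtr is a hypothetical caller-owned address.

       int rc = PGMMapModifyPage(pVM, GCPtr, PAGE_SIZE,
                                 0,                       // fFlags: OR in nothing
                                 ~(uint64_t)X86_PTE_RW);  // fMask:  keep all but RW
*/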


#ifndef IN_RING0
/**
 * Sets all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pMap        Pointer to the mapping in question.
 * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
{
    Log4(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    if (!pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        return;    /* too early */

    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
    Assert(enmShadowMode <= PGMMODE_PAE_NX);

    /*
     * Init the page tables and insert them into the page directories.
     */
    unsigned i = pMap->cPTs;
    iNewPDE += i;
    while (i-- > 0)
    {
        iNewPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
                AssertFatal(pShw32BitPd);

                if (    pShw32BitPd->a[iNewPDE].n.u1Present
                    && !(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING))
                {
                    pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);
                }

                X86PDE Pde;
                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags. */
                Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
                pShw32BitPd->a[iNewPDE] = Pde;
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDPT  pShwPdpt;
                PX86PDPAE pShwPaePd;
                const unsigned iPdPt = iNewPDE / 256;
                unsigned iPDE = iNewPDE * 2 % 512;
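                /* One 32-bit PDE spans 4 MB, i.e. two 2 MB PAE PDEs, and each
                   512-entry PAE page directory therefore corresponds to 256
                   32-bit PDEs: iPdPt picks the PDPT entry, iPDE the first PDE
                   of the pair. */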

                pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
                Assert(pShwPdpt);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
                if (!pShwPaePd)
                {
                    X86PDPE GstPdpe;

                    if (PGMGetGuestMode(pVM) < PGMMODE_PAE)
                    {
                        /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
                        GstPdpe.u = X86_PDPE_P;     /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors. */
                    }
                    else
                    {
                        PX86PDPE pGstPdpe;
                        pGstPdpe = pgmGstGetPaePDPEPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
                        if (pGstPdpe)
                            GstPdpe = *pGstPdpe;
                        else
                            GstPdpe.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors. */
                    }
                    int rc = pgmShwSyncPaePDPtr(pVM, (iPdPt << X86_PDPT_SHIFT), &GstPdpe, &pShwPaePd);
                    AssertFatal(RT_SUCCESS(rc));
                    if (rc != VINF_SUCCESS)
                    {
                        rc = pgmShwSyncPaePDPtr(pVM, (iPdPt << X86_PDPT_SHIFT), &GstPdpe, &pShwPaePd);
                        AssertFatalMsg(rc == VINF_SUCCESS, ("rc = %Rrc\n", rc));
                    }
                }
                AssertFatal(pShwPaePd);

                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);

                if (!pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                {
                    /* Mark the page as locked; disallow flushing. */
                    pgmPoolLockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
                }
# ifdef VBOX_STRICT
                else if (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING)
                {
                    Assert(PGMGetGuestMode(pVM) >= PGMMODE_PAE);
                    AssertFatalMsg((pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0, ("%RX64 vs %RX64\n", pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT0));
                    Assert(pShwPaePd->a[iPDE+1].u & PGM_PDFLAGS_MAPPING);
                    AssertFatalMsg((pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1, ("%RX64 vs %RX64\n", pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1));
                }
# endif
                if (    pShwPaePd->a[iPDE].n.u1Present
                    && !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
                {
                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPDE);
                }

                X86PDEPAE PdePae0;
                PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
                pShwPaePd->a[iPDE] = PdePae0;

                /* 2nd 2 MB PDE of the 4 MB region. */
                iPDE++;
                AssertFatal(iPDE < 512);

                if (    pShwPaePd->a[iPDE].n.u1Present
                    && !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
                {
                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPDE);
                }
                X86PDEPAE PdePae1;
                PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
                pShwPaePd->a[iPDE] = PdePae1;

                /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
                pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}

/**
 * Clears all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pShwPageCR3 CR3 root page
 * @param   pMap        Pointer to the mapping in question.
 * @param   iOldPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE)
{
    Log(("pgmMapClearShadowPDEs old pde %x (mappings enabled %d)\n", iOldPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    Assert(pShwPageCR3);
# ifdef IN_RC
    Assert(pShwPageCR3 != pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

    PX86PDPT pCurrentShwPdpt = NULL;

    if (    PGMGetGuestMode(pVM) >= PGMMODE_PAE
        &&  pShwPageCR3 != pVM->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        pCurrentShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    }

    unsigned i = pMap->cPTs;
    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);

    iOldPDE += i;
    while (i-- > 0)
    {
        iOldPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                AssertFatal(pShw32BitPd);

                Assert(!pShw32BitPd->a[iOldPDE].n.u1Present || (pShw32BitPd->a[iOldPDE].u & PGM_PDFLAGS_MAPPING));
                pShw32BitPd->a[iOldPDE].u = 0;
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDPT  pShwPdpt  = NULL;
                PX86PDPAE pShwPaePd = NULL;

                const unsigned iPdpt = iOldPDE / 256;       /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
                unsigned iPDE = iOldPDE * 2 % 512;
                pShwPdpt  = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));

                if (pCurrentShwPdpt)
                {
                    /* If the page directory of the old CR3 is reused in the new one, then don't clear the hypervisor mappings. */
                    if ((pCurrentShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) == (pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK))
                        break;
                }
                AssertFatal(pShwPaePd);

                Assert(!pShwPaePd->a[iPDE].n.u1Present || (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPDE].u = 0;

                iPDE++;
                AssertFatal(iPDE < 512);

                Assert(!pShwPaePd->a[iPDE].n.u1Present || (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPDE].u = 0;
                /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
                pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;

                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);

                if (pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                {
                    /* Mark the page as unlocked; allow flushing again. */
                    pgmPoolUnlockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
                }
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}
#endif /* !IN_RING0 */

#if defined(VBOX_STRICT) && !defined(IN_RING0)
/**
 * Checks all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pShwPageCR3 CR3 root page
 * @param   pMap        Pointer to the mapping in question.
 * @param   iPDE        The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapCheckShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
{
    Assert(pShwPageCR3);

    unsigned i = pMap->cPTs;
    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);

    iPDE += i;
    while (i-- > 0)
    {
        iPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                AssertFatal(pShw32BitPd);

                AssertMsg(pShw32BitPd->a[iPDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
                          ("Expected %x vs %x\n", pShw32BitPd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT)));
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDPT  pPdpt     = NULL;
                PX86PDPAE pShwPaePd = NULL;

                const unsigned iPD = iPDE / 256;        /* iPDE * 2 / 512; iPDE is in 4 MB pages */
                unsigned iPaePDE = iPDE * 2 % 512;
                pPdpt     = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pPdpt, (iPD << X86_PDPT_SHIFT));
                AssertFatal(pShwPaePd);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
                          ("Expected %RX64 vs %RX64\n", pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0)));

                iPaePDE++;
                AssertFatal(iPaePDE < 512);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
                          ("Expected %RX64 vs %RX64\n", pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1)));

                Assert(pPdpt->a[iPD].u & PGM_PLXFLAGS_MAPPING);
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}

/**
 * Check the hypervisor mappings in the active CR3.
 *
 * @param   pVM     The virtual machine.
 */
VMMDECL(void) PGMMapCheck(PVM pVM)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;

        pgmMapCheckShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
    }
}
#endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */

#ifndef IN_RING0
/**
 * Apply the hypervisor mappings to the active CR3.
 *
 * @returns VBox status.
 * @param   pVM         The virtual machine.
 * @param   pShwPageCR3 CR3 root page
 */
int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return VINF_SUCCESS;

    /* @note A log flush (in RC) can cause problems when called from MapCR3 (inconsistent state will trigger assertions). */
    Log4(("PGMMapActivateAll fixed mappings=%d\n", pVM->pgm.s.fMappingsFixed));

    Assert(pShwPageCR3 && pShwPageCR3 == pVM->pgm.s.CTX_SUFF(pShwPageCR3));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;

        pgmMapSetShadowPDEs(pVM, pCur, iPDE);
    }
    return VINF_SUCCESS;
}


/**
 * Remove the hypervisor mappings from the specified CR3.
 *
 * @returns VBox status.
 * @param   pVM         The virtual machine.
 * @param   pShwPageCR3 CR3 root page
 */
int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return VINF_SUCCESS;

    Assert(pShwPageCR3);

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;

        pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE);
    }
    return VINF_SUCCESS;
}

/**
 * Checks guest PD for conflicts with VMM GC mappings.
 *
 * @returns true if conflict detected.
 * @returns false if not.
 * @param   pVM     The virtual machine.
 */
VMMDECL(bool) PGMMapHasConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return false;

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);
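
    /* In raw mode the hypervisor mappings live inside the guest address
       space, so any guest PDE that is present in one of a mapping's slots
       shadows it and has to be treated as a conflict. */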

    /*
     * Iterate mappings.
     */
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT  = pCur->cPTs;
            while (iPT-- > 0)
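                /* A present guest PDE only matters if raw-mode code can reach
                   it: user pages always do, supervisor pages only when raw
                   ring-0 execution is enabled. */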
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    && (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMMapHasConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                    iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#else
                    Log(("PGMMapHasConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                    iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#endif
                    return true;
                }
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR GCPtr = pCur->GCPtr;

            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);

                if (    Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMMapHasConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                    PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
#else
                    Log(("PGMMapHasConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                    PDE=%016RX64.\n",
                         GCPtr, Pde.u));
#endif
                    return true;
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
        }
    }
    else
        AssertFailed();

    return false;
}
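
/* Note: ring-3 callers can go straight to PGMMapResolveConflicts below, which
   performs the same scan but resolves each conflict on the spot instead of
   merely reporting it. */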

/**
 * Checks and resolves (ring 3 only) guest conflicts with VMM GC mappings.
 *
 * @returns VBox status.
 * @param   pVM     The virtual machine.
 */
VMMDECL(int) PGMMapResolveConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    /*
     * Iterate mappings.
     */
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT  = pCur->cPTs;
            while (iPT-- > 0)
            {
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    && (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMMapResolveConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT);
                    AssertRCReturn(rc, rc);

                    /*
                     * Update pCur.
                     */
                    pCur = pVM->pgm.s.CTX_SUFF(pMappings);
                    while (pCur && pCur->GCPtr < (iPDE << X86_PD_SHIFT))
                        pCur = pCur->CTX_SUFF(pNext);
                    break;
#else
                    Log(("PGMMapResolveConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
            }
            if (!pCur)
                break;
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR GCPtr = pCur->GCPtr;

            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);

                if (    Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMMapResolveConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
                    int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, GCPtr);
                    AssertRCReturn(rc, rc);

                    /*
                     * Update pCur.
                     */
                    pCur = pVM->pgm.s.CTX_SUFF(pMappings);
                    while (pCur && pCur->GCPtr < GCPtr)
                        pCur = pCur->CTX_SUFF(pNext);
                    break;
#else
                    Log(("PGMMapResolveConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
            if (!pCur)
                break;
        }
    }
    else
        AssertFailed();

    return VINF_SUCCESS;
}

#endif /* !IN_RING0 */
