VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp@17659

Last change on this file since 17659 was 17639, checked in by vboxsync, 16 years ago

More locking required for dynamic mappings in RC.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 29.6 KB
/* $Id: PGMAllMap.cpp 17639 2009-03-10 15:31:20Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <VBox/err.h>


/**
 * Maps a range of physical pages at a given virtual address
 * in the guest context.
 *
 * The GC virtual address range must be within an existing mapping.
 *
 * @returns VBox status code.
 * @param   pVM     The virtual machine.
 * @param   GCPtr   Where to map the page(s). Must be page aligned.
 * @param   HCPhys  Start of the range of physical pages. Must be page aligned.
 * @param   cbPages Number of bytes to map. Must be page aligned.
 * @param   fFlags  Page flags (X86_PTE_*).
 */
VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
{
    AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));

    /*
     * Validate input.
     */
    AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
    AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
    AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));

    /* hypervisor defaults */
    if (!fFlags)
        fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        if (GCPtr - pCur->GCPtr < pCur->cb)
        {
            if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
            {
                AssertMsgFailed(("Invalid range!!\n"));
                return VERR_INVALID_PARAMETER;
            }

            /*
             * Setup PTE.
             */
            X86PTEPAE Pte;
            Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);

            /*
             * Update the page tables.
             */
            for (;;)
            {
                RTGCUINTPTR off = GCPtr - pCur->GCPtr;
                const unsigned iPT = off >> X86_PD_SHIFT;
                const unsigned iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;

                /* 32-bit */
                pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u;      /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */

                /* pae */
                pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512].u = Pte.u;

                /* next */
                cbPages -= PAGE_SIZE;
                if (!cbPages)
                    break;
                GCPtr += PAGE_SIZE;
                Pte.u += PAGE_SIZE;
            }

            return VINF_SUCCESS;
        }

        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("GCPtr=%#x was not found in any mapping ranges!\n", GCPtr));
    return VERR_INVALID_PARAMETER;
}
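
/*
 * Minimal usage sketch (hypothetical caller, not part of the original file):
 * map one host page read/write into a GC range that already lies inside an
 * existing hypervisor mapping.  GCPtrMapping and HCPhysPage are made-up names.
 *
 *     int rc = PGMMap(pVM, GCPtrMapping, HCPhysPage, PAGE_SIZE,
 *                     X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D);
 *     AssertRCReturn(rc, rc);
 *
 * Passing fFlags = 0 applies the hypervisor defaults above (present,
 * accessed and dirty).
 */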


/**
 * Sets (replaces) the page flags for a range of pages in a mapping.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   GCPtr   Virtual address of the first page in the range.
 * @param   cb      Size (in bytes) of the range to apply the modification to.
 * @param   fFlags  Page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags)
{
    return PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in a mapping.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   GCPtr   Virtual address of the first page in the range.
 * @param   cb      Size (in bytes) of the range to apply the modification to.
 * @param   fFlags  The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask   The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#x\n", fFlags));
    Assert(cb);

    /*
     * Align the input.
     */
    cb     += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb      = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr   = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
        if (off < pCur->cb)
        {
            AssertMsgReturn(off + cb <= pCur->cb,
                            ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
                             GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
                            VERR_INVALID_PARAMETER);

            /*
             * Perform the requested operation.
             */
            while (cb > 0)
            {
                unsigned iPT  = off >> X86_PD_SHIFT;
                unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
                while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
                {
                    /* 32-Bit */
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u &= fMask | X86_PTE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u |= fFlags & ~X86_PTE_PG_MASK;

                    /* PAE */
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u &= fMask  | X86_PTE_PAE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u |= fFlags & ~X86_PTE_PAE_PG_MASK;

                    /* invalidate tlb */
                    PGM_INVL_PG((RTGCUINTPTR)pCur->GCPtr + off);

                    /* next */
                    iPTE++;
                    cb  -= PAGE_SIZE;
                    off += PAGE_SIZE;
                }
            }

            return VINF_SUCCESS;
        }
        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("Page range %#x LB%#x not found\n", GCPtr, cb));
    return VERR_INVALID_PARAMETER;
}
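
/*
 * Minimal usage sketch (hypothetical caller, not part of the original file):
 * the existing flags are ANDed with fMask and ORed with fFlags, so making one
 * page of a mapping read-only means clearing X86_PTE_RW via the mask and
 * setting nothing.  GCPtrPage is a made-up name.
 *
 *     int rc = PGMMapModifyPage(pVM, GCPtrPage, PAGE_SIZE,
 *                               0,                       // fFlags: set nothing
 *                               ~(uint64_t)X86_PTE_RW);  // fMask:  clear RW
 *
 * PGMMapSetPage() above is PGMMapModifyPage() with fMask = 0, i.e. it replaces
 * all non-address flags with fFlags.
 */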


#ifndef IN_RING0
/**
 * Sets all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pMap        Pointer to the mapping in question.
 * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
{
    Log4(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    if (!pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        return;    /* too early */

    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
    Assert(enmShadowMode <= PGMMODE_PAE_NX);

    /*
     * Init the page tables and insert them into the page directories.
     */
    unsigned i = pMap->cPTs;
    iNewPDE += i;
    while (i-- > 0)
    {
        iNewPDE--;

        switch(enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
                AssertFatal(pShw32BitPd);
#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShw32BitPd);
#endif
                if (    pShw32BitPd->a[iNewPDE].n.u1Present
                    && !(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING))
                {
                    pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);
                }

                X86PDE Pde;
                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
                Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
                pShw32BitPd->a[iNewPDE] = Pde;
#ifdef IN_RC
                /* Unlock dynamic mappings again. */
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShw32BitPd);
#endif
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDPT  pShwPdpt;
                PX86PDPAE pShwPaePd;
                const unsigned iPdPt = iNewPDE / 256;
                unsigned iPDE = iNewPDE * 2 % 512;
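                /* Index math: iNewPDE indexes 4 MB slots of the 32-bit page
                 * directory (1024 entries), while PAE splits the same 4 GB into
                 * four page directories of 512 entries covering 2 MB each.
                 * Hence iPdPt = iNewPDE / 256 selects the PDPT entry and
                 * iPDE = iNewPDE * 2 % 512 the first of the two 2 MB PDEs.
                 * For example, iNewPDE = 0x3fe (GC base 0xff800000) gives
                 * iPdPt = 3 and iPDE = 508. */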

                pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
                Assert(pShwPdpt);
#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmShwSyncPaePDPtr. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShwPdpt);
#endif
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
                if (!pShwPaePd)
                {
                    X86PDPE GstPdpe;

                    if (PGMGetGuestMode(pVM) < PGMMODE_PAE)
                    {
                        /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
                        GstPdpe.u = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
                    }
                    else
                    {
                        PX86PDPE pGstPdpe;
                        pGstPdpe = pgmGstGetPaePDPEPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
                        if (pGstPdpe)
                            GstPdpe = *pGstPdpe;
                        else
                            GstPdpe.u = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
                    }
                    int rc = pgmShwSyncPaePDPtr(pVM, (iPdPt << X86_PDPT_SHIFT), &GstPdpe, &pShwPaePd);
                    AssertFatal(RT_SUCCESS(rc));
                    if (rc != VINF_SUCCESS)
                    {
                        rc = pgmShwSyncPaePDPtr(pVM, (iPdPt << X86_PDPT_SHIFT), &GstPdpe, &pShwPaePd);
                        AssertFatalMsg(rc == VINF_SUCCESS, ("rc = %Rrc\n", rc));
                    }
                }
                Assert(pShwPaePd);
#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShwPaePd);
#endif
                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);

                if (!pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                {
                    /* Mark the page as locked; disallow flushing. */
                    pgmPoolLockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
                }
# ifdef VBOX_STRICT
                else
                if (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING)
                {
                    Assert(PGMGetGuestMode(pVM) >= PGMMODE_PAE);
                    AssertFatalMsg((pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0, ("%RX64 vs %RX64\n", pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT0));
                    Assert(pShwPaePd->a[iPDE+1].u & PGM_PDFLAGS_MAPPING);
                    AssertFatalMsg((pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1, ("%RX64 vs %RX64\n", pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1));
                }
# endif
                if (    pShwPaePd->a[iPDE].n.u1Present
                    && !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
                {
                    Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPDE);
                }

                X86PDEPAE PdePae0;
                PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
                pShwPaePd->a[iPDE] = PdePae0;

                /* 2nd 2 MB PDE of the 4 MB region */
                iPDE++;
                AssertFatal(iPDE < 512);

                if (    pShwPaePd->a[iPDE].n.u1Present
                    && !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
                {
                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPDE);
                }
                X86PDEPAE PdePae1;
                PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
                pShwPaePd->a[iPDE] = PdePae1;

                /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
                pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;

#ifdef IN_RC
                /* Unlock dynamic mappings again. */
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPaePd);
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPdpt);
#endif
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}

/**
 * Clears all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pShwPageCR3 CR3 root page
 * @param   pMap        Pointer to the mapping in question.
 * @param   iOldPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE)
{
    Log(("pgmMapClearShadowPDEs old pde %x (cPTs=%x) (mappings enabled %d)\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled(&pVM->pgm.s)));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    Assert(pShwPageCR3);
# ifdef IN_RC
    Assert(pShwPageCR3 != pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

    PX86PDPT pCurrentShwPdpt = NULL;

    if (    PGMGetGuestMode(pVM) >= PGMMODE_PAE
        &&  pShwPageCR3 != pVM->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        pCurrentShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    }

    unsigned i = pMap->cPTs;
    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);

    iOldPDE += i;
    while (i-- > 0)
    {
        iOldPDE--;

        switch(enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                AssertFatal(pShw32BitPd);

                Assert(!pShw32BitPd->a[iOldPDE].n.u1Present || (pShw32BitPd->a[iOldPDE].u & PGM_PDFLAGS_MAPPING));
                pShw32BitPd->a[iOldPDE].u = 0;
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDPT  pShwPdpt  = NULL;
                PX86PDPAE pShwPaePd = NULL;

                const unsigned iPdpt = iOldPDE / 256;      /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
                unsigned iPDE = iOldPDE * 2 % 512;
                pShwPdpt  = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));

                /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
                pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;

                if (pCurrentShwPdpt)
                {
                    /* If the page directory of the old CR3 is reused in the new one, then don't clear the hypervisor mappings. */
                    if ((pCurrentShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) == (pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK))
                    {
                        LogFlow(("pgmMapClearShadowPDEs: Pdpe %d reused -> don't clear hypervisor mappings!\n", iPdpt));
                        break;
                    }
                }
                AssertFatal(pShwPaePd);

                Assert(!pShwPaePd->a[iPDE].n.u1Present || (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPDE].u = 0;

                iPDE++;
                AssertFatal(iPDE < 512);

                Assert(!pShwPaePd->a[iPDE].n.u1Present || (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPDE].u = 0;

                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);

                if (pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                {
                    /* Mark the page as unlocked; allow flushing again. */
                    pgmPoolUnlockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
                }
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}
#endif /* !IN_RING0 */

#if defined(VBOX_STRICT) && !defined(IN_RING0)
/**
 * Checks all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pShwPageCR3 CR3 root page
 * @param   pMap        Pointer to the mapping in question.
 * @param   iPDE        The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapCheckShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
{
    Assert(pShwPageCR3);

    unsigned i = pMap->cPTs;
    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);

    iPDE += i;
    while (i-- > 0)
    {
        iPDE--;

        switch(enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                AssertFatal(pShw32BitPd);

                AssertMsg(pShw32BitPd->a[iPDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
                          ("Expected %x vs %x\n", pShw32BitPd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT)));
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDPT  pPdpt     = NULL;
                PX86PDPAE pShwPaePd = NULL;

                const unsigned iPD = iPDE / 256;      /* iPDE * 2 / 512; iPDE is in 4 MB pages */
                unsigned iPaePDE = iPDE * 2 % 512;
                pPdpt     = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pPdpt, (iPD << X86_PDPT_SHIFT));
                AssertFatal(pShwPaePd);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
                          ("Expected %RX64 vs %RX64\n", pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0)));

                iPaePDE++;
                AssertFatal(iPaePDE < 512);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
                          ("Expected %RX64 vs %RX64\n", pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1)));

                Assert(pPdpt->a[iPD].u & PGM_PLXFLAGS_MAPPING);
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}

/**
 * Check the hypervisor mappings in the active CR3.
 *
 * @param   pVM     The virtual machine.
 */
VMMDECL(void) PGMMapCheck(PVM pVM)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;

        pgmMapCheckShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
    }
}
#endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */

#ifndef IN_RING0
/**
 * Apply the hypervisor mappings to the active CR3.
 *
 * @returns VBox status.
 * @param   pVM         The virtual machine.
 * @param   pShwPageCR3 CR3 root page
 */
int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return VINF_SUCCESS;

    /* @note A log flush (in RC) can cause problems when called from MapCR3 (inconsistent state will trigger assertions). */
    Log4(("PGMMapActivateAll fixed mappings=%d\n", pVM->pgm.s.fMappingsFixed));

    Assert(pShwPageCR3 && pShwPageCR3 == pVM->pgm.s.CTX_SUFF(pShwPageCR3));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;

        pgmMapSetShadowPDEs(pVM, pCur, iPDE);
    }
    return VINF_SUCCESS;
}


/**
 * Remove the hypervisor mappings from the specified CR3
 *
 * @returns VBox status.
 * @param   pVM         The virtual machine.
 * @param   pShwPageCR3 CR3 root page
 */
int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return VINF_SUCCESS;

    Assert(pShwPageCR3);

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;

        pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE);
    }
    return VINF_SUCCESS;
}
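
/*
 * Sketch of the intended pairing (hypothetical caller, not from this file):
 * when PGM switches shadow CR3, the mappings are removed from the outgoing
 * root and applied to the root that has just been made current.
 * pOldShwPageCR3 is a made-up name.
 *
 *     rc = pgmMapDeactivateCR3(pVM, pOldShwPageCR3);
 *     AssertRC(rc);
 *     rc = pgmMapActivateCR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
 *     AssertRC(rc);
 *
 * Note the assertion in pgmMapActivateCR3() above: it only accepts the
 * currently active pShwPageCR3.
 */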

/**
 * Checks guest PD for conflicts with VMM GC mappings.
 *
 * @returns true if conflict detected.
 * @returns false if not.
 * @param   pVM     The virtual machine.
 */
VMMDECL(bool) PGMMapHasConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return false;

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    /*
     * Iterate mappings.
     */
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT = pCur->cPTs;
            while (iPT-- > 0)
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#endif
                    return true;
                }
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR   GCPtr = pCur->GCPtr;

            unsigned  iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);

                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
#endif
                    return true;
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
        }
    }
    else
        AssertFailed();

    return false;
}

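/*
 * Typical caller pattern (sketch, not part of the original file): during a
 * CR3 sync with floating mappings, resolve any conflict before relying on the
 * mapping addresses.  Only ring-3 can relocate a mapping, so outside ring-3
 * PGMMapResolveConflicts() below returns VINF_PGM_SYNC_CR3 and the caller has
 * to defer to ring-3.
 *
 *     int rc = PGMMapResolveConflicts(pVM);
 *     if (rc == VINF_PGM_SYNC_CR3)
 *         return rc;                      // RC/R0: let ring-3 relocate it
 *     AssertRCReturn(rc, rc);
 *     Assert(!PGMMapHasConflicts(pVM));
 */
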
/**
 * Checks and resolves (ring 3 only) guest conflicts with VMM GC mappings.
 *
 * @returns VBox status.
 * @param   pVM     The virtual machine.
 */
VMMDECL(int) PGMMapResolveConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    /*
     * Iterate mappings.
     */
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT = pCur->cPTs;
            while (iPT-- > 0)
            {
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT);
                    AssertRCReturn(rc, rc);

                    /*
                     * Update pCur.
                     */
                    pCur = pVM->pgm.s.CTX_SUFF(pMappings);
                    while (pCur && pCur->GCPtr < (iPDE << X86_PD_SHIFT))
                        pCur = pCur->CTX_SUFF(pNext);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
            }
            if (!pCur)
                break;
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR   GCPtr = pCur->GCPtr;

            unsigned  iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);

                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
                    int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, pCur->GCPtr);
                    AssertRCReturn(rc, rc);

                    /*
                     * Update pCur.
                     */
                    pCur = pVM->pgm.s.CTX_SUFF(pMappings);
                    while (pCur && pCur->GCPtr < GCPtr)
                        pCur = pCur->CTX_SUFF(pNext);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
            if (!pCur)
                break;
        }
    }
    else
        AssertFailed();

    return VINF_SUCCESS;
}

#endif /* !IN_RING0 */
835