VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp@18729

Last change on this file since 18729 was 18725, checked in by vboxsync, 16 years ago

PGMAllMap.cpp: relaxed two assertions (for now) to get thru the reset.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 32.3 KB
1/* $Id: PGMAllMap.cpp 18725 2009-04-05 18:13:56Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include "PGMInternal.h"
28#include <VBox/vm.h>
29#include <iprt/assert.h>
30#include <iprt/asm.h>
31#include <VBox/err.h>
32
33
34/**
35 * Maps a range of physical pages at a given virtual address
36 * in the guest context.
37 *
38 * The GC virtual address range must be within an existing mapping.
39 *
40 * @returns VBox status code.
41 * @param pVM The virtual machine.
42 * @param GCPtr Where to map the page(s). Must be page aligned.
43 * @param HCPhys Start of the range of physical pages. Must be page aligned.
44 * @param cbPages Number of bytes to map. Must be page aligned.
45 * @param fFlags Page flags (X86_PTE_*).
46 */
47VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
48{
49 AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));
50
51 /*
52 * Validate input.
53 */
54 AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
55 AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
56 AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));
57
58 /* hypervisor defaults */
59 if (!fFlags)
60 fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;
61
62 /*
63 * Find the mapping.
64 */
65 PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
66 while (pCur)
67 {
68 if (GCPtr - pCur->GCPtr < pCur->cb)
69 {
70 if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
71 {
72 AssertMsgFailed(("Invalid range!!\n"));
73 return VERR_INVALID_PARAMETER;
74 }
75
76 /*
77 * Setup PTE.
78 */
79 X86PTEPAE Pte;
80 Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);
81
82 /*
83 * Update the page tables.
84 */
85 for (;;)
86 {
87 RTGCUINTPTR off = GCPtr - pCur->GCPtr;
88 const unsigned iPT = off >> X86_PD_SHIFT;
89 const unsigned iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;
90
91 /* 32-bit */
92 pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u; /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */
93
94 /* pae */
95 pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512].u = Pte.u;
96
97 /* next */
98 cbPages -= PAGE_SIZE;
99 if (!cbPages)
100 break;
101 GCPtr += PAGE_SIZE;
102 Pte.u += PAGE_SIZE;
103 }
104
105 return VINF_SUCCESS;
106 }
107
108 /* next */
109 pCur = pCur->CTX_SUFF(pNext);
110 }
111
112 AssertMsgFailed(("GCPtr=%#x was not found in any mapping ranges!\n", GCPtr));
113 return VERR_INVALID_PARAMETER;
114}
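/*
 * Hypothetical usage sketch (not from the original sources; GCPtrHyp and
 * HCPhysBuf are invented names): PGMMap requires page aligned GCPtr, HCPhys
 * and cbPages, the target range must lie inside a mapping registered
 * beforehand, and fFlags = 0 selects the hypervisor defaults
 * X86_PTE_P | X86_PTE_A | X86_PTE_D.
 */
#if 0 /* illustration only */
static int pgmMapExampleMapTwoPages(PVM pVM, RTGCUINTPTR GCPtrHyp, RTHCPHYS HCPhysBuf)
{
    /* Map two pages with the default hypervisor flags (P | A | D). */
    return PGMMap(pVM, GCPtrHyp, HCPhysBuf, 2 * PAGE_SIZE, 0 /* fFlags */);
}
#endif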
115
116
117/**
118 * Sets (replaces) the page flags for a range of pages in a mapping.
119 *
120 * @returns VBox status.
121 * @param pVM VM handle.
122 * @param GCPtr Virtual address of the first page in the range.
123 * @param cb Size (in bytes) of the range to apply the modification to.
124 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
125 */
126VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags)
127{
128 return PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0);
129}
130
131
132/**
133 * Modify page flags for a range of pages in a mapping.
134 *
135 * The existing flags are ANDed with the fMask and ORed with the fFlags.
136 *
137 * @returns VBox status code.
138 * @param pVM VM handle.
139 * @param GCPtr Virtual address of the first page in the range.
140 * @param cb Size (in bytes) of the range to apply the modification to.
141 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
142 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
143 */
144VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
145{
146 /*
147 * Validate input.
148 */
149 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#x\n", fFlags));
150 Assert(cb);
151
152 /*
153 * Align the input.
154 */
155 cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
156 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
157 GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);
158
159 /*
160 * Find the mapping.
161 */
162 PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
163 while (pCur)
164 {
165 RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
166 if (off < pCur->cb)
167 {
168 AssertMsgReturn(off + cb <= pCur->cb,
169 ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
170 GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
171 VERR_INVALID_PARAMETER);
172
173 /*
174 * Perform the requested operation.
175 */
176 while (cb > 0)
177 {
178 unsigned iPT = off >> X86_PD_SHIFT;
179 unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
180 while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
181 {
182 /* 32-Bit */
183 pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u &= fMask | X86_PTE_PG_MASK;
184 pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u |= fFlags & ~X86_PTE_PG_MASK;
185
186 /* PAE */
187 pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u &= fMask | X86_PTE_PAE_PG_MASK;
188 pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u |= fFlags & ~X86_PTE_PAE_PG_MASK;
189
190 /* invalidate tlb */
191 PGM_INVL_PG((RTGCUINTPTR)pCur->GCPtr + off);
192
193 /* next */
194 iPTE++;
195 cb -= PAGE_SIZE;
196 off += PAGE_SIZE;
197 }
198 }
199
200 return VINF_SUCCESS;
201 }
202 /* next */
203 pCur = pCur->CTX_SUFF(pNext);
204 }
205
206 AssertMsgFailed(("Page range %#x LB%#x not found\n", GCPtr, cb));
207 return VERR_INVALID_PARAMETER;
208}
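/*
 * Hypothetical sketch of the fFlags/fMask contract (invented names): the
 * existing flags are ANDed with fMask and ORed with fFlags, so clearing the
 * writable bit of one page means masking out X86_PTE_RW and ORing in nothing.
 * PGMMapSetPage is the same call with fMask = 0, i.e. a full replacement of
 * the flag bits.
 */
#if 0 /* illustration only */
static int pgmMapExampleMakeReadOnly(PVM pVM, RTGCPTR GCPtrPage)
{
    return PGMMapModifyPage(pVM, GCPtrPage, PAGE_SIZE,
                            0 /* fFlags: nothing to set */,
                            ~(uint64_t)X86_PTE_RW /* fMask: drop RW, keep the rest */);
}
#endif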
209
210
211#ifndef IN_RING0
212/**
213 * Sets all PDEs involved with the mapping in the shadow page table.
214 *
215 * @param pVM The VM handle.
216 * @param pMap Pointer to the mapping in question.
217 * @param iNewPDE The index of the 32-bit PDE corresponding to the base of the mapping.
218 */
219void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
220{
221 Log4(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));
222
223 if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
224 return;
225
226 if (!pVM->pgm.s.CTX_SUFF(pShwPageCR3))
227 return; /* too early */
228
229 PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
230 Assert(enmShadowMode <= PGMMODE_PAE_NX);
231
232 /*
233 * Insert the page tables into the shadow page directories.
234 */
235 unsigned i = pMap->cPTs;
236 iNewPDE += i;
237 while (i-- > 0)
238 {
239 iNewPDE--;
240
241 switch (enmShadowMode)
242 {
243 case PGMMODE_32_BIT:
244 {
245 PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
246 AssertFatal(pShw32BitPd);
247#ifdef IN_RC /* Lock mapping to prevent it from being reused during pgmPoolFree. */
248 PGMDynLockHCPage(pVM, (uint8_t *)pShw32BitPd);
249#endif
250 /* Free any previous user, unless it's us. */
251 Assert( (pShw32BitPd->a[iNewPDE].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
252 || (pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPT);
253 if ( pShw32BitPd->a[iNewPDE].n.u1Present
254 && !(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING))
255 pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);
256
257 /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags. */
258 pShw32BitPd->a[iNewPDE].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
259 | (uint32_t)pMap->aPTs[i].HCPhysPT;
260#ifdef IN_RC
261 /* Unlock dynamic mappings again. */
262 PGMDynUnlockHCPage(pVM, (uint8_t *)pShw32BitPd);
263#endif
264 break;
265 }
266
267 case PGMMODE_PAE:
268 case PGMMODE_PAE_NX:
269 {
270 const uint32_t iPdPt = iNewPDE / 256;
271 unsigned iPaePde = iNewPDE * 2 % 512;
272 PX86PDPT pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
273 Assert(pShwPdpt);
274#ifdef IN_RC /* Lock mapping to prevent it from being reused during pgmShwSyncPaePDPtr. */
275 PGMDynLockHCPage(pVM, (uint8_t *)pShwPdpt);
276#endif
277
278 /*
279 * Get the shadow PD.
280 * If no PD, sync it (PAE guest) or fake (not present or 32-bit guest).
281 * Note! The RW, US and A bits are reserved for PAE PDPTEs. Setting the
282 * accessed bit causes invalid VT-x guest state errors.
283 */
284 PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, iPdPt << X86_PDPT_SHIFT);
285 if (!pShwPaePd)
286 {
287 X86PDPE GstPdpe;
288 if (PGMGetGuestMode(pVM) < PGMMODE_PAE)
289 GstPdpe.u = X86_PDPE_P;
290 else
291 {
292 PX86PDPE pGstPdpe = pgmGstGetPaePDPEPtr(&pVM->pgm.s, iPdPt << X86_PDPT_SHIFT);
293 if (pGstPdpe)
294 GstPdpe = *pGstPdpe;
295 else
296 GstPdpe.u = X86_PDPE_P;
297 }
298 int rc = pgmShwSyncPaePDPtr(pVM, iPdPt << X86_PDPT_SHIFT, &GstPdpe, &pShwPaePd);
299 AssertFatalRC(rc);
300 }
301 Assert(pShwPaePd);
302#ifdef IN_RC /* Lock mapping to prevent it from being reused during pgmPoolFree. */
303 PGMDynLockHCPage(pVM, (uint8_t *)pShwPaePd);
304#endif
305
306 /*
307 * Mark the page as locked; disallow flushing.
308 */
309 PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
310 AssertFatal(pPoolPagePd);
311 if (!pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
312 pgmPoolLockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
313#ifdef VBOX_STRICT
314 else if (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING)
315 {
316 Assert(PGMGetGuestMode(pVM) >= PGMMODE_PAE); /** @todo We may hit this during reset, will fix later. */
317 AssertFatalMsg( (pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0
318 || !PGMMODE_WITH_PAGING(PGMGetGuestMode(pVM)),
319 ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT0));
320 Assert(pShwPaePd->a[iPaePde+1].u & PGM_PDFLAGS_MAPPING);
321 AssertFatalMsg( (pShwPaePd->a[iPaePde+1].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1
322 || !PGMMODE_WITH_PAGING(PGMGetGuestMode(pVM)),
323 ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde+1].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1));
324 }
325#endif
326
327 /*
328 * Insert our first PT, freeing anything we might be replacing unless it's a mapping (i.e. us).
329 */
330 Assert( (pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
331 || (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0);
332 if ( pShwPaePd->a[iPaePde].n.u1Present
333 && !(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING))
334 {
335 Assert(!(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
336 pgmPoolFree(pVM, pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPaePde);
337 }
338 pShwPaePd->a[iPaePde].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
339 | pMap->aPTs[i].HCPhysPaePT0;
340
341 /* 2nd 2 MB PDE of the 4 MB region, same as above. */
342 iPaePde++;
343 AssertFatal(iPaePde < 512);
344 Assert( (pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
345 || (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1);
346 if ( pShwPaePd->a[iPaePde].n.u1Present
347 && !(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING))
348 pgmPoolFree(pVM, pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPaePde);
349 pShwPaePd->a[iPaePde].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
350 | pMap->aPTs[i].HCPhysPaePT1;
351
352 /*
353 * Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode)
354 */
355 pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
356
357#ifdef IN_RC
358 /* Unlock dynamic mappings again. */
359 PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPaePd);
360 PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPdpt);
361#endif
362 break;
363 }
364
365 default:
366 AssertFailed();
367 break;
368 }
369 }
370}
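/*
 * The PAE index arithmetic above follows from a 32-bit PDE covering 4 MB while
 * a PAE PDE covers 2 MB: 32-bit PDE index iNewPDE selects PDPT entry
 * iNewPDE / 256 and the PAE PDE pair starting at iNewPDE * 2 % 512. The helper
 * below is an illustrative recomputation only (not from the original sources).
 * Example: iNewPDE = 300 -> iPdPt = 1, iPaePde = 88 (and 89 for the 2nd 2 MB half).
 */
#if 0 /* illustration only */
static void pgmMapExamplePaeIndexes(unsigned iNewPDE)
{
    unsigned iPdPt   = iNewPDE / 256;     /* one of the four PDPT entries */
    unsigned iPaePde = iNewPDE * 2 % 512; /* first PAE PDE of the 4 MB pair */
    NOREF(iPdPt); NOREF(iPaePde);
}
#endif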
371
372
373/**
374 * Clears all PDEs involved with the mapping in the shadow page table.
375 *
376 * @param pVM The VM handle.
377 * @param pShwPageCR3 CR3 root page
378 * @param pMap Pointer to the mapping in question.
379 * @param iOldPDE The index of the 32-bit PDE corresponding to the base of the mapping.
380 * @param fDeactivateCR3 Set if called by pgmMapDeactivateCR3.
381 */
382void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3)
383{
384 Log(("pgmMapClearShadowPDEs: old pde %x (cPTs=%x) (mappings enabled %d) fDeactivateCR3=%RTbool\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled(&pVM->pgm.s), fDeactivateCR3));
385
386 if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
387 return;
388
389 Assert(pShwPageCR3);
390# ifdef IN_RC
391 Assert(pShwPageCR3 != pVM->pgm.s.CTX_SUFF(pShwPageCR3));
392# endif
393
394 PX86PDPT pCurrentShwPdpt = NULL;
395 if ( PGMGetGuestMode(pVM) >= PGMMODE_PAE
396 && pShwPageCR3 != pVM->pgm.s.CTX_SUFF(pShwPageCR3))
397 pCurrentShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
398
399 unsigned i = pMap->cPTs;
400 PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
401
402 iOldPDE += i;
403 while (i-- > 0)
404 {
405 iOldPDE--;
406
407 switch(enmShadowMode)
408 {
409 case PGMMODE_32_BIT:
410 {
411 PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
412 AssertFatal(pShw32BitPd);
413
414 Assert(!pShw32BitPd->a[iOldPDE].n.u1Present || (pShw32BitPd->a[iOldPDE].u & PGM_PDFLAGS_MAPPING));
415 pShw32BitPd->a[iOldPDE].u = 0;
416 break;
417 }
418
419 case PGMMODE_PAE:
420 case PGMMODE_PAE_NX:
421 {
422 const unsigned iPdpt = iOldPDE / 256; /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
423 unsigned iPaePde = iOldPDE * 2 % 512;
424 PX86PDPT pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
425 PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));
426
427 /*
428 * Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode)
429 */
430 if (fDeactivateCR3)
431 pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
432 else if (pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING)
433 {
434 /* See if there are any other mappings here. This is suboptimal code. */
435 pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
436 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
437 if ( pCur != pMap
438 && ( (pCur->GCPtr >> X86_PDPT_SHIFT) == iPdpt
439 || (pCur->GCPtrLast >> X86_PDPT_SHIFT) == iPdpt))
440 {
441 pShwPdpt->a[iPdpt].u |= PGM_PLXFLAGS_MAPPING;
442 break;
443 }
444 }
445
446 /*
447 * If the page directory of the old CR3 is reused in the new one, then don't
448 * clear the hypervisor mappings.
449 */
450 if ( pCurrentShwPdpt
451 && (pCurrentShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) == (pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) )
452 {
453 LogFlow(("pgmMapClearShadowPDEs: Pdpe %d reused -> don't clear hypervisor mappings!\n", iPdpt));
454 break;
455 }
456
457 /*
458 * Clear the mappings in the PD.
459 */
460 AssertFatal(pShwPaePd);
461 Assert(!pShwPaePd->a[iPaePde].n.u1Present || (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
462 pShwPaePd->a[iPaePde].u = 0;
463
464 iPaePde++;
465 AssertFatal(iPaePde < 512);
466 Assert(!pShwPaePd->a[iPaePde].n.u1Present || (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
467 pShwPaePd->a[iPaePde].u = 0;
468
469 /*
470 * Unlock the shadow pool PD page if the PDPTE no longer holds any mappings.
471 */
472 if ( fDeactivateCR3
473 || !(pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
474 {
475 PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
476 AssertFatal(pPoolPagePd);
477 if (pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
478 pgmPoolUnlockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
479 }
480 break;
481 }
482
483 default:
484 AssertFailed();
485 break;
486 }
487 }
488}
489#endif /* !IN_RING0 */
490
491#if defined(VBOX_STRICT) && !defined(IN_RING0)
492/**
493 * Checks all PDEs involved with the mapping in the shadow page table.
494 *
495 * @param pVM The VM handle.
496 * @param pShwPageCR3 CR3 root page
497 * @param pMap Pointer to the mapping in question.
498 * @param iPDE The index of the 32-bit PDE corresponding to the base of the mapping.
499 */
500static void pgmMapCheckShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
501{
502 Assert(pShwPageCR3);
503
504 uint32_t i = pMap->cPTs;
505 PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
506
507 iPDE += i;
508 while (i-- > 0)
509 {
510 iPDE--;
511
512 switch (enmShadowMode)
513 {
514 case PGMMODE_32_BIT:
515 {
516 PCX86PD pShw32BitPd = (PCX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
517 AssertFatal(pShw32BitPd);
518
519 AssertMsg(pShw32BitPd->a[iPDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
520 ("Expected %x vs %x; iPDE=%#x %RGv %s\n",
521 pShw32BitPd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
522 iPDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
523 break;
524 }
525
526 case PGMMODE_PAE:
527 case PGMMODE_PAE_NX:
528 {
529 const unsigned iPdpt = iPDE / 256; /* iPDE * 2 / 512; iPDE is in 4 MB pages */
530 unsigned iPaePDE = iPDE * 2 % 512;
531 PX86PDPT pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
532 PCX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, iPdpt << X86_PDPT_SHIFT);
533 AssertFatal(pShwPaePd);
534
535 AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
536 ("Expected %RX64 vs %RX64; iPDE=%#x iPdpt=%#x iPaePDE=%#x %RGv %s\n",
537 pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
538 iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
539
540 iPaePDE++;
541 AssertFatal(iPaePDE < 512);
542
543 AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
544 ("Expected %RX64 vs %RX64; iPDE=%#x iPdpt=%#x iPaePDE=%#x %RGv %s\n",
545 pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
546 iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
547
548 AssertMsg(pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING,
549 ("%RX64; iPdpt=%#x iPDE=%#x iPaePDE=%#x %RGv %s\n",
550 pShwPdpt->a[iPdpt].u,
551 iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
552
553 PCPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
554 AssertFatal(pPoolPagePd);
555 AssertMsg(pPoolPagePd->fLocked, (".idx=%d .type=%d\n", pPoolPagePd->idx, pPoolPagePd->enmKind));
556 break;
557 }
558
559 default:
560 AssertFailed();
561 break;
562 }
563 }
564}
565
566
567/**
568 * Check the hypervisor mappings in the active CR3.
569 *
570 * @param pVM The virtual machine.
571 */
572VMMDECL(void) PGMMapCheck(PVM pVM)
573{
574 /*
575 * Can skip this if mappings are disabled.
576 */
577 if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
578 return;
579
580 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
581
582 /*
583 * Iterate mappings.
584 */
585 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
586 {
587 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
588 pgmMapCheckShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
589 }
590}
591#endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */
592
593#ifndef IN_RING0
594
595/**
596 * Apply the hypervisor mappings to the active CR3.
597 *
598 * @returns VBox status.
599 * @param pVM The virtual machine.
600 * @param pShwPageCR3 CR3 root page
601 */
602int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
603{
604 /*
605 * Can skip this if mappings are disabled.
606 */
607 if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
608 return VINF_SUCCESS;
609
610 /* Note. A log flush (in RC) can cause problems when called from MapCR3 (inconsistent state will trigger assertions). */
611 Log4(("pgmMapActivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));
612
613 Assert(pShwPageCR3 && pShwPageCR3 == pVM->pgm.s.CTX_SUFF(pShwPageCR3));
614
615 /*
616 * Iterate mappings.
617 */
618 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
619 {
620 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
621 pgmMapSetShadowPDEs(pVM, pCur, iPDE);
622 }
623 return VINF_SUCCESS;
624}
625
626
627/**
628 * Remove the hypervisor mappings from the specified CR3.
629 *
630 * @returns VBox status.
631 * @param pVM The virtual machine.
632 * @param pShwPageCR3 CR3 root page
633 */
634int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
635{
636 /*
637 * Can skip this if mappings are disabled.
638 */
639 if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
640 return VINF_SUCCESS;
641
642 Assert(pShwPageCR3);
643 Log4(("pgmMapDeactivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));
644
645 /*
646 * Iterate mappings.
647 */
648 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
649 {
650 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
651 pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE, true /*fDeactivateCR3*/);
652 }
653 return VINF_SUCCESS;
654}
655
656
657/**
658 * Checks guest PD for conflicts with VMM GC mappings.
659 *
660 * @returns true if conflict detected.
661 * @returns false if not.
662 * @param pVM The virtual machine.
663 */
664VMMDECL(bool) PGMMapHasConflicts(PVM pVM)
665{
666 /*
667 * Can skip this if mappings are safely fixed.
668 */
669 if (pVM->pgm.s.fMappingsFixed)
670 return false;
671
672 PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
673 Assert(enmGuestMode <= PGMMODE_PAE_NX);
674
675 /*
676 * Iterate mappings.
677 */
678 if (enmGuestMode == PGMMODE_32_BIT)
679 {
680 /*
681 * Resolve the page directory.
682 */
683 PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
684 Assert(pPD);
685
686 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
687 {
688 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
689 unsigned iPT = pCur->cPTs;
690 while (iPT-- > 0)
691 if ( pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
692 && (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
693 {
694 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
695
696#ifdef IN_RING3
697 Log(("PGMMapHasConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
698 " iPDE=%#x iPT=%#x PDE=%RGp.\n",
699 (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
700 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
701#else
702 Log(("PGMMapHasConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
703 " iPDE=%#x iPT=%#x PDE=%RGp.\n",
704 (iPT + iPDE) << X86_PD_SHIFT,
705 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
706#endif
707 return true;
708 }
709 }
710 }
711 else if ( enmGuestMode == PGMMODE_PAE
712 || enmGuestMode == PGMMODE_PAE_NX)
713 {
714 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
715 {
716 RTGCPTR GCPtr = pCur->GCPtr;
717
718 unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
719 while (iPT-- > 0)
720 {
721 X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
722
723 if ( Pde.n.u1Present
724 && (pVM->fRawR0Enabled || Pde.n.u1User))
725 {
726 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
727#ifdef IN_RING3
728 Log(("PGMMapHasConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
729 " PDE=%016RX64.\n",
730 GCPtr, pCur->pszDesc, Pde.u));
731#else
732 Log(("PGMMapHasConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
733 " PDE=%016RX64.\n",
734 GCPtr, Pde.u));
735#endif
736 return true;
737 }
738 GCPtr += (1 << X86_PD_PAE_SHIFT);
739 }
740 }
741 }
742 else
743 AssertFailed();
744
745 return false;
746}
747
748
749/**
750 * Checks and resolves (ring 3 only) guest conflicts with VMM GC mappings.
751 *
752 * @returns VBox status.
753 * @param pVM The virtual machine.
754 */
755VMMDECL(int) PGMMapResolveConflicts(PVM pVM)
756{
757 /*
758 * Can skip this if mappings are safely fixed.
759 */
760 if (pVM->pgm.s.fMappingsFixed)
761 return VINF_SUCCESS;
762
763 PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
764 Assert(enmGuestMode <= PGMMODE_PAE_NX);
765
766 if (enmGuestMode == PGMMODE_32_BIT)
767 {
768 /*
769 * Resolve the page directory.
770 */
771 PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
772 Assert(pPD);
773
774 /*
775 * Iterate mappings.
776 */
777 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; )
778 {
779 PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
780 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
781 unsigned iPT = pCur->cPTs;
782 while (iPT-- > 0)
783 {
784 if ( pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
785 && ( pVM->fRawR0Enabled
786 || pPD->a[iPDE + iPT].n.u1User))
787 {
788 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
789
790#ifdef IN_RING3
791 Log(("PGMMapResolveConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
792 " iPDE=%#x iPT=%#x PDE=%RGp.\n",
793 (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
794 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
795 int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT);
796 AssertRCReturn(rc, rc);
797 break;
798#else
799 Log(("PGMMapResolveConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
800 " iPDE=%#x iPT=%#x PDE=%RGp.\n",
801 (iPT + iPDE) << X86_PD_SHIFT,
802 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
803 return VINF_PGM_SYNC_CR3;
804#endif
805 }
806 }
807 pCur = pNext;
808 }
809 }
810 else if ( enmGuestMode == PGMMODE_PAE
811 || enmGuestMode == PGMMODE_PAE_NX)
812 {
813 /*
814 * Iterate mappings.
815 */
816 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur;)
817 {
818 PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
819 RTGCPTR GCPtr = pCur->GCPtr;
820 unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
821 while (iPT-- > 0)
822 {
823 X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
824
825 if ( Pde.n.u1Present
826 && (pVM->fRawR0Enabled || Pde.n.u1User))
827 {
828 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
829#ifdef IN_RING3
830 Log(("PGMMapResolveConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
831 " PDE=%016RX64.\n",
832 GCPtr, pCur->pszDesc, Pde.u));
833 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, pCur->GCPtr);
834 AssertRCReturn(rc, rc);
835 break;
836#else
837 Log(("PGMMapResolveConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
838 " PDE=%016RX64.\n",
839 GCPtr, Pde.u));
840 return VINF_PGM_SYNC_CR3;
841#endif
842 }
843 GCPtr += (1 << X86_PD_PAE_SHIFT);
844 }
845 pCur = pNext;
846 }
847 }
848 else
849 AssertFailed();
850
851 Assert(!PGMMapHasConflicts(pVM));
852 return VINF_SUCCESS;
853}
854
855#endif /* !IN_RING0 */
856