VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp@ 17518

Last change on this file since 17518 was 17468, checked in by vboxsync, 16 years ago

Always free entry if it's not a hypervisor mapping

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 30.7 KB
1/* $Id: PGMAllMap.cpp 17468 2009-03-06 13:39:14Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include "PGMInternal.h"
28#include <VBox/vm.h>
29#include <iprt/assert.h>
30#include <iprt/asm.h>
31#include <VBox/err.h>
32
33
34/**
35 * Maps a range of physical pages at a given virtual address
36 * in the guest context.
37 *
38 * The GC virtual address range must be within an existing mapping.
39 *
40 * @returns VBox status code.
41 * @param pVM The virtual machine.
42 * @param GCPtr Where to map the page(s). Must be page aligned.
43 * @param HCPhys Start of the range of physical pages. Must be page aligned.
44 * @param cbPages Number of bytes to map. Must be page aligned.
45 * @param fFlags Page flags (X86_PTE_*).
46 */
47VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
48{
49 AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));
50
51 /*
52 * Validate input.
53 */
54 AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
55 AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
56 AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));
57
58 /* hypervisor defaults */
59 if (!fFlags)
60 fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;
61
62 /*
63 * Find the mapping.
64 */
65 PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
66 while (pCur)
67 {
68 if (GCPtr - pCur->GCPtr < pCur->cb)
69 {
70 if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
71 {
72 AssertMsgFailed(("Invalid range!!\n"));
73 return VERR_INVALID_PARAMETER;
74 }
75
76 /*
77 * Setup PTE.
78 */
79 X86PTEPAE Pte;
80 Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);
81
82 /*
83 * Update the page tables.
84 */
85 for (;;)
86 {
87 RTGCUINTPTR off = GCPtr - pCur->GCPtr;
88 const unsigned iPT = off >> X86_PD_SHIFT;
89 const unsigned iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;
90
91 /* 32-bit */
92 pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u; /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */
93
94 /* pae */
95 pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512].u = Pte.u;
96
97 /* next */
98 cbPages -= PAGE_SIZE;
99 if (!cbPages)
100 break;
101 GCPtr += PAGE_SIZE;
102 Pte.u += PAGE_SIZE;
103 }
104
105 return VINF_SUCCESS;
106 }
107
108 /* next */
109 pCur = pCur->CTX_SUFF(pNext);
110 }
111
112 AssertMsgFailed(("GCPtr=%#x was not found in any mapping ranges!\n", GCPtr));
113 return VERR_INVALID_PARAMETER;
114}
115
116
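/*
 * Editor's note: the block below is an illustrative usage sketch and is not
 * part of the original file. It assumes a hypervisor mapping covering
 * GCPtrMap has already been reserved (e.g. via the ring-3 mapping APIs), and
 * GCPtrMap/HCPhysBuf are placeholder names.
 */
#if 0 /* illustrative only */
static int exampleMapTwoPages(PVM pVM, RTGCUINTPTR GCPtrMap, RTHCPHYS HCPhysBuf)
{
    /* Map two host pages read/write at GCPtrMap; passing 0 for fFlags would
       instead request the hypervisor defaults (P | A | D). */
    return PGMMap(pVM, GCPtrMap, HCPhysBuf, 2 * PAGE_SIZE,
                  X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D);
}
#endif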
117/**
118 * Sets (replaces) the page flags for a range of pages in a mapping.
119 *
120 * @returns VBox status.
121 * @param pVM VM handle.
122 * @param GCPtr Virtual address of the first page in the range.
123 * @param cb Size (in bytes) of the range to apply the modification to.
124 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
125 */
126VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags)
127{
128 return PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0);
129}
130
131
132/**
133 * Modifies page flags for a range of pages in a mapping.
134 *
135 * The existing flags are ANDed with the fMask and ORed with the fFlags.
136 *
137 * @returns VBox status code.
138 * @param pVM VM handle.
139 * @param GCPtr Virtual address of the first page in the range.
140 * @param cb Size (in bytes) of the range to apply the modification to.
141 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
142 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
143 */
144VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
145{
146 /*
147 * Validate input.
148 */
149 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#x\n", fFlags));
150 Assert(cb);
151
152 /*
153 * Align the input.
154 */
155 cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
156 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
157 GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);
158
159 /*
160 * Find the mapping.
161 */
162 PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
163 while (pCur)
164 {
165 RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
166 if (off < pCur->cb)
167 {
168 AssertMsgReturn(off + cb <= pCur->cb,
169 ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
170 GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
171 VERR_INVALID_PARAMETER);
172
173 /*
174 * Perform the requested operation.
175 */
176 while (cb > 0)
177 {
178 unsigned iPT = off >> X86_PD_SHIFT;
179 unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
180 while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
181 {
182 /* 32-Bit */
183 pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u &= fMask | X86_PTE_PG_MASK;
184 pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u |= fFlags & ~X86_PTE_PG_MASK;
185
186 /* PAE */
187 pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u &= fMask | X86_PTE_PAE_PG_MASK;
188 pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u |= fFlags & ~X86_PTE_PAE_PG_MASK;
189
190 /* invalidate tlb */
191 PGM_INVL_PG((RTGCUINTPTR)pCur->GCPtr + off);
192
193 /* next */
194 iPTE++;
195 cb -= PAGE_SIZE;
196 off += PAGE_SIZE;
197 }
198 }
199
200 return VINF_SUCCESS;
201 }
202 /* next */
203 pCur = pCur->CTX_SUFF(pNext);
204 }
205
206 AssertMsgFailed(("Page range %#x LB%#x not found\n", GCPtr, cb));
207 return VERR_INVALID_PARAMETER;
208}
209
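/*
 * Editor's note: an illustrative sketch (not part of the original file) of the
 * fFlags/fMask semantics above: each PTE is ANDed with fMask and ORed with
 * fFlags, while the physical-address bits are always preserved. pVM and
 * GCPtrPage are placeholder names.
 */
#if 0 /* illustrative only */
static void exampleModifyFlags(PVM pVM, RTGCPTR GCPtrPage)
{
    /* Write-protect one page: clear RW via the AND mask, set nothing. */
    PGMMapModifyPage(pVM, GCPtrPage, PAGE_SIZE, 0, ~(uint64_t)X86_PTE_RW);

    /* Replace the flags outright: PGMMapSetPage passes fMask = 0, so only the
       new flags (and the address bits) survive. */
    PGMMapSetPage(pVM, GCPtrPage, PAGE_SIZE, X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D);
}
#endif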
210
211#ifndef IN_RING0
212/**
213 * Sets all PDEs involved with the mapping in the shadow page table.
214 *
215 * @param pVM The VM handle.
216 * @param pMap Pointer to the mapping in question.
217 * @param iNewPDE The index of the 32-bit PDE corresponding to the base of the mapping.
218 */
219void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
220{
221 Log4(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));
222
223 if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
224 return;
225
226#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
227 if (!pVM->pgm.s.CTX_SUFF(pShwPageCR3))
228 return; /* too early */
229#endif
230
231 PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
232 Assert(enmShadowMode <= PGMMODE_PAE_NX);
233
234 /*
235 * Init the page tables and insert them into the page directories.
236 */
237 unsigned i = pMap->cPTs;
238 iNewPDE += i;
239 while (i-- > 0)
240 {
241 iNewPDE--;
242
243 switch(enmShadowMode)
244 {
245 case PGMMODE_32_BIT:
246 {
247 PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
248 AssertFatal(pShw32BitPd);
249
250 if ( pShw32BitPd->a[iNewPDE].n.u1Present
251 && !(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING))
252 {
253 pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);
254 }
255
256 X86PDE Pde;
257 /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
258 Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
259 pShw32BitPd->a[iNewPDE] = Pde;
260 break;
261 }
262
263 case PGMMODE_PAE:
264 case PGMMODE_PAE_NX:
265 {
266 PX86PDPT pShwPdpt;
267 PX86PDPAE pShwPaePd;
268 const unsigned iPdPt = iNewPDE / 256;
269 unsigned iPDE = iNewPDE * 2 % 512;
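/*
 * Editor's note (not in the original file): worked example of the index
 * arithmetic above. One 32-bit PDE spans 4 MB, i.e. two 2 MB PAE PDEs, so
 * iNewPDE selects PDPT entry iNewPDE / 256 and PAE PD entries
 * (iNewPDE * 2) % 512 and the one after it. For iNewPDE = 0x3FE
 * (GC address 0xFF800000): iPdPt = 0x3FE / 256 = 3, iPDE = 0x7FC % 512 = 0x1FC,
 * so the mapping occupies PAE PD 3, entries 0x1FC and 0x1FD.
 */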
270
271 pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
272 Assert(pShwPdpt);
273 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
274#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
275 if (!pShwPaePd)
276 {
277 X86PDPE GstPdpe;
278
279 if (PGMGetGuestMode(pVM) < PGMMODE_PAE)
280 {
281 /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
282 GstPdpe.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
283 }
284 else
285 {
286 PX86PDPE pGstPdpe;
287 pGstPdpe = pgmGstGetPaePDPEPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
288 if (pGstPdpe)
289 GstPdpe = *pGstPdpe;
290 else
291 GstPdpe.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
292 }
293 int rc = pgmShwSyncPaePDPtr(pVM, (iPdPt << X86_PDPT_SHIFT), &GstPdpe, &pShwPaePd);
294 AssertFatal(RT_SUCCESS(rc));
295 if (rc != VINF_SUCCESS)
296 {
297 rc = pgmShwSyncPaePDPtr(pVM, (iPdPt << X86_PDPT_SHIFT), &GstPdpe, &pShwPaePd);
298 AssertFatalMsg(rc == VINF_SUCCESS, ("rc = %Rrc\n", rc));
299 }
300 }
301#endif
302 AssertFatal(pShwPaePd);
303
304 PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
305 AssertFatal(pPoolPagePd);
306
307#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
308 if (!pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
309 {
310 /* Mark the page as locked; disallow flushing. */
311 pgmPoolLockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
312 }
313# ifdef VBOX_STRICT
314 else
315 if (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING)
316 {
317 Assert(PGMGetGuestMode(pVM) >= PGMMODE_PAE);
318 AssertFatalMsg((pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0, ("%RX64 vs %RX64\n", pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT0));
319 Assert(pShwPaePd->a[iPDE+1].u & PGM_PDFLAGS_MAPPING);
320 AssertFatalMsg((pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1, ("%RX64 vs %RX64\n", pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1));
321 }
322# endif
323 if ( pShwPaePd->a[iPDE].n.u1Present
324 && !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
325 {
326 Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
327 pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPDE);
328 }
329
330#else
331 if (pShwPaePd->a[iPDE].n.u1Present)
332 {
333 Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
334 pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iNewPDE);
335 }
336#endif
337 X86PDEPAE PdePae0;
338 PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
339 pShwPaePd->a[iPDE] = PdePae0;
340
341 /* 2nd 2 MB PDE of the 4 MB region */
342 iPDE++;
343 AssertFatal(iPDE < 512);
344
345#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
346 if ( pShwPaePd->a[iPDE].n.u1Present
347 && !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
348 {
349 pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPDE);
350 }
351#else
352 if (pShwPaePd->a[iPDE].n.u1Present)
353 {
354 Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
355 pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iNewPDE);
356 }
357#endif
358 X86PDEPAE PdePae1;
359 PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
360 pShwPaePd->a[iPDE] = PdePae1;
361
362 /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
363 pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
364 break;
365 }
366
367 default:
368 AssertFailed();
369 break;
370 }
371 }
372}
373
374/**
375 * Clears all PDEs involved with the mapping in the shadow page table.
376 *
377 * @param pVM The VM handle.
378 * @param pShwPageCR3 CR3 root page
379 * @param pMap Pointer to the mapping in question.
380 * @param iOldPDE The index of the 32-bit PDE corresponding to the base of the mapping.
381 */
382void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE)
383{
384 Log(("pgmMapClearShadowPDEs old pde %x (mappings enabled %d)\n", iOldPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));
385
386 if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
387 return;
388
389#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
390 Assert(pShwPageCR3);
391# ifdef IN_RC
392 Assert(pShwPageCR3 != pVM->pgm.s.CTX_SUFF(pShwPageCR3));
393# endif
394
395 PX86PDPT pCurrentShwPdpt = NULL;
396
397 if ( PGMGetGuestMode(pVM) >= PGMMODE_PAE
398 && pShwPageCR3 != pVM->pgm.s.CTX_SUFF(pShwPageCR3))
399 {
400 pCurrentShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
401 }
402#endif
403
404 unsigned i = pMap->cPTs;
405 PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
406
407 iOldPDE += i;
408 while (i-- > 0)
409 {
410 iOldPDE--;
411
412 switch(enmShadowMode)
413 {
414 case PGMMODE_32_BIT:
415 {
416#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
417 PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
418#else
419 PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
420#endif
421 AssertFatal(pShw32BitPd);
422
423 Assert(!pShw32BitPd->a[iOldPDE].n.u1Present || (pShw32BitPd->a[iOldPDE].u & PGM_PDFLAGS_MAPPING));
424 pShw32BitPd->a[iOldPDE].u = 0;
425 break;
426 }
427
428 case PGMMODE_PAE:
429 case PGMMODE_PAE_NX:
430 {
431 PX86PDPT pShwPdpt = NULL;
432 PX86PDPAE pShwPaePd = NULL;
433
434 const unsigned iPdpt = iOldPDE / 256; /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
435 unsigned iPDE = iOldPDE * 2 % 512;
436#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
437 pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
438 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));
439
440 if (pCurrentShwPdpt)
441 {
442 /* If the page directory of the old CR3 is reused in the new one, then don't clear the hypervisor mappings. */
443 if ((pCurrentShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) == (pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK))
444 break;
445 }
446#else
447 pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
448 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdpt << X86_PDPT_SHIFT));
449#endif
450 AssertFatal(pShwPaePd);
451
452 Assert(!pShwPaePd->a[iPDE].n.u1Present || (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
453 pShwPaePd->a[iPDE].u = 0;
454
455 iPDE++;
456 AssertFatal(iPDE < 512);
457
458 Assert(!pShwPaePd->a[iPDE].n.u1Present || (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
459 pShwPaePd->a[iPDE].u = 0;
460 /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
461 pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
462
463#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
464 PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
465 AssertFatal(pPoolPagePd);
466
467 if (pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
468 {
469 /* Mark the page as unlocked; allow flushing again. */
470 pgmPoolUnlockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
471 }
472#endif
473
474 break;
475 }
476
477 default:
478 AssertFailed();
479 break;
480 }
481 }
482}
483#endif /* !IN_RING0 */
484
485#if defined(VBOX_STRICT) && !defined(IN_RING0)
486/**
487 * Checks all PDEs involved with the mapping in the shadow page table.
488 *
489 * @param pVM The VM handle.
490 * @param pShwPageCR3 CR3 root page
491 * @param pMap Pointer to the mapping in question.
492 * @param iPDE The index of the 32-bit PDE corresponding to the base of the mapping.
493 */
494void pgmMapCheckShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
495{
496#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
497 Assert(pShwPageCR3);
498#endif
499
500 unsigned i = pMap->cPTs;
501 PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
502
503 iPDE += i;
504 while (i-- > 0)
505 {
506 iPDE--;
507
508 switch(enmShadowMode)
509 {
510 case PGMMODE_32_BIT:
511 {
512#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
513 PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
514#else
515 PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
516#endif
517 AssertFatal(pShw32BitPd);
518
519 AssertMsg(pShw32BitPd->a[iPDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
520 ("Expected %x vs %x\n", pShw32BitPd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT)));
521 break;
522 }
523
524 case PGMMODE_PAE:
525 case PGMMODE_PAE_NX:
526 {
527 PX86PDPT pPdpt = NULL;
528 PX86PDPAE pShwPaePd = NULL;
529
530 const unsigned iPD = iPDE / 256; /* iPDE * 2 / 512; iPDE is in 4 MB pages */
531 unsigned iPaePDE = iPDE * 2 % 512;
532#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
533 pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
534 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pPdpt, (iPD << X86_PDPT_SHIFT));
535#else
536 pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
537 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPD << X86_PDPT_SHIFT));
538#endif
539 AssertFatal(pShwPaePd);
540
541 AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
542 ("Expected %RX64 vs %RX64\n", pShwPaePd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPaePT0)));
543
544 iPaePDE++;
545 AssertFatal(iPaePDE < 512);
546
547 AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
548 ("Expected %RX64 vs %RX64\n", pShwPaePd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPaePT1)));
549
550 Assert(pPdpt->a[iPD].u & PGM_PLXFLAGS_MAPPING);
551 break;
552 }
553
554 default:
555 AssertFailed();
556 break;
557 }
558 }
559}
560
561/**
562 * Checks the hypervisor mappings in the active CR3.
563 *
564 * @param pVM The virtual machine.
565 */
566VMMDECL(void) PGMMapCheck(PVM pVM)
567{
568#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
569 /*
570 * Can skip this if mappings are disabled.
571 */
572 if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
573 return;
574
575# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
576 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
577# endif
578
579 /*
580 * Iterate mappings.
581 */
582 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
583 {
584 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
585
586 pgmMapCheckShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
587 }
588#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
589}
590#endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */
591
592#ifndef IN_RING0
593/**
594 * Applies the hypervisor mappings to the active CR3.
595 *
596 * @returns VBox status.
597 * @param pVM The virtual machine.
598 * @param pShwPageCR3 CR3 root page
599 */
600int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
601{
602#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
603 /*
604 * Can skip this if mappings are disabled.
605 */
606 if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
607#else
608 /*
609 * Can skip this if mappings are safely fixed.
610 */
611 if (pVM->pgm.s.fMappingsFixed)
612#endif
613 return VINF_SUCCESS;
614
615 /* @note A log flush (in RC) can cause problems when called from MapCR3 (inconsistent state will trigger assertions). */
616 Log4(("PGMMapActivateAll fixed mappings=%d\n", pVM->pgm.s.fMappingsFixed));
617
618# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
619 Assert(pShwPageCR3 && pShwPageCR3 == pVM->pgm.s.CTX_SUFF(pShwPageCR3));
620# endif
621
622 /*
623 * Iterate mappings.
624 */
625 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
626 {
627 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
628
629 pgmMapSetShadowPDEs(pVM, pCur, iPDE);
630 }
631 return VINF_SUCCESS;
632}
633
634
635/**
636 * Removes the hypervisor mappings from the specified CR3.
637 *
638 * @returns VBox status.
639 * @param pVM The virtual machine.
640 * @param pShwPageCR3 CR3 root page
641 */
642int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
643{
644#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
645 /*
646 * Can skip this if mappings are disabled.
647 */
648 if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
649#else
650 /*
651 * Can skip this if mappings are safely fixed.
652 */
653 if (pVM->pgm.s.fMappingsFixed)
654#endif
655 return VINF_SUCCESS;
656
657# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
658 Assert(pShwPageCR3);
659# endif
660
661 /*
662 * Iterate mappings.
663 */
664 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
665 {
666 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
667
668 pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE);
669 }
670 return VINF_SUCCESS;
671}
672
673/**
674 * Checks guest PD for conflicts with VMM GC mappings.
675 *
676 * @returns true if conflict detected.
677 * @returns false if not.
678 * @param pVM The virtual machine.
679 */
680VMMDECL(bool) PGMMapHasConflicts(PVM pVM)
681{
682 /*
683 * Can skip this if mappings are safely fixed.
684 */
685 if (pVM->pgm.s.fMappingsFixed)
686 return false;
687
688 PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
689 Assert(enmGuestMode <= PGMMODE_PAE_NX);
690
691 /*
692 * Iterate mappings.
693 */
694 if (enmGuestMode == PGMMODE_32_BIT)
695 {
696 /*
697 * Resolve the page directory.
698 */
699 PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
700 Assert(pPD);
701
702 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
703 {
704 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
705 unsigned iPT = pCur->cPTs;
706 while (iPT-- > 0)
707 if ( pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
708 && (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
709 {
710 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
711
712#ifdef IN_RING3
713 Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
714 " iPDE=%#x iPT=%#x PDE=%RGp.\n",
715 (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
716 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
717#else
718 Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
719 " iPDE=%#x iPT=%#x PDE=%RGp.\n",
720 (iPT + iPDE) << X86_PD_SHIFT,
721 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
722#endif
723 return true;
724 }
725 }
726 }
727 else if ( enmGuestMode == PGMMODE_PAE
728 || enmGuestMode == PGMMODE_PAE_NX)
729 {
730 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
731 {
732 RTGCPTR GCPtr = pCur->GCPtr;
733
734 unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
735 while (iPT-- > 0)
736 {
737 X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
738
739 if ( Pde.n.u1Present
740 && (pVM->fRawR0Enabled || Pde.n.u1User))
741 {
742 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
743#ifdef IN_RING3
744 Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
745 " PDE=%016RX64.\n",
746 GCPtr, pCur->pszDesc, Pde.u));
747#else
748 Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
749 " PDE=%016RX64.\n",
750 GCPtr, Pde.u));
751#endif
752 return true;
753 }
754 GCPtr += (1 << X86_PD_PAE_SHIFT);
755 }
756 }
757 }
758 else
759 AssertFailed();
760
761 return false;
762}
763
764# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
765/**
766 * Checks and resolves (ring 3 only) guest conflicts with VMM GC mappings.
767 *
768 * @returns VBox status.
769 * @param pVM The virtual machine.
770 */
771VMMDECL(int) PGMMapResolveConflicts(PVM pVM)
772{
773 /*
774 * Can skip this if mappings are safely fixed.
775 */
776 if (pVM->pgm.s.fMappingsFixed)
777 return VINF_SUCCESS;
778
779 PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
780 Assert(enmGuestMode <= PGMMODE_PAE_NX);
781
782 /*
783 * Iterate mappings.
784 */
785 if (enmGuestMode == PGMMODE_32_BIT)
786 {
787 /*
788 * Resolve the page directory.
789 */
790 PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
791 Assert(pPD);
792
793 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
794 {
795 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
796 unsigned iPT = pCur->cPTs;
797 while (iPT-- > 0)
798 {
799 if ( pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
800 && (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
801 {
802 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
803
804#ifdef IN_RING3
805 Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
806 " iPDE=%#x iPT=%#x PDE=%RGp.\n",
807 (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
808 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
809 int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT);
810 AssertRCReturn(rc, rc);
811
812 /*
813 * Update pCur.
814 */
815 pCur = pVM->pgm.s.CTX_SUFF(pMappings);
816 while (pCur && pCur->GCPtr < (iPDE << X86_PD_SHIFT))
817 pCur = pCur->CTX_SUFF(pNext);
818 break;
819#else
820 Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
821 " iPDE=%#x iPT=%#x PDE=%RGp.\n",
822 (iPT + iPDE) << X86_PD_SHIFT,
823 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
824 return VINF_PGM_SYNC_CR3;
825#endif
826 }
827 }
828 if (!pCur)
829 break;
830 }
831 }
832 else if ( enmGuestMode == PGMMODE_PAE
833 || enmGuestMode == PGMMODE_PAE_NX)
834 {
835 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
836 {
837 RTGCPTR GCPtr = pCur->GCPtr;
838
839 unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
840 while (iPT-- > 0)
841 {
842 X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
843
844 if ( Pde.n.u1Present
845 && (pVM->fRawR0Enabled || Pde.n.u1User))
846 {
847 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
848#ifdef IN_RING3
849 Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
850 " PDE=%016RX64.\n",
851 GCPtr, pCur->pszDesc, Pde.u));
852 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, GCPtr);
853 AssertRCReturn(rc, rc);
854
855 /*
856 * Update pCur.
857 */
858 pCur = pVM->pgm.s.CTX_SUFF(pMappings);
859 while (pCur && pCur->GCPtr < GCPtr)
860 pCur = pCur->CTX_SUFF(pNext);
861 break;
862#else
863 Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
864 " PDE=%016RX64.\n",
865 GCPtr, Pde.u));
866 return VINF_PGM_SYNC_CR3;
867#endif
868 }
869 GCPtr += (1 << X86_PD_PAE_SHIFT);
870 }
871 if (!pCur)
872 break;
873 }
874 }
875 else
876 AssertFailed();
877
878 return VINF_SUCCESS;
879}
880# endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
881
882#endif /* !IN_RING0 */
883