VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp @ 18976

Last change on this file since 18976 was 18927, checked in by vboxsync on 2009-04-16

Big step to separate VMM data structures for guest SMP. (pgm, em)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 33.2 KB
/* $Id: PGMAllMap.cpp 18927 2009-04-16 11:41:38Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <VBox/err.h>


/**
 * Maps a range of physical pages at a given virtual address
 * in the guest context.
 *
 * The GC virtual address range must be within an existing mapping.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   GCPtr       Where to map the page(s). Must be page aligned.
 * @param   HCPhys      Start of the range of physical pages. Must be page aligned.
 * @param   cbPages     Number of bytes to map. Must be page aligned.
 * @param   fFlags      Page flags (X86_PTE_*).
 */
VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
{
    AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));

    /*
     * Validate input.
     */
    AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
    AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
    AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));

    /* hypervisor defaults */
    if (!fFlags)
        fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        if (GCPtr - pCur->GCPtr < pCur->cb)
        {
            if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
            {
                AssertMsgFailed(("Invalid range!!\n"));
                return VERR_INVALID_PARAMETER;
            }

            /*
             * Setup PTE.
             */
            X86PTEPAE Pte;
            Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);

            /*
             * Update the page tables.
             */
            for (;;)
            {
                RTGCUINTPTR off = GCPtr - pCur->GCPtr;
                const unsigned iPT = off >> X86_PD_SHIFT;
                const unsigned iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;

                /* 32-bit */
                pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u;  /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */

                /* pae */
                pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512].u = Pte.u;

                /* next */
                cbPages -= PAGE_SIZE;
                if (!cbPages)
                    break;
                GCPtr += PAGE_SIZE;
                Pte.u += PAGE_SIZE;
            }

            return VINF_SUCCESS;
        }

        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("GCPtr=%#x was not found in any mapping ranges!\n", GCPtr));
    return VERR_INVALID_PARAMETER;
}
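
/*
 * Illustrative only, not part of the original file: a minimal sketch of how a
 * caller might use PGMMap() above.  The helper name, the GCPtrBase/HCPhysFirst
 * parameters and the choice of flags are invented for the example.
 */
#if 0
static int pgmExampleMapTwoPages(PVM pVM, RTGCUINTPTR GCPtrBase, RTHCPHYS HCPhysFirst)
{
    /* Map two consecutive physical pages read/write into an existing mapping range;
       passing 0 for fFlags would select the hypervisor defaults (P, A, D) instead. */
    int rc = PGMMap(pVM, GCPtrBase, HCPhysFirst, 2 * PAGE_SIZE, X86_PTE_P | X86_PTE_RW);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
#endif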


/**
 * Sets (replaces) the page flags for a range of pages in a mapping.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   GCPtr   Virtual address of the first page in the range.
 * @param   cb      Size (in bytes) of the range to apply the modification to.
 * @param   fFlags  Page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags)
{
    return PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in a mapping.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   GCPtr   Virtual address of the first page in the range.
 * @param   cb      Size (in bytes) of the range to apply the modification to.
 * @param   fFlags  The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask   The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#x\n", fFlags));
    Assert(cb);

    /*
     * Align the input.
     */
    cb     += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb      = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr   = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
        if (off < pCur->cb)
        {
            AssertMsgReturn(off + cb <= pCur->cb,
                            ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
                             GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
                            VERR_INVALID_PARAMETER);

            /*
             * Perform the requested operation.
             */
            while (cb > 0)
            {
                unsigned iPT  = off >> X86_PD_SHIFT;
                unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
                while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
                {
                    /* 32-Bit */
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u &= fMask | X86_PTE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u |= fFlags & ~X86_PTE_PG_MASK;

                    /* PAE */
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u &= fMask | X86_PTE_PAE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u |= fFlags & ~X86_PTE_PAE_PG_MASK;

                    /* invalidate tlb */
                    PGM_INVL_PG((RTGCUINTPTR)pCur->GCPtr + off);

                    /* next */
                    iPTE++;
                    cb  -= PAGE_SIZE;
                    off += PAGE_SIZE;
                }
            }

            return VINF_SUCCESS;
        }
        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("Page range %#x LB%#x not found\n", GCPtr, cb));
    return VERR_INVALID_PARAMETER;
}
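
/*
 * Illustrative only, not part of the original file: a hedged example of the
 * AND/OR semantics documented for PGMMapModifyPage() above.  The variables
 * pVM, GCPtr and rc are assumed to be in scope for the sake of the example.
 */
#if 0
    /* Make a single page read-only: clear X86_PTE_RW via the AND mask and OR in nothing. */
    int rc = PGMMapModifyPage(pVM, GCPtr, PAGE_SIZE, 0 /*fFlags*/, ~(uint64_t)X86_PTE_RW /*fMask*/);
    /* PGMMapSetPage() is simply PGMMapModifyPage() with fMask = 0, i.e. the old flags
       (except the physical address bits) are dropped and replaced by fFlags. */
#endif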


#ifndef IN_RING0
/**
 * Sets all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pMap        Pointer to the mapping in question.
 * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
{
    Log4(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = &pVM->aCpus[0];

    if (!pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
        return;    /* too early */

    PGMMODE enmShadowMode = PGMGetShadowMode(pVCpu);
    Assert(enmShadowMode <= PGMMODE_PAE_NX);

    /*
     * Insert the page tables into the shadow page directories.
     */
    unsigned i = pMap->cPTs;
    iNewPDE += i;
    while (i-- > 0)
    {
        iNewPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVCpu->pgm.s);
                AssertFatal(pShw32BitPd);
#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShw32BitPd);
#endif
                /* Free any previous user, unless it's us. */
                Assert(   (pShw32BitPd->a[iNewPDE].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
                       || (pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPT);
                if (    pShw32BitPd->a[iNewPDE].n.u1Present
                    && !(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING))
                    pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);

                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags. */
                pShw32BitPd->a[iNewPDE].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                          | (uint32_t)pMap->aPTs[i].HCPhysPT;
#ifdef IN_RC
                /* Unlock dynamic mappings again. */
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShw32BitPd);
#endif
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const uint32_t iPdPt = iNewPDE / 256;
                unsigned iPaePde = iNewPDE * 2 % 512;
                PX86PDPT pShwPdpt = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);
                Assert(pShwPdpt);
#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmShwSyncPaePDPtr. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShwPdpt);
#endif

                /*
                 * Get the shadow PD.
                 * If no PD, sync it (PAE guest) or fake (not present or 32-bit guest).
                 * Note! The RW, US and A bits are reserved for PAE PDPTEs. Setting the
                 *       accessed bit causes invalid VT-x guest state errors.
                 */
                PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVCpu->pgm.s, iPdPt << X86_PDPT_SHIFT);
                if (!pShwPaePd)
                {
                    X86PDPE GstPdpe;
                    if (PGMGetGuestMode(pVCpu) < PGMMODE_PAE)
                        GstPdpe.u = X86_PDPE_P;
                    else
                    {
                        PX86PDPE pGstPdpe = pgmGstGetPaePDPEPtr(&pVCpu->pgm.s, iPdPt << X86_PDPT_SHIFT);
                        if (pGstPdpe)
                            GstPdpe = *pGstPdpe;
                        else
                            GstPdpe.u = X86_PDPE_P;
                    }
                    int rc = pgmShwSyncPaePDPtr(pVM, pVCpu, iPdPt << X86_PDPT_SHIFT, &GstPdpe, &pShwPaePd);
                    AssertFatalRC(rc);
                }
                Assert(pShwPaePd);
#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShwPaePd);
#endif

                /*
                 * Mark the page as locked; disallow flushing.
                 */
                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);
                if (!pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                    pgmPoolLockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
#ifdef VBOX_STRICT
                else if (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING)
                {
                    Assert(PGMGetGuestMode(pVCpu) >= PGMMODE_PAE);  /** @todo We may hit this during reset, will fix later. */
                    AssertFatalMsg(   (pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0
                                   || !PGMMODE_WITH_PAGING(PGMGetGuestMode(pVCpu)),
322 ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde+1].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT0));
                    Assert(pShwPaePd->a[iPaePde+1].u & PGM_PDFLAGS_MAPPING);
                    AssertFatalMsg(   (pShwPaePd->a[iPaePde+1].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1
                                   || !PGMMODE_WITH_PAGING(PGMGetGuestMode(pVCpu)),
                                   ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde+1].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1));
                }
#endif

                /*
                 * Insert our first PT, freeing anything we might be replacing unless it's a mapping (i.e. us).
                 */
                Assert(   (pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
                       || (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0);
                if (    pShwPaePd->a[iPaePde].n.u1Present
                    && !(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING))
                {
                    Assert(!(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
                    pgmPoolFree(pVM, pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPaePde);
                }
                pShwPaePd->a[iPaePde].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                        | pMap->aPTs[i].HCPhysPaePT0;

                /* 2nd 2 MB PDE of the 4 MB region, same as above. */
                iPaePde++;
                AssertFatal(iPaePde < 512);
                Assert(   (pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
                       || (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1);
                if (    pShwPaePd->a[iPaePde].n.u1Present
                    && !(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING))
                    pgmPoolFree(pVM, pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPaePde);
                pShwPaePd->a[iPaePde].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                        | pMap->aPTs[i].HCPhysPaePT1;

                /*
                 * Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode)
                 */
                pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;

#ifdef IN_RC
                /* Unlock dynamic mappings again. */
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPaePd);
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPdpt);
#endif
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}
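
/*
 * Illustrative only, not part of the original file: a worked example of the PAE
 * index arithmetic used in pgmMapSetShadowPDEs() above.  Each 32-bit PDE index
 * covers 4 MB, i.e. two 2 MB PAE PDEs.  For an arbitrary iNewPDE = 0x280 (640):
 *      iPdPt   = 640 / 256     = 2     ->  third PAE page directory
 *      iPaePde = 640 * 2 % 512 = 256   ->  PAE PDEs 256 and 257 receive the two halves
 */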


/**
 * Clears all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM             The VM handle.
 * @param   pShwPageCR3     CR3 root page.
 * @param   pMap            Pointer to the mapping in question.
 * @param   iOldPDE         The index of the 32-bit PDE corresponding to the base of the mapping.
 * @param   fDeactivateCR3  Set if it's pgmMapDeactivateCR3 calling.
 */
void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3)
{
    Log(("pgmMapClearShadowPDEs: old pde %x (cPTs=%x) (mappings enabled %d) fDeactivateCR3=%RTbool\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled(&pVM->pgm.s), fDeactivateCR3));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = &pVM->aCpus[0];

    Assert(pShwPageCR3);
# ifdef IN_RC
    Assert(pShwPageCR3 != pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

    PX86PDPT pCurrentShwPdpt = NULL;
    if (    PGMGetGuestMode(pVCpu) >= PGMMODE_PAE
        &&  pShwPageCR3 != pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
        pCurrentShwPdpt = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);

    unsigned i = pMap->cPTs;
    PGMMODE  enmShadowMode = PGMGetShadowMode(pVCpu);

    iOldPDE += i;
    while (i-- > 0)
    {
        iOldPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                AssertFatal(pShw32BitPd);

                Assert(!pShw32BitPd->a[iOldPDE].n.u1Present || (pShw32BitPd->a[iOldPDE].u & PGM_PDFLAGS_MAPPING));
                pShw32BitPd->a[iOldPDE].u = 0;
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const unsigned iPdpt = iOldPDE / 256;       /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
                unsigned iPaePde = iOldPDE * 2 % 512;
                PX86PDPT pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVCpu->pgm.s, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));

                /*
                 * Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode)
                 */
                if (fDeactivateCR3)
                    pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
                else if (pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING)
                {
                    /* See if there are any other mappings here. This is suboptimal code. */
                    pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
                    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
                        if (    pCur != pMap
                            &&  (   (pCur->GCPtr >> X86_PDPT_SHIFT) == iPdpt
                                 || (pCur->GCPtrLast >> X86_PDPT_SHIFT) == iPdpt))
                        {
                            pShwPdpt->a[iPdpt].u |= PGM_PLXFLAGS_MAPPING;
                            break;
                        }
                }

                /*
                 * If the page directory of the old CR3 is reused in the new one, then don't
                 * clear the hypervisor mappings.
                 */
                if (    pCurrentShwPdpt
                    &&  (pCurrentShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) == (pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) )
                {
                    LogFlow(("pgmMapClearShadowPDEs: Pdpe %d reused -> don't clear hypervisor mappings!\n", iPdpt));
                    break;
                }

                /*
                 * Clear the mappings in the PD.
                 */
                AssertFatal(pShwPaePd);
                Assert(!pShwPaePd->a[iPaePde].n.u1Present || (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPaePde].u = 0;

                iPaePde++;
                AssertFatal(iPaePde < 512);
                Assert(!pShwPaePd->a[iPaePde].n.u1Present || (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPaePde].u = 0;

                /*
                 * Unlock the shadow pool PD page if the PDPTE no longer holds any mappings.
                 */
                if (    fDeactivateCR3
                    ||  !(pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
                {
                    PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
                    AssertFatal(pPoolPagePd);
                    if (pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                        pgmPoolUnlockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
                }
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}
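
/*
 * Illustrative only, not part of the original file: a quick example of the PDPT
 * index math used in the rescan loop of pgmMapClearShadowPDEs() above.  A
 * hypervisor mapping placed at, say, GCPtr = 0xa0000000 gives
 * GCPtr >> X86_PDPT_SHIFT = 2, so it is PDPT entry 2 that keeps (or loses) the
 * PGM_PLXFLAGS_MAPPING flag.
 */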
#endif /* !IN_RING0 */

#if defined(VBOX_STRICT) && !defined(IN_RING0)
/**
 * Checks all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM             The VM handle.
 * @param   pVCpu           The VMCPU handle.
 * @param   pShwPageCR3     CR3 root page.
 * @param   pMap            Pointer to the mapping in question.
 * @param   iPDE            The index of the 32-bit PDE corresponding to the base of the mapping.
 */
static void pgmMapCheckShadowPDEs(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
{
    Assert(pShwPageCR3);

    uint32_t i = pMap->cPTs;
    PGMMODE  enmShadowMode = PGMGetShadowMode(pVCpu);

    iPDE += i;
    while (i-- > 0)
    {
        iPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PCX86PD pShw32BitPd = (PCX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                AssertFatal(pShw32BitPd);

                AssertMsg(pShw32BitPd->a[iPDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
                          ("Expected %x vs %x; iPDE=%#x %RGv %s\n",
                           pShw32BitPd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
                           iPDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const unsigned iPdpt = iPDE / 256;          /* iPDE * 2 / 512; iPDE is in 4 MB pages */
                unsigned iPaePDE = iPDE * 2 % 512;
                PX86PDPT   pShwPdpt  = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                PCX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVCpu->pgm.s, pShwPdpt, iPdpt << X86_PDPT_SHIFT);
                AssertFatal(pShwPaePd);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
                          ("Expected %RX64 vs %RX64; iPDE=%#x iPdpt=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));

                iPaePDE++;
                AssertFatal(iPaePDE < 512);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
                          ("Expected %RX64 vs %RX64; iPDE=%#x iPdpt=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));

                AssertMsg(pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING,
                          ("%RX64; iPdpt=%#x iPDE=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPdpt->a[iPdpt].u,
                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));

                PCPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);
                AssertMsg(pPoolPagePd->fLocked, (".idx=%d .type=%d\n", pPoolPagePd->idx, pPoolPagePd->enmKind));
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}


/**
 * Check the hypervisor mappings in the active CR3.
 *
 * @param   pVM     The virtual machine.
 */
VMMDECL(void) PGMMapCheck(PVM pVM)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    Assert(pVM->cCPUs == 1);

    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = &pVM->aCpus[0];

    Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapCheckShadowPDEs(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
    }
}
#endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */

#ifndef IN_RING0

/**
 * Apply the hypervisor mappings to the active CR3.
 *
 * @returns VBox status.
 * @param   pVM             The virtual machine.
 * @param   pShwPageCR3     CR3 root page.
 */
int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return VINF_SUCCESS;

    /* Note. A log flush (in RC) can cause problems when called from MapCR3 (inconsistent state will trigger assertions). */
    Log4(("pgmMapActivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));

    Assert(pVM->cCPUs == 1);

#ifdef DEBUG
    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = &pVM->aCpus[0];
#endif
    Assert(pShwPageCR3 && pShwPageCR3 == pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapSetShadowPDEs(pVM, pCur, iPDE);
    }
    return VINF_SUCCESS;
}


/**
 * Remove the hypervisor mappings from the specified CR3.
 *
 * @returns VBox status.
 * @param   pVM             The virtual machine.
 * @param   pShwPageCR3     CR3 root page.
 */
int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return VINF_SUCCESS;

    Assert(pShwPageCR3);
    Log4(("pgmMapDeactivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE, true /*fDeactivateCR3*/);
    }
    return VINF_SUCCESS;
}


/**
 * Checks guest PD for conflicts with VMM GC mappings.
 *
 * @returns true if conflict detected.
 * @returns false if not.
 * @param   pVM     The virtual machine.
 */
VMMDECL(bool) PGMMapHasConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return false;

    Assert(pVM->cCPUs == 1);

    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVCpu);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    /*
     * Iterate mappings.
     */
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT  = pCur->cPTs;
            while (iPT-- > 0)
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         " iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         " iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#endif
                    return true;
                }
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR GCPtr = pCur->GCPtr;

            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVCpu->pgm.s, GCPtr);

                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         " PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         " PDE=%016RX64.\n",
                         GCPtr, Pde.u));
#endif
                    return true;
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
        }
    }
    else
        AssertFailed();

    return false;
}
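
/*
 * Illustrative only, not part of the original file: a hedged sketch of how the
 * conflict APIs could be combined by a caller.  The surrounding control flow is
 * invented for the example; PGMMapResolveConflicts() below already performs its
 * own detection, so the explicit check here is purely for illustration.
 */
#if 0
    if (PGMMapHasConflicts(pVM))
    {
        /* In ring-3 this relocates the conflicting mapping(s); in R0/RC it
           returns VINF_PGM_SYNC_CR3 so that ring-3 can deal with it. */
        int rc = PGMMapResolveConflicts(pVM);
        AssertRC(rc);
    }
#endif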


/**
 * Checks and resolves (ring 3 only) guest conflicts with VMM GC mappings.
 *
 * @returns VBox status.
 * @param   pVM     The virtual machine.
 */
VMMDECL(int) PGMMapResolveConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;

    Assert(pVM->cCPUs == 1);

    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVCpu);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
        Assert(pPD);

        /*
         * Iterate mappings.
         */
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; )
        {
            PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
            unsigned    iPDE  = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned    iPT   = pCur->cPTs;
            while (iPT-- > 0)
            {
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (   pVM->fRawR0Enabled
                         || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         " iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT);
                    AssertRCReturn(rc, rc);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         " iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
            }
            pCur = pNext;
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        /*
         * Iterate mappings.
         */
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur;)
        {
            PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
            RTGCPTR     GCPtr = pCur->GCPtr;
            unsigned    iPT   = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVCpu->pgm.s, GCPtr);

                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         " PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
                    int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, pCur->GCPtr);
                    AssertRCReturn(rc, rc);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         " PDE=%016RX64.\n",
                         GCPtr, Pde.u));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
            pCur = pNext;
        }
    }
    else
        AssertFailed();

    Assert(!PGMMapHasConflicts(pVM));
    return VINF_SUCCESS;
}

#endif /* !IN_RING0 */