VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp @ 31463

Last change on this file since 31463 was 31402, checked in by vboxsync, 14 years ago

PGM: Replaced the hazardous raw-mode context dynamic mapping code with the PGMR0DynMap code used by darwin/x86. This is a risky change, but it should pay off once stable by providing 100% certainty that dynamically mapped pages aren't reused behind our back (this has been observed in seemingly benign code paths recently).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 32.8 KB
/* $Id: PGMAllMap.cpp 31402 2010-08-05 12:28:18Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include "../PGMInternal.h"
#include <VBox/vm.h>
#include "../PGMInline.h"
#include <VBox/err.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>


/**
 * Maps a range of physical pages at a given virtual address
 * in the guest context.
 *
 * The GC virtual address range must be within an existing mapping.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   GCPtr       Where to map the page(s). Must be page aligned.
 * @param   HCPhys      Start of the range of physical pages. Must be page aligned.
 * @param   cbPages     Number of bytes to map. Must be page aligned.
 * @param   fFlags      Page flags (X86_PTE_*).
 */
VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
{
    AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));

    /*
     * Validate input.
     */
    AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
    AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
    AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));

    /* hypervisor defaults */
    if (!fFlags)
        fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        if (GCPtr - pCur->GCPtr < pCur->cb)
        {
            if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
            {
                AssertMsgFailed(("Invalid range!!\n"));
                return VERR_INVALID_PARAMETER;
            }

            /*
             * Setup PTE.
             */
            X86PTEPAE Pte;
            Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);

            /*
             * Update the page tables.
             */
            for (;;)
            {
                RTGCUINTPTR off = GCPtr - pCur->GCPtr;
                const unsigned iPT = off >> X86_PD_SHIFT;
                const unsigned iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;

                /* 32-bit */
                pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u;  /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */

                /* pae */
                pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512].u = Pte.u;

                /* next */
                cbPages -= PAGE_SIZE;
                if (!cbPages)
                    break;
                GCPtr += PAGE_SIZE;
                Pte.u += PAGE_SIZE;
            }

            return VINF_SUCCESS;
        }

        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("GCPtr=%#x was not found in any mapping ranges!\n", GCPtr));
    return VERR_INVALID_PARAMETER;
}
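
/* Usage sketch (illustrative only; GCPtrMap and HCPhysStart are hypothetical
 * values owned by a caller that already has a hypervisor mapping covering
 * GCPtrMap):
 *
 *     int rc = PGMMap(pVM, GCPtrMap, HCPhysStart, 2 * PAGE_SIZE, 0);
 *     AssertRC(rc);
 *
 * Passing fFlags == 0 selects the hypervisor defaults applied above
 * (X86_PTE_P | X86_PTE_A | X86_PTE_D); any other X86_PTE_* combination is
 * used as-is for both the 32-bit and the PAE page tables of the mapping.
 */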


/**
 * Sets (replaces) the page flags for a range of pages in a mapping.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags)
{
    return PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in a mapping.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#x\n", fFlags));
    Assert(cb);

    /*
     * Align the input.
     */
    cb    += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb     = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr  = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
        if (off < pCur->cb)
        {
            AssertMsgReturn(off + cb <= pCur->cb,
                            ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
                             GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
                            VERR_INVALID_PARAMETER);

            /*
             * Perform the requested operation.
             */
            while (cb > 0)
            {
                unsigned iPT  = off >> X86_PD_SHIFT;
                unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
                while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
                {
                    /* 32-Bit */
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u &= fMask | X86_PTE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u |= fFlags & ~X86_PTE_PG_MASK;

                    /* PAE */
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u &= fMask | X86_PTE_PAE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u |= fFlags & ~X86_PTE_PAE_PG_MASK;

                    /* invalidate tlb */
                    PGM_INVL_PG(VMMGetCpu(pVM), (RTGCUINTPTR)pCur->GCPtr + off);

                    /* next */
                    iPTE++;
                    cb  -= PAGE_SIZE;
                    off += PAGE_SIZE;
                }
            }

            return VINF_SUCCESS;
        }
        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("Page range %#x LB%#x not found\n", GCPtr, cb));
    return VERR_INVALID_PARAMETER;
}
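
/* Illustrative note on the AND/OR semantics: to make a hypothetical range
 * GCPtrRange/cbRange read-only while leaving all other attribute bits alone,
 * a caller could pass no OR bits and mask out only the RW bit:
 *
 *     int rc = PGMMapModifyPage(pVM, GCPtrRange, cbRange, 0, ~(uint64_t)X86_PTE_RW);
 *
 * PGMMapSetPage above is simply the fMask == 0 case: every attribute bit is
 * cleared first (the physical address is preserved by the page mask) and then
 * replaced by fFlags.
 */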


#ifndef IN_RING0
/**
 * Sets all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pMap        Pointer to the mapping in question.
 * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
{
    Log4(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));

    if (   !pgmMapAreMappingsEnabled(&pVM->pgm.s)
        || pVM->cCpus > 1)
        return;

    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    if (!pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
        return;    /* too early */

    PGMMODE enmShadowMode = PGMGetShadowMode(pVCpu);
    Assert(enmShadowMode <= PGMMODE_PAE_NX);

    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

    /*
     * Insert the page tables into the shadow page directories.
     */
    unsigned i = pMap->cPTs;
    iNewPDE += i;
    while (i-- > 0)
    {
        iNewPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(pVCpu);
                AssertFatal(pShw32BitPd);

                /* Free any previous user, unless it's us. */
                Assert(   (pShw32BitPd->a[iNewPDE].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
                       || (pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPT);
                if (    pShw32BitPd->a[iNewPDE].n.u1Present
                    &&  !(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING))
                    pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);

                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags. */
                pShw32BitPd->a[iNewPDE].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                          | (uint32_t)pMap->aPTs[i].HCPhysPT;
                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShw32BitPd);
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const uint32_t iPdPt = iNewPDE / 256;
                unsigned iPaePde = iNewPDE * 2 % 512;
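                /* Worked example (illustrative): one 32-bit PDE covers 4 MB, i.e. two
                 * 2 MB PAE PDEs.  For a hypothetical iNewPDE of 0x3ff this yields
                 * iPdPt = 0x3ff / 256 = 3 and iPaePde = (0x3ff * 2) % 512 = 510, so the
                 * pair of PAE PDEs written below sit at entries 510 and 511 of the page
                 * directory referenced by PDPT slot 3. */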
                PX86PDPT pShwPdpt = pgmShwGetPaePDPTPtr(pVCpu);
                Assert(pShwPdpt);

                /*
                 * Get the shadow PD.
                 * If no PD, sync it (PAE guest) or fake (not present or 32-bit guest).
                 * Note! The RW, US and A bits are reserved for PAE PDPTEs. Setting the
                 *       accessed bit causes invalid VT-x guest state errors.
                 */
                PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(pVCpu, iPdPt << X86_PDPT_SHIFT);
                if (!pShwPaePd)
                {
                    X86PDPE GstPdpe;
                    if (PGMGetGuestMode(pVCpu) < PGMMODE_PAE)
                        GstPdpe.u = X86_PDPE_P;
                    else
                    {
                        PX86PDPE pGstPdpe = pgmGstGetPaePDPEPtr(pVCpu, iPdPt << X86_PDPT_SHIFT);
                        if (pGstPdpe)
                            GstPdpe = *pGstPdpe;
                        else
                            GstPdpe.u = X86_PDPE_P;
                    }
                    int rc = pgmShwSyncPaePDPtr(pVCpu, iPdPt << X86_PDPT_SHIFT, GstPdpe.u, &pShwPaePd);
                    AssertFatalRC(rc);
                }
                Assert(pShwPaePd);

                /*
                 * Mark the page as locked; disallow flushing.
                 */
                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPage(pPool, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);
                if (!pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                    pgmPoolLockPage(pPool, pPoolPagePd);
#ifdef VBOX_STRICT
                else if (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING)
                {
                    Assert(PGMGetGuestMode(pVCpu) >= PGMMODE_PAE); /** @todo We may hit this during reset, will fix later. */
                    AssertFatalMsg(   (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0
                                   || !PGMMODE_WITH_PAGING(PGMGetGuestMode(pVCpu)),
                                   ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK, pMap->aPTs[i].HCPhysPaePT0));
                    Assert(pShwPaePd->a[iPaePde+1].u & PGM_PDFLAGS_MAPPING);
                    AssertFatalMsg(   (pShwPaePd->a[iPaePde+1].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1
                                   || !PGMMODE_WITH_PAGING(PGMGetGuestMode(pVCpu)),
                                   ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde+1].u & X86_PDE_PAE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1));
                }
#endif

                /*
                 * Insert our first PT, freeing anything we might be replacing unless it's a mapping (i.e. us).
                 */
                Assert(   (pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
                       || (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0);
                if (    pShwPaePd->a[iPaePde].n.u1Present
                    &&  !(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING))
                {
                    Assert(!(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
                    pgmPoolFree(pVM, pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK, pPoolPagePd->idx, iPaePde);
                }
                pShwPaePd->a[iPaePde].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                        | pMap->aPTs[i].HCPhysPaePT0;

                /* 2nd 2 MB PDE of the 4 MB region, same as above. */
                iPaePde++;
                AssertFatal(iPaePde < 512);
                Assert(   (pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
                       || (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1);
                if (    pShwPaePd->a[iPaePde].n.u1Present
                    &&  !(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING))
                    pgmPoolFree(pVM, pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPaePde);
                pShwPaePd->a[iPaePde].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                        | pMap->aPTs[i].HCPhysPaePT1;

                /*
                 * Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode)
                 */
                pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;

                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPaePd);
                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPdpt);
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}


/**
 * Clears all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM             The VM handle.
 * @param   pShwPageCR3     CR3 root page
 * @param   pMap            Pointer to the mapping in question.
 * @param   iOldPDE         The index of the 32-bit PDE corresponding to the base of the mapping.
 * @param   fDeactivateCR3  Set if it's pgmMapDeactivateCR3 calling.
 */
void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3)
{
    Log(("pgmMapClearShadowPDEs: old pde %x (cPTs=%x) (mappings enabled %d) fDeactivateCR3=%RTbool\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled(&pVM->pgm.s), fDeactivateCR3));

    /*
     * Skip this if disabled or if it doesn't apply.
     */
    if (   !pgmMapAreMappingsEnabled(&pVM->pgm.s)
        || pVM->cCpus > 1)
        return;

    Assert(pShwPageCR3);

    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = VMMGetCpu0(pVM);
# ifdef IN_RC
    Assert(pShwPageCR3 != pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

    PX86PDPT pCurrentShwPdpt = NULL;
    if (   PGMGetGuestMode(pVCpu) >= PGMMODE_PAE
        && pShwPageCR3 != pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
        pCurrentShwPdpt = pgmShwGetPaePDPTPtr(pVCpu);

    unsigned i = pMap->cPTs;
    PGMMODE enmShadowMode = PGMGetShadowMode(pVCpu);

    iOldPDE += i;
    while (i-- > 0)
    {
        iOldPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPageCR3);
                AssertFatal(pShw32BitPd);

                Assert(!pShw32BitPd->a[iOldPDE].n.u1Present || (pShw32BitPd->a[iOldPDE].u & PGM_PDFLAGS_MAPPING));
                pShw32BitPd->a[iOldPDE].u = 0;
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const unsigned iPdpt = iOldPDE / 256;       /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
                unsigned iPaePde = iOldPDE * 2 % 512;
                PX86PDPT pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPageCR3);
                PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(pVCpu, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));

                /*
                 * Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode)
                 */
                if (fDeactivateCR3)
                    pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
                else if (pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING)
                {
                    /* See if there are any other mappings here. This is suboptimal code. */
                    pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
                    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
                        if (    pCur != pMap
                            &&  (   (pCur->GCPtr >> X86_PDPT_SHIFT) == iPdpt
                                 || (pCur->GCPtrLast >> X86_PDPT_SHIFT) == iPdpt))
                        {
                            pShwPdpt->a[iPdpt].u |= PGM_PLXFLAGS_MAPPING;
                            break;
                        }
                }

                /*
                 * If the page directory of the old CR3 is reused in the new one, then don't
                 * clear the hypervisor mappings.
                 */
                if (    pCurrentShwPdpt
                    &&  (pCurrentShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) == (pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) )
                {
                    LogFlow(("pgmMapClearShadowPDEs: Pdpe %d reused -> don't clear hypervisor mappings!\n", iPdpt));
                    break;
                }

                /*
                 * Clear the mappings in the PD.
                 */
                AssertFatal(pShwPaePd);
                Assert(!pShwPaePd->a[iPaePde].n.u1Present || (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPaePde].u = 0;

                iPaePde++;
                AssertFatal(iPaePde < 512);
                Assert(!pShwPaePd->a[iPaePde].n.u1Present || (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPaePde].u = 0;

                /*
                 * Unlock the shadow pool PD page if the PDPTE no longer holds any mappings.
                 */
                if (    fDeactivateCR3
                    ||  !(pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
                {
                    PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPage(pPool, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
                    AssertFatal(pPoolPagePd);
                    if (pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                        pgmPoolUnlockPage(pPool, pPoolPagePd);
                }
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }

    PGM_DYNMAP_UNUSED_HINT_VM(pVM, pCurrentShwPdpt);
}
#endif /* !IN_RING0 */

#if defined(VBOX_STRICT) && !defined(IN_RING0)
/**
 * Checks all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM             The VM handle.
 * @param   pVCpu           The VMCPU handle.
 * @param   pShwPageCR3     CR3 root page
 * @param   pMap            Pointer to the mapping in question.
 * @param   iPDE            The index of the 32-bit PDE corresponding to the base of the mapping.
 */
static void pgmMapCheckShadowPDEs(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
{
    Assert(pShwPageCR3);

    uint32_t i = pMap->cPTs;
    PGMMODE enmShadowMode = PGMGetShadowMode(pVCpu);
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

    iPDE += i;
    while (i-- > 0)
    {
        iPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PCX86PD pShw32BitPd = (PCX86PD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPageCR3);
                AssertFatal(pShw32BitPd);

                AssertMsg(pShw32BitPd->a[iPDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
                          ("Expected %x vs %x; iPDE=%#x %RGv %s\n",
                           pShw32BitPd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
                           iPDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const unsigned iPdpt = iPDE / 256;          /* iPDE * 2 / 512; iPDE is in 4 MB pages */
                unsigned iPaePDE = iPDE * 2 % 512;
                PX86PDPT pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPageCR3);
                PCX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(pVCpu, pShwPdpt, iPdpt << X86_PDPT_SHIFT);
                AssertFatal(pShwPaePd);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
                          ("Expected %RX64 vs %RX64; iPDE=%#x iPdpt=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));

                iPaePDE++;
                AssertFatal(iPaePDE < 512);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
                          ("Expected %RX64 vs %RX64; iPDE=%#x iPdpt=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));

                AssertMsg(pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING,
                          ("%RX64; iPdpt=%#x iPDE=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPdpt->a[iPdpt].u,
                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));

                PCPGMPOOLPAGE pPoolPagePd = pgmPoolGetPage(pPool, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);
                AssertMsg(pPoolPagePd->cLocked, (".idx=%d .type=%d\n", pPoolPagePd->idx, pPoolPagePd->enmKind));
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}


/**
 * Check the hypervisor mappings in the active CR3.
 *
 * @param   pVM     The virtual machine.
 */
VMMDECL(void) PGMMapCheck(PVM pVM)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    /* This only applies to raw mode where we only support 1 VCPU. */
    Assert(pVM->cCpus == 1);
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));

    /*
     * Iterate mappings.
     */
    pgmLock(pVM);   /* to avoid assertions */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapCheckShadowPDEs(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
    }
    pgmUnlock(pVM);
}
#endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */

#ifndef IN_RING0

/**
 * Apply the hypervisor mappings to the active CR3.
 *
 * @returns VBox status.
 * @param   pVM         The virtual machine.
 * @param   pShwPageCR3 CR3 root page
 */
int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Skip this if disabled or if it doesn't apply.
     */
    if (   !pgmMapAreMappingsEnabled(&pVM->pgm.s)
        || pVM->cCpus > 1)
        return VINF_SUCCESS;

    /* Note! This might not be logged successfully in RC because we usually
             cannot flush the log at this point. */
    Log4(("pgmMapActivateCR3: fixed mappings=%RTbool idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));

#ifdef VBOX_STRICT
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    Assert(pShwPageCR3 && pShwPageCR3 == pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
#endif

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapSetShadowPDEs(pVM, pCur, iPDE);
    }
    return VINF_SUCCESS;
}


/**
 * Remove the hypervisor mappings from the specified CR3
 *
 * @returns VBox status.
 * @param   pVM         The virtual machine.
 * @param   pShwPageCR3 CR3 root page
 */
int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Skip this if disabled or if it doesn't apply.
     */
    if (   !pgmMapAreMappingsEnabled(&pVM->pgm.s)
        || pVM->cCpus > 1)
        return VINF_SUCCESS;

    Assert(pShwPageCR3);
    Log4(("pgmMapDeactivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE, true /*fDeactivateCR3*/);
    }
    return VINF_SUCCESS;
}


/**
 * Checks guest PD for conflicts with VMM GC mappings.
 *
 * @returns true if conflict detected.
 * @returns false if not.
 * @param   pVM         The virtual machine.
 */
VMMDECL(bool) PGMMapHasConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (!pgmMapAreMappingsFloating(&pVM->pgm.s))
        return false;

    Assert(pVM->cCpus == 1);

    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVCpu);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    /*
     * Iterate mappings.
     */
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(pVCpu);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT = pCur->cPTs;
            while (iPT-- > 0)
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#endif
                    return true;
                }
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR GCPtr = pCur->GCPtr;

            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(pVCpu, GCPtr);

                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
#endif
                    return true;
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
        }
    }
    else
        AssertFailed();

    return false;
}


/**
 * Checks and resolves (ring 3 only) guest conflicts with the VMM GC mappings.
 *
 * @returns VBox status.
 * @param   pVM         The virtual machine.
 */
int pgmMapResolveConflicts(PVM pVM)
{
    /* The caller is expected to check these two conditions. */
    Assert(!pVM->pgm.s.fMappingsFixed);
    Assert(!pVM->pgm.s.fMappingsDisabled);

    /* This only applies to raw mode where we only support 1 VCPU. */
    Assert(pVM->cCpus == 1);
    PVMCPU pVCpu = &pVM->aCpus[0];
    PGMMODE const enmGuestMode = PGMGetGuestMode(pVCpu);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(pVCpu);
        Assert(pPD);

        /*
         * Iterate mappings.
         */
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; )
        {
            PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT = pCur->cPTs;
            while (iPT-- > 0)
            {
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (   pVM->fRawR0Enabled
                         || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT);
                    AssertRCReturn(rc, rc);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
            }
            pCur = pNext;
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        /*
         * Iterate mappings.
         */
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur;)
        {
            PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
            RTGCPTR GCPtr = pCur->GCPtr;
            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(pVCpu, GCPtr);

                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
                    int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, pCur->GCPtr);
                    AssertRCReturn(rc, rc);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
            pCur = pNext;
        }
    }
    else
        AssertFailed();

    Assert(!PGMMapHasConflicts(pVM));
    return VINF_SUCCESS;
}

#endif /* !IN_RING0 */
