VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp @ 31794

Last change on this file since 31794 was 31775, checked in by vboxsync, 15 years ago

PGM: Wrap up all access to PAE/LM PTEs so that we can treat the invalid entries used by PGM_WITH_MMIO_OPTIMIZATIONS as not-present.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 33.0 KB
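
The change described in the commit message hides PGM's special "invalid" PAE/long-mode PTE encodings behind accessor wrappers, so that generic code simply sees such entries as not-present. Below is a minimal editorial sketch of that idea using made-up EXAMPLE* names; the real PGMSHWPTEPAE type and the PGMSHWPTEPAE_* accessors used throughout this file are defined in PGMInternal.h and may differ in detail.

    /* Hypothetical illustration only -- not the real VirtualBox definitions. */
    typedef struct EXAMPLESHWPTEPAE { X86PGPAEUINT uPte; } EXAMPLESHWPTEPAE;

    /* An entry carrying must-be-zero bits (as used for the MMIO optimization
       markers) is reported to callers as not-present. */
    #define EXAMPLESHWPTEPAE_IS_P(Pte)      ( ((Pte).uPte & (X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == X86_PTE_P )
    #define EXAMPLESHWPTEPAE_GET_U(Pte)     ( (Pte).uPte )
    #define EXAMPLESHWPTEPAE_SET(Pte, uVal) do { (Pte).uPte = (uVal); } while (0)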
/* $Id: PGMAllMap.cpp 31775 2010-08-19 09:48:24Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include "../PGMInternal.h"
#include <VBox/vm.h>
#include "../PGMInline.h"
#include <VBox/err.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>


/**
 * Maps a range of physical pages at a given virtual address
 * in the guest context.
 *
 * The GC virtual address range must be within an existing mapping.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   GCPtr       Where to map the page(s). Must be page aligned.
 * @param   HCPhys      Start of the range of physical pages. Must be page aligned.
 * @param   cbPages     Number of bytes to map. Must be page aligned.
 * @param   fFlags      Page flags (X86_PTE_*).
 */
VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
{
    AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));

    /*
     * Validate input.
     */
    AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
    AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
    AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));

    /* hypervisor defaults */
    if (!fFlags)
        fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        if (GCPtr - pCur->GCPtr < pCur->cb)
        {
            if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
            {
                AssertMsgFailed(("Invalid range!!\n"));
                return VERR_INVALID_PARAMETER;
            }

            /*
             * Setup PTE.
             */
            X86PTEPAE Pte;
            Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);

            /*
             * Update the page tables.
             */
            for (;;)
            {
                RTGCUINTPTR off = GCPtr - pCur->GCPtr;
                const unsigned iPT     = off >> X86_PD_SHIFT;
                const unsigned iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;

                /* 32-bit */
                pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u;  /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */

                /* pae */
                PGMSHWPTEPAE_SET(pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512], Pte.u);

                /* next */
                cbPages -= PAGE_SIZE;
                if (!cbPages)
                    break;
                GCPtr += PAGE_SIZE;
                Pte.u += PAGE_SIZE;
            }

            return VINF_SUCCESS;
        }

        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("GCPtr=%#x was not found in any mapping ranges!\n", GCPtr));
    return VERR_INVALID_PARAMETER;
}
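
/*
 * Editorial usage sketch (not part of the original file): mapping a single
 * page read/write into an already registered hypervisor mapping.  The GC
 * address and host-physical address below are made-up placeholders; only the
 * call shape and error handling follow PGMMap() above.
 *
 *     RTGCUINTPTR GCPtrMap  = 0xa0000000;     // assumed to lie inside an existing PGMMAPPING
 *     RTHCPHYS    HCPhysBuf = UINT64_C(0x0000000012345000);
 *     int rc = PGMMap(pVM, GCPtrMap, HCPhysBuf, PAGE_SIZE,
 *                     X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D);
 *     if (RT_FAILURE(rc))
 *         LogRel(("PGMMap failed: %Rrc\n", rc));
 */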


/**
 * Sets (replaces) the page flags for a range of pages in a mapping.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   GCPtr   Virtual address of the first page in the range.
 * @param   cb      Size (in bytes) of the range to apply the modification to.
 * @param   fFlags  Page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags)
{
    return PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in a mapping.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   GCPtr   Virtual address of the first page in the range.
 * @param   cb      Size (in bytes) of the range to apply the modification to.
 * @param   fFlags  The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask   The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    AssertMsg(!(fFlags & (X86_PTE_PAE_PG_MASK | X86_PTE_PAE_MBZ_MASK_NX)), ("fFlags=%#x\n", fFlags));
    Assert(cb);

    /*
     * Align the input.
     */
    cb    += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb     = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr  = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
        if (off < pCur->cb)
        {
            AssertMsgReturn(off + cb <= pCur->cb,
                            ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
                             GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
                            VERR_INVALID_PARAMETER);

            /*
             * Perform the requested operation.
             */
            while (cb > 0)
            {
                unsigned iPT  = off >> X86_PD_SHIFT;
                unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
                while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
                {
                    /* 32-Bit */
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u &= fMask | X86_PTE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u |= fFlags & ~X86_PTE_PG_MASK;

                    /* PAE */
                    PPGMSHWPTEPAE pPtePae = &pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512];
                    PGMSHWPTEPAE_SET(*pPtePae,
                                     (  PGMSHWPTEPAE_GET_U(*pPtePae)
                                      & (fMask | X86_PTE_PAE_PG_MASK))
                                     | (fFlags & ~(X86_PTE_PAE_PG_MASK | X86_PTE_PAE_MBZ_MASK_NX)));

                    /* invalidate tlb */
                    PGM_INVL_PG(VMMGetCpu(pVM), (RTGCUINTPTR)pCur->GCPtr + off);

                    /* next */
                    iPTE++;
                    cb  -= PAGE_SIZE;
                    off += PAGE_SIZE;
                }
            }

            return VINF_SUCCESS;
        }
        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("Page range %#x LB%#x not found\n", GCPtr, cb));
    return VERR_INVALID_PARAMETER;
}
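
/*
 * Editorial sketch (not part of the original file): since PGMMapModifyPage()
 * ANDs the existing flags with fMask and then ORs in fFlags, temporarily
 * write-protecting a range and later restoring it could look like this
 * (GCPtr and cb are placeholders):
 *
 *     // Clear X86_PTE_RW: pass it only in the AND mask.
 *     int rc = PGMMapModifyPage(pVM, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
 *     // Set it again: pass it in the OR mask and keep everything else.
 *     rc = PGMMapModifyPage(pVM, GCPtr, cb, X86_PTE_RW, ~(uint64_t)0);
 */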


#ifndef IN_RING0
/**
 * Sets all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pMap        Pointer to the mapping in question.
 * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
{
    Log4(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));

    if (    !pgmMapAreMappingsEnabled(&pVM->pgm.s)
        ||  pVM->cCpus > 1)
        return;

    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    if (!pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
        return;    /* too early */

    PGMMODE enmShadowMode = PGMGetShadowMode(pVCpu);
    Assert(enmShadowMode <= PGMMODE_PAE_NX);

    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

    /*
     * Insert the page tables into the shadow page directories.
     */
    unsigned i = pMap->cPTs;
    iNewPDE += i;
    while (i-- > 0)
    {
        iNewPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(pVCpu);
                AssertFatal(pShw32BitPd);

                /* Free any previous user, unless it's us. */
                Assert(   (pShw32BitPd->a[iNewPDE].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
                       || (pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPT);
                if (    pShw32BitPd->a[iNewPDE].n.u1Present
                    &&  !(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING))
                    pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);

                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags. */
                pShw32BitPd->a[iNewPDE].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                          | (uint32_t)pMap->aPTs[i].HCPhysPT;
                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShw32BitPd);
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const uint32_t iPdPt    = iNewPDE / 256;
                unsigned       iPaePde  = iNewPDE * 2 % 512;
                PX86PDPT       pShwPdpt = pgmShwGetPaePDPTPtr(pVCpu);
                Assert(pShwPdpt);

                /*
                 * Get the shadow PD.
                 * If no PD, sync it (PAE guest) or fake (not present or 32-bit guest).
                 * Note! The RW, US and A bits are reserved for PAE PDPTEs. Setting the
                 *       accessed bit causes invalid VT-x guest state errors.
                 */
                PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(pVCpu, iPdPt << X86_PDPT_SHIFT);
                if (!pShwPaePd)
                {
                    X86PDPE GstPdpe;
                    if (PGMGetGuestMode(pVCpu) < PGMMODE_PAE)
                        GstPdpe.u = X86_PDPE_P;
                    else
                    {
                        PX86PDPE pGstPdpe = pgmGstGetPaePDPEPtr(pVCpu, iPdPt << X86_PDPT_SHIFT);
                        if (pGstPdpe)
                            GstPdpe = *pGstPdpe;
                        else
                            GstPdpe.u = X86_PDPE_P;
                    }
                    int rc = pgmShwSyncPaePDPtr(pVCpu, iPdPt << X86_PDPT_SHIFT, GstPdpe.u, &pShwPaePd);
                    AssertFatalRC(rc);
                }
                Assert(pShwPaePd);

                /*
                 * Mark the page as locked; disallow flushing.
                 */
                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPage(pPool, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);
                if (!pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                    pgmPoolLockPage(pPool, pPoolPagePd);
#ifdef VBOX_STRICT
                else if (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING)
                {
                    Assert(PGMGetGuestMode(pVCpu) >= PGMMODE_PAE); /** @todo We may hit this during reset, will fix later. */
                    AssertFatalMsg(   (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0
                                   || !PGMMODE_WITH_PAGING(PGMGetGuestMode(pVCpu)),
                                   ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK, pMap->aPTs[i].HCPhysPaePT0));
                    Assert(pShwPaePd->a[iPaePde+1].u & PGM_PDFLAGS_MAPPING);
                    AssertFatalMsg(   (pShwPaePd->a[iPaePde+1].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1
                                   || !PGMMODE_WITH_PAGING(PGMGetGuestMode(pVCpu)),
                                   ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde+1].u & X86_PDE_PAE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1));
                }
#endif

                /*
                 * Insert our first PT, freeing anything we might be replacing unless it's a mapping (i.e. us).
                 */
                Assert(   (pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
                       || (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0);
                if (    pShwPaePd->a[iPaePde].n.u1Present
                    &&  !(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING))
                {
                    Assert(!(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
                    pgmPoolFree(pVM, pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK, pPoolPagePd->idx, iPaePde);
                }
                pShwPaePd->a[iPaePde].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                        | pMap->aPTs[i].HCPhysPaePT0;

                /* 2nd 2 MB PDE of the 4 MB region, same as above. */
                iPaePde++;
                AssertFatal(iPaePde < 512);
                Assert(   (pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
                       || (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1);
                if (    pShwPaePd->a[iPaePde].n.u1Present
                    &&  !(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING))
                    pgmPoolFree(pVM, pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPaePde);
                pShwPaePd->a[iPaePde].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                        | pMap->aPTs[i].HCPhysPaePT1;

                /*
                 * Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode)
                 */
                pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;

                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPaePd);
                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPdpt);
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}
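
/*
 * Editorial worked example (not from the original source) of the PAE index
 * arithmetic used in pgmMapSetShadowPDEs(): one 32-bit PDE index covers 4 MB,
 * i.e. two 2 MB PAE PDEs.  For iNewPDE = 0x300 (GC address 0xc0000000):
 *
 *     iPdPt   = 0x300 / 256     = 3    -> fourth PDPT entry
 *     iPaePde = 0x300 * 2 % 512 = 0    -> first PDE of that PAE page directory
 *
 * so the mapping occupies PAE PDEs 0 and 1 of the page directory referenced by
 * PDPTE 3, which is what the "2nd 2 MB PDE of the 4 MB region" step handles.
 */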


/**
 * Clears all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM             The VM handle.
 * @param   pShwPageCR3     CR3 root page
 * @param   pMap            Pointer to the mapping in question.
 * @param   iOldPDE         The index of the 32-bit PDE corresponding to the base of the mapping.
 * @param   fDeactivateCR3  Set if it's pgmMapDeactivateCR3 calling.
 */
void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3)
{
    Log(("pgmMapClearShadowPDEs: old pde %x (cPTs=%x) (mappings enabled %d) fDeactivateCR3=%RTbool\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled(&pVM->pgm.s), fDeactivateCR3));

    /*
     * Skip this if disabled or if it doesn't apply.
     */
    if (    !pgmMapAreMappingsEnabled(&pVM->pgm.s)
        ||  pVM->cCpus > 1)
        return;

    Assert(pShwPageCR3);

    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = VMMGetCpu0(pVM);
# ifdef IN_RC
    Assert(pShwPageCR3 != pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

    PX86PDPT pCurrentShwPdpt = NULL;
    if (    PGMGetGuestMode(pVCpu) >= PGMMODE_PAE
        &&  pShwPageCR3 != pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
        pCurrentShwPdpt = pgmShwGetPaePDPTPtr(pVCpu);

    unsigned i = pMap->cPTs;
    PGMMODE  enmShadowMode = PGMGetShadowMode(pVCpu);

    iOldPDE += i;
    while (i-- > 0)
    {
        iOldPDE--;

        switch(enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPageCR3);
                AssertFatal(pShw32BitPd);

                Assert(!pShw32BitPd->a[iOldPDE].n.u1Present || (pShw32BitPd->a[iOldPDE].u & PGM_PDFLAGS_MAPPING));
                pShw32BitPd->a[iOldPDE].u = 0;
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const unsigned iPdpt     = iOldPDE / 256;         /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
                unsigned       iPaePde   = iOldPDE * 2 % 512;
                PX86PDPT       pShwPdpt  = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPageCR3);
                PX86PDPAE      pShwPaePd = pgmShwGetPaePDPtr(pVCpu, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));

                /*
                 * Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode)
                 */
                if (fDeactivateCR3)
                    pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
                else if (pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING)
                {
                    /* See if there are any other mappings here. This is suboptimal code. */
                    pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
                    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
                        if (    pCur != pMap
                            &&  (   (pCur->GCPtr >> X86_PDPT_SHIFT) == iPdpt
                                 || (pCur->GCPtrLast >> X86_PDPT_SHIFT) == iPdpt))
                        {
                            pShwPdpt->a[iPdpt].u |= PGM_PLXFLAGS_MAPPING;
                            break;
                        }
                }

                /*
                 * If the page directory of the old CR3 is reused in the new one, then don't
                 * clear the hypervisor mappings.
                 */
                if (    pCurrentShwPdpt
                    &&  (pCurrentShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) == (pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) )
                {
                    LogFlow(("pgmMapClearShadowPDEs: Pdpe %d reused -> don't clear hypervisor mappings!\n", iPdpt));
                    break;
                }

                /*
                 * Clear the mappings in the PD.
                 */
                AssertFatal(pShwPaePd);
                Assert(!pShwPaePd->a[iPaePde].n.u1Present || (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPaePde].u = 0;

                iPaePde++;
                AssertFatal(iPaePde < 512);
                Assert(!pShwPaePd->a[iPaePde].n.u1Present || (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPaePde].u = 0;

                /*
                 * Unlock the shadow pool PD page if the PDPTE no longer holds any mappings.
                 */
                if (    fDeactivateCR3
                    ||  !(pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
                {
                    PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPage(pPool, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
                    AssertFatal(pPoolPagePd);
                    if (pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                        pgmPoolUnlockPage(pPool, pPoolPagePd);
                }
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }

    PGM_DYNMAP_UNUSED_HINT_VM(pVM, pCurrentShwPdpt);
}
#endif /* !IN_RING0 */

#if defined(VBOX_STRICT) && !defined(IN_RING0)
/**
 * Checks all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM             The VM handle.
 * @param   pVCpu           The VMCPU handle.
 * @param   pShwPageCR3     CR3 root page
 * @param   pMap            Pointer to the mapping in question.
 * @param   iPDE            The index of the 32-bit PDE corresponding to the base of the mapping.
 */
static void pgmMapCheckShadowPDEs(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
{
    Assert(pShwPageCR3);

    uint32_t i = pMap->cPTs;
    PGMMODE  enmShadowMode = PGMGetShadowMode(pVCpu);
    PPGMPOOL pPool         = pVM->pgm.s.CTX_SUFF(pPool);

    iPDE += i;
    while (i-- > 0)
    {
        iPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PCX86PD pShw32BitPd = (PCX86PD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPageCR3);
                AssertFatal(pShw32BitPd);

                AssertMsg(pShw32BitPd->a[iPDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
                          ("Expected %x vs %x; iPDE=%#x %RGv %s\n",
                           pShw32BitPd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
                           iPDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const unsigned iPdpt     = iPDE / 256;         /* iPDE * 2 / 512; iPDE is in 4 MB pages */
                unsigned       iPaePDE   = iPDE * 2 % 512;
                PX86PDPT       pShwPdpt  = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPageCR3);
                PCX86PDPAE     pShwPaePd = pgmShwGetPaePDPtr(pVCpu, pShwPdpt, iPdpt << X86_PDPT_SHIFT);
                AssertFatal(pShwPaePd);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
                          ("Expected %RX64 vs %RX64; iPDE=%#x iPdpt=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));

                iPaePDE++;
                AssertFatal(iPaePDE < 512);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
                          ("Expected %RX64 vs %RX64; iPDE=%#x iPdpt=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));

                AssertMsg(pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING,
                          ("%RX64; iPdpt=%#x iPDE=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPdpt->a[iPdpt].u,
                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));

                PCPGMPOOLPAGE pPoolPagePd = pgmPoolGetPage(pPool, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);
                AssertMsg(pPoolPagePd->cLocked, (".idx=%d .type=%d\n", pPoolPagePd->idx, pPoolPagePd->enmKind));
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}


/**
 * Check the hypervisor mappings in the active CR3.
 *
 * @param   pVM     The virtual machine.
 */
VMMDECL(void) PGMMapCheck(PVM pVM)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    /* This only applies to raw mode where we only support 1 VCPU. */
    Assert(pVM->cCpus == 1);
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));

    /*
     * Iterate mappings.
     */
    pgmLock(pVM);   /* to avoid assertions */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapCheckShadowPDEs(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
    }
    pgmUnlock(pVM);
}
#endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */

#ifndef IN_RING0

/**
 * Apply the hypervisor mappings to the active CR3.
 *
 * @returns VBox status.
 * @param   pVM             The virtual machine.
 * @param   pShwPageCR3     CR3 root page
 */
int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Skip this if disabled or if it doesn't apply.
     */
    if (    !pgmMapAreMappingsEnabled(&pVM->pgm.s)
        ||  pVM->cCpus > 1)
        return VINF_SUCCESS;

    /* Note! This might not be logged successfully in RC because we usually
             cannot flush the log at this point. */
    Log4(("pgmMapActivateCR3: fixed mappings=%RTbool idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));

#ifdef VBOX_STRICT
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    Assert(pShwPageCR3 && pShwPageCR3 == pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
#endif

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapSetShadowPDEs(pVM, pCur, iPDE);
    }
    return VINF_SUCCESS;
}


/**
 * Remove the hypervisor mappings from the specified CR3
 *
 * @returns VBox status.
 * @param   pVM             The virtual machine.
 * @param   pShwPageCR3     CR3 root page
 */
int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Skip this if disabled or if it doesn't apply.
     */
    if (    !pgmMapAreMappingsEnabled(&pVM->pgm.s)
        ||  pVM->cCpus > 1)
        return VINF_SUCCESS;

    Assert(pShwPageCR3);
    Log4(("pgmMapDeactivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE, true /*fDeactivateCR3*/);
    }
    return VINF_SUCCESS;
}
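
/*
 * Editorial sketch (not from the original file) of how the two functions above
 * are intended to pair around a shadow CR3 switch; the pOld/pNew names and the
 * surrounding control flow are placeholders:
 *
 *     pgmMapDeactivateCR3(pVM, pOldShwPageCR3);   // drop mappings from the outgoing root
 *     // ... switch pVCpu->pgm.s.CTX_SUFF(pShwPageCR3) to the new root ...
 *     pgmMapActivateCR3(pVM, pNewShwPageCR3);     // re-insert them into the active root
 */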


/**
 * Checks guest PD for conflicts with VMM GC mappings.
 *
 * @returns true if conflict detected.
 * @returns false if not.
 * @param   pVM     The virtual machine.
 */
VMMDECL(bool) PGMMapHasConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (!pgmMapAreMappingsFloating(&pVM->pgm.s))
        return false;

    Assert(pVM->cCpus == 1);

    /* This only applies to raw mode where we only support 1 VCPU. */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVCpu);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    /*
     * Iterate mappings.
     */
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(pVCpu);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT  = pCur->cPTs;
            while (iPT-- > 0)
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#endif
                    return true;
                }
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR GCPtr = pCur->GCPtr;

            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(pVCpu, GCPtr);

                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
#endif
                    return true;
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
        }
    }
    else
        AssertFailed();

    return false;
}
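
/*
 * Editorial note (not from the original file): a "conflict" above means the
 * guest made one of its own PDEs present at an address reserved for a
 * hypervisor mapping.  A minimal sketch of how a raw-mode caller might react,
 * with made-up control flow:
 *
 *     if (PGMMapHasConflicts(pVM))
 *         VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3);  // trigger relocation via CR3 sync
 */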


/**
 * Checks and resolves (ring 3 only) guest conflicts with the hypervisor (VMM) mappings.
 *
 * @returns VBox status.
 * @param   pVM     The virtual machine.
 */
int pgmMapResolveConflicts(PVM pVM)
{
    /* The caller is expected to check these two conditions. */
    Assert(!pVM->pgm.s.fMappingsFixed);
    Assert(!pVM->pgm.s.fMappingsDisabled);

    /* This only applies to raw mode where we only support 1 VCPU. */
    Assert(pVM->cCpus == 1);
    PVMCPU        pVCpu        = &pVM->aCpus[0];
    PGMMODE const enmGuestMode = PGMGetGuestMode(pVCpu);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(pVCpu);
        Assert(pPD);

        /*
         * Iterate mappings.
         */
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; )
        {
            PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
            unsigned    iPDE  = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned    iPT   = pCur->cPTs;
            while (iPT-- > 0)
            {
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (   pVM->fRawR0Enabled
                         || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT);
                    AssertRCReturn(rc, rc);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
            }
            pCur = pNext;
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        /*
         * Iterate mappings.
         */
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur;)
        {
            PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
            RTGCPTR     GCPtr = pCur->GCPtr;
            unsigned    iPT   = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(pVCpu, GCPtr);

                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
                    int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, pCur->GCPtr);
                    AssertRCReturn(rc, rc);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
            pCur = pNext;
        }
    }
    else
        AssertFailed();

    Assert(!PGMMapHasConflicts(pVM));
    return VINF_SUCCESS;
}

#endif /* !IN_RING0 */
