source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp@17121

Last change on this file: r17121, checked in by vboxsync on 2009-02-25

Removed redundant check

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 28.7 KB
/* $Id: PGMAllMap.cpp 17121 2009-02-25 11:53:07Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <VBox/err.h>


/**
 * Maps a range of physical pages at a given virtual address
 * in the guest context.
 *
 * The GC virtual address range must be within an existing mapping.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   GCPtr       Where to map the page(s). Must be page aligned.
 * @param   HCPhys      Start of the range of physical pages. Must be page aligned.
 * @param   cbPages     Number of bytes to map. Must be page aligned.
 * @param   fFlags      Page flags (X86_PTE_*).
 */
VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
{
    AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));

    /*
     * Validate input.
     */
    AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
    AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
    AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));

    /* hypervisor defaults */
    if (!fFlags)
        fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        if (GCPtr - pCur->GCPtr < pCur->cb)
        {
            if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
            {
                AssertMsgFailed(("Invalid range!!\n"));
                return VERR_INVALID_PARAMETER;
            }

            /*
             * Setup PTE.
             */
            X86PTEPAE Pte;
            Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);

            /*
             * Update the page tables.
             */
            for (;;)
            {
                RTGCUINTPTR off = GCPtr - pCur->GCPtr;
                const unsigned iPT = off >> X86_PD_SHIFT;
                const unsigned iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;

                /* 32-bit */
                pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u; /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */

                /* pae */
                pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512].u = Pte.u;

                /* next */
                cbPages -= PAGE_SIZE;
                if (!cbPages)
                    break;
                GCPtr += PAGE_SIZE;
                Pte.u += PAGE_SIZE;
            }

            return VINF_SUCCESS;
        }

        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("GCPtr=%#x was not found in any mapping ranges!\n", GCPtr));
    return VERR_INVALID_PARAMETER;
}
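
/*
 * Illustrative usage sketch (added comment and disabled code, not part of the
 * original file): a ring-3 caller holding a page-aligned host-physical range
 * and a GC address inside an existing hypervisor mapping could wire it up as
 * below. HCPhysBuf, GCPtrHyper and the two-page size are made-up placeholders.
 */
#if 0 /* example only, assumes a function context with pVM in scope */
    RTHCPHYS    HCPhysBuf  = HCPhysOfSomeBuffer;    /* hypothetical, page aligned */
    RTGCUINTPTR GCPtrHyper = GCPtrOfSomeMapping;    /* hypothetical, inside an existing PGMMAPPING */
    int rc = PGMMap(pVM, GCPtrHyper, HCPhysBuf, 2 * PAGE_SIZE, 0 /* 0 = hypervisor defaults: P|A|D */);
    AssertRCReturn(rc, rc);
#endif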


/**
 * Sets (replaces) the page flags for a range of pages in a mapping.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   GCPtr   Virtual address of the first page in the range.
 * @param   cb      Size (in bytes) of the range to apply the modification to.
 * @param   fFlags  Page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags)
{
    return PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0);
}
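
/*
 * Illustrative sketch (not part of the original file): because the fMask passed
 * on to PGMMapModifyPage is 0, the call below would replace whatever flags the
 * page currently has with exactly P+RW, keeping only the physical address bits.
 * GCPtrHyper is a hypothetical address inside an existing mapping.
 */
#if 0 /* example only */
    int rc = PGMMapSetPage(pVM, GCPtrHyper, PAGE_SIZE, X86_PTE_P | X86_PTE_RW);
    AssertRC(rc);
#endif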


/**
 * Modify page flags for a range of pages in a mapping.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   GCPtr   Virtual address of the first page in the range.
 * @param   cb      Size (in bytes) of the range to apply the modification to.
 * @param   fFlags  The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask   The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#x\n", fFlags));
    Assert(cb);

    /*
     * Align the input.
     */
    cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
        if (off < pCur->cb)
        {
            AssertMsgReturn(off + cb <= pCur->cb,
                            ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
                             GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
                            VERR_INVALID_PARAMETER);

            /*
             * Perform the requested operation.
             */
            while (cb > 0)
            {
                unsigned iPT = off >> X86_PD_SHIFT;
                unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
                while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
                {
                    /* 32-Bit */
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u &= fMask | X86_PTE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u |= fFlags & ~X86_PTE_PG_MASK;

                    /* PAE */
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u &= fMask | X86_PTE_PAE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u |= fFlags & ~X86_PTE_PAE_PG_MASK;

                    /* invalidate tlb */
                    PGM_INVL_PG((RTGCUINTPTR)pCur->GCPtr + off);

                    /* next */
                    iPTE++;
                    cb -= PAGE_SIZE;
                    off += PAGE_SIZE;
                }
            }

            return VINF_SUCCESS;
        }
        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("Page range %#x LB%#x not found\n", GCPtr, cb));
    return VERR_INVALID_PARAMETER;
}
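
/*
 * Illustrative sketch (not part of the original file): to write-protect a range
 * inside a mapping, one would OR in nothing and AND with ~RW, which clears the
 * RW bit while leaving every other flag and the physical address untouched.
 * The address and size below are placeholders.
 */
#if 0 /* example only */
    int rc = PGMMapModifyPage(pVM, GCPtrHyper, 4 * PAGE_SIZE,
                              0 /* fFlags: nothing to set */,
                              ~(uint64_t)X86_PTE_RW /* fMask: clear RW, keep the rest */);
    AssertRC(rc);
#endif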


#ifndef IN_RING0
/**
 * Sets all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pMap        Pointer to the mapping in question.
 * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
{
    Log4(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    if (!pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        return; /* too early */
#endif

    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
    Assert(enmShadowMode <= PGMMODE_PAE_NX);

    /*
     * Init the page tables and insert them into the page directories.
     */
    unsigned i = pMap->cPTs;
    iNewPDE += i;
    while (i-- > 0)
    {
        iNewPDE--;

        switch(enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
                AssertFatal(pShw32BitPd);

                if (pShw32BitPd->a[iNewPDE].n.u1Present)
                {
                    Assert(!(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING));
                    pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);
                }

                X86PDE Pde;
                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
                Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
                pShw32BitPd->a[iNewPDE] = Pde;
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDPT pShwPdpt;
                PX86PDPAE pShwPaePd;
                const unsigned iPdPt = iNewPDE / 256;
                unsigned iPDE = iNewPDE * 2 % 512;

                pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
                Assert(pShwPdpt);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
                if (!pShwPaePd)
                {
                    X86PDPE GstPdpe;

                    if (PGMGetGuestMode(pVM) < PGMMODE_PAE)
                    {
                        /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
                        GstPdpe.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
                    }
                    else
                    {
                        PX86PDPE pGstPdpe;
                        pGstPdpe = pgmGstGetPaePDPEPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
                        if (pGstPdpe)
                            GstPdpe = *pGstPdpe;
                        else
                            GstPdpe.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
                    }
                    int rc = pgmShwSyncPaePDPtr(pVM, (iPdPt << X86_PDPT_SHIFT), &GstPdpe, &pShwPaePd);
                    AssertFatal(RT_SUCCESS(rc));
                    if (rc != VINF_SUCCESS)
                    {
                        rc = pgmShwSyncPaePDPtr(pVM, (iPdPt << X86_PDPT_SHIFT), &GstPdpe, &pShwPaePd);
                        AssertFatalMsg(rc == VINF_SUCCESS, ("rc = %Rrc\n", rc));
                    }
                }
#endif
                AssertFatal(pShwPaePd);

                PPGMPOOLPAGE pPoolPagePde = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePde);

                if (pShwPaePd->a[iPDE].n.u1Present)
                {
                    Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
                }

                X86PDEPAE PdePae0;
                PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
                pShwPaePd->a[iPDE] = PdePae0;

                /* 2nd 2 MB PDE of the 4 MB region */
                iPDE++;
                AssertFatal(iPDE < 512);

                if (pShwPaePd->a[iPDE].n.u1Present)
                {
                    Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
                }

                X86PDEPAE PdePae1;
                PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
                pShwPaePd->a[iPDE] = PdePae1;

                /* Set the PGM_PLXFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
                pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}
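
/*
 * Worked example of the PAE index arithmetic above (added comment, not in the
 * original file): iNewPDE counts 4 MB slots of the 32-bit page directory, so
 * for iNewPDE = 0x3FE (GC address 0xFF800000) we get iPdPt = 0x3FE / 256 = 3
 * and iPDE = (0x3FE * 2) % 512 = 508, i.e. the mapping occupies the two 2 MB
 * PAE PDEs 508 and 509 of the fourth PAE page directory.
 */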

/**
 * Clears all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pShwPageCR3 CR3 root page
 * @param   pMap        Pointer to the mapping in question.
 * @param   iOldPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE)
{
    Log(("pgmMapClearShadowPDEs old pde %x (mappings enabled %d)\n", iOldPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    Assert(pShwPageCR3);
#endif

    unsigned i = pMap->cPTs;
    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);

    iOldPDE += i;
    while (i-- > 0)
    {
        iOldPDE--;

        switch(enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
#else
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
#endif
                AssertFatal(pShw32BitPd);

                pShw32BitPd->a[iOldPDE].u = 0;
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDPT pPdpt = NULL;
                PX86PDPAE pShwPaePd = NULL;

                const unsigned iPD = iOldPDE / 256; /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
                unsigned iPDE = iOldPDE * 2 % 512;
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
                pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pPdpt, (iPD << X86_PDPT_SHIFT));
#else
                pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPD << X86_PDPT_SHIFT));
#endif
                AssertFatal(pShwPaePd);

                pShwPaePd->a[iPDE].u = 0;

                iPDE++;
                AssertFatal(iPDE < 512);

                pShwPaePd->a[iPDE].u = 0;
                /* Clear the PGM_PLXFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
                pPdpt->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}
#endif /* !IN_RING0 */

#ifdef VBOX_STRICT
/**
 * Checks all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pShwPageCR3 CR3 root page
 * @param   pMap        Pointer to the mapping in question.
 * @param   iPDE        The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapCheckShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
{
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    Assert(pShwPageCR3);
#endif

    unsigned i = pMap->cPTs;
    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);

    iPDE += i;
    while (i-- > 0)
    {
        iPDE--;

        switch(enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
#else
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
#endif
                AssertFatal(pShw32BitPd);

                AssertMsg(pShw32BitPd->a[iPDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
                          ("Expected %x vs %x\n", pShw32BitPd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT)));
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDPT pPdpt = NULL;
                PX86PDPAE pShwPaePd = NULL;

                const unsigned iPD = iPDE / 256; /* iPDE * 2 / 512; iPDE is in 4 MB pages */
                unsigned iPaePDE = iPDE * 2 % 512;
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
                pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pPdpt, (iPD << X86_PDPT_SHIFT));
#else
                pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPD << X86_PDPT_SHIFT));
#endif
                AssertFatal(pShwPaePd);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
                          ("Expected %RX64 vs %RX64\n", pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0)));

                iPaePDE++;
                AssertFatal(iPaePDE < 512);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
                          ("Expected %RX64 vs %RX64\n", pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1)));
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}

/**
 * Check the hypervisor mappings in the active CR3.
 *
 * @param   pVM     The virtual machine.
 */
VMMDECL(void) PGMMapCheck(PVM pVM)
{
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

# ifdef IN_RING0
    AssertFailed();
# else
# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;

        pgmMapCheckShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
    }
# endif /* IN_RING0 */
#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
}
#endif /* VBOX_STRICT */

/**
 * Apply the hypervisor mappings to the active CR3.
 *
 * @returns VBox status.
 * @param   pVM     The virtual machine.
 */
VMMDECL(int) PGMMapActivateAll(PVM pVM)
{
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
#else
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
#endif
        return VINF_SUCCESS;

    /* @note A log flush (in RC) can cause problems when called from MapCR3 (inconsistent state will trigger assertions). */
    Log4(("PGMMapActivateAll fixed mappings=%d\n", pVM->pgm.s.fMappingsFixed));

#ifdef IN_RING0
    AssertFailed();
    return VERR_INTERNAL_ERROR;
#else
# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;

        pgmMapSetShadowPDEs(pVM, pCur, iPDE);
    }
    return VINF_SUCCESS;
#endif /* IN_RING0 */
}

/**
 * Remove the hypervisor mappings from the active CR3
 *
 * @returns VBox status.
 * @param   pVM     The virtual machine.
 */
VMMDECL(int) PGMMapDeactivateAll(PVM pVM)
{
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
#else
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
#endif
        return VINF_SUCCESS;

    Log(("PGMMapDeactivateAll fixed mappings=%d\n", pVM->pgm.s.fMappingsFixed));

#ifdef IN_RING0
    AssertFailed();
    return VERR_INTERNAL_ERROR;
#else
# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;

        pgmMapClearShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
    }
    return VINF_SUCCESS;
#endif /* IN_RING0 */
}


/**
 * Remove the hypervisor mappings from the specified CR3
 *
 * @returns VBox status.
 * @param   pVM         The virtual machine.
 * @param   pShwPageCR3 CR3 root page
 */
int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
#else
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
#endif
        return VINF_SUCCESS;

#ifdef IN_RING0
    AssertFailed();
    return VERR_INTERNAL_ERROR;
#else
# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;

        pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE);
    }
    return VINF_SUCCESS;
#endif /* IN_RING0 */
}

#ifndef IN_RING0
/**
 * Checks guest PD for conflicts with VMM GC mappings.
 *
 * @returns true if conflict detected.
 * @returns false if not.
 * @param   pVM     The virtual machine.
 */
VMMDECL(bool) PGMMapHasConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return false;

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    /*
     * Iterate mappings.
     */
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT = pCur->cPTs;
            while (iPT-- > 0)
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#endif
                    return true;
                }
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR GCPtr = pCur->GCPtr;

            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);

                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
#endif
                    return true;
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
        }
    }
    else
        AssertFailed();

    return false;
}
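
/*
 * Illustrative sketch (assumption, not from this file): a raw-mode caller that
 * sees a conflict cannot usually fix it in GC/R0 and instead forces a CR3
 * resync so that ring-3 relocates the mapping, along these lines. The force
 * flag name is assumed from the VBox version of that era.
 */
#if 0 /* example only */
    if (PGMMapHasConflicts(pVM))
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
#endif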

# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
/**
 * Checks and resolves (ring 3 only) guest conflicts with VMM GC mappings.
 *
 * @returns VBox status.
 * @param   pVM     The virtual machine.
 */
VMMDECL(int) PGMMapResolveConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    /*
     * Iterate mappings.
     */
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT = pCur->cPTs;
            while (iPT-- > 0)
            {
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT);
                    AssertRCReturn(rc, rc);

                    /*
                     * Update pCur.
                     */
                    pCur = pVM->pgm.s.CTX_SUFF(pMappings);
                    while (pCur && pCur->GCPtr < (iPDE << X86_PD_SHIFT))
                        pCur = pCur->CTX_SUFF(pNext);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
            }
            if (!pCur)
                break;
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR GCPtr = pCur->GCPtr;

            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);

                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
                    int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, GCPtr);
                    AssertRCReturn(rc, rc);

                    /*
                     * Update pCur.
                     */
                    pCur = pVM->pgm.s.CTX_SUFF(pMappings);
                    while (pCur && pCur->GCPtr < GCPtr)
                        pCur = pCur->CTX_SUFF(pNext);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
            if (!pCur)
                break;
        }
    }
    else
        AssertFailed();

    return VINF_SUCCESS;
}
# endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */

#endif /* !IN_RING0 */
