VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp@48749

Last change on this file since 48749 was 45786, checked in by vboxsync, 12 years ago

Move HMRCA.asm into the switcher code so we don't need VMMRC.rc.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 34.9 KB
Line 
1/* $Id: PGMAllMap.cpp 45786 2013-04-26 22:35:59Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM
22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/em.h>
24#include "PGMInternal.h"
25#include <VBox/vmm/vm.h>
26#include "PGMInline.h"
27#include <VBox/err.h>
28#include <iprt/asm-amd64-x86.h>
29#include <iprt/assert.h>
30
31
32/**
33 * Maps a range of physical pages at a given virtual address
34 * in the guest context.
35 *
36 * The GC virtual address range must be within an existing mapping.
37 *
38 * @returns VBox status code.
39 * @param pVM The virtual machine.
40 * @param GCPtr Where to map the page(s). Must be page aligned.
41 * @param HCPhys Start of the range of physical pages. Must be page aligned.
42 * @param cbPages Number of bytes to map. Must be page aligned.
43 * @param fFlags Page flags (X86_PTE_*).
44 */
45VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
46{
47 AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));
48
49 /*
50 * Validate input.
51 */
52 AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
53 AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
54 AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));
55
56 /* hypervisor defaults */
57 if (!fFlags)
58 fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;
59
60 /*
61 * Find the mapping.
62 */
63 PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
64 while (pCur)
65 {
66 if (GCPtr - pCur->GCPtr < pCur->cb)
67 {
68 if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
69 {
70 AssertMsgFailed(("Invalid range!!\n"));
71 return VERR_INVALID_PARAMETER;
72 }
73
74 /*
75 * Setup PTE.
76 */
77 X86PTEPAE Pte;
78 Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);
79
80 /*
81 * Update the page tables.
82 */
83 for (;;)
84 {
85 RTGCUINTPTR off = GCPtr - pCur->GCPtr;
86 const unsigned iPT = off >> X86_PD_SHIFT;
87 const unsigned iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;
88
89 /* 32-bit */
90 pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u; /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */
91
92 /* pae */
93 PGMSHWPTEPAE_SET(pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512], Pte.u);
94
95 /* next */
96 cbPages -= PAGE_SIZE;
97 if (!cbPages)
98 break;
99 GCPtr += PAGE_SIZE;
100 Pte.u += PAGE_SIZE;
101 }
102
103 return VINF_SUCCESS;
104 }
105
106 /* next */
107 pCur = pCur->CTX_SUFF(pNext);
108 }
109
110 AssertMsgFailed(("GCPtr=%#x was not found in any mapping ranges!\n", GCPtr));
111 return VERR_INVALID_PARAMETER;
112}
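
The loop above resolves each page by splitting the offset into the mapping into a 32-bit page-table index and entry index, then reuses the same entry number to address the two 512-entry PAE tables backing each 4 MB slot. Below is a minimal standalone sketch of that arithmetic; the MY_* constants restate the usual x86 values (4 KB pages, 4 MB per 32-bit PDE, 1024/512 table entries) and are assumptions for illustration, not the VBox macros.

/* Standalone sketch of the index arithmetic used in the PGMMap() loop above. */
#include <stdint.h>
#include <stdio.h>

#define MY_PAGE_SHIFT   12      /* 4 KB pages */
#define MY_PD_SHIFT     22      /* one 32-bit PDE covers 4 MB */
#define MY_PT_MASK      0x3ff   /* 1024 PTEs per 32-bit page table */

int main(void)
{
    uint32_t off = 0x00812000;  /* example byte offset into a mapping */

    unsigned iPT     = off >> MY_PD_SHIFT;                   /* which 4 MB page table */
    unsigned iPageNo = (off >> MY_PAGE_SHIFT) & MY_PT_MASK;  /* PTE within that table */

    /* The same page in the PAE shadow: two 512-entry tables back one 4 MB slot. */
    unsigned iPaePT  = iPageNo / 512;
    unsigned iPaePte = iPageNo % 512;

    printf("iPT=%u iPageNo=%u (PAE: table %u, entry %u)\n",
           iPT, iPageNo, iPaePT, iPaePte);
    return 0;
}
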
113
114
115/**
116 * Sets (replaces) the page flags for a range of pages in a mapping.
117 *
118 * @returns VBox status.
119 * @param pVM Pointer to the VM.
120 * @param GCPtr Virtual address of the first page in the range.
121 * @param cb Size (in bytes) of the range to apply the modification to.
122 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
123 */
124VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags)
125{
126 return PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0);
127}
128
129
130/**
131 * Modify page flags for a range of pages in a mapping.
132 *
133 * The existing flags are ANDed with the fMask and ORed with the fFlags.
134 *
135 * @returns VBox status code.
136 * @param pVM Pointer to the VM.
137 * @param GCPtr Virtual address of the first page in the range.
138 * @param cb Size (in bytes) of the range to apply the modification to.
139 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
140 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
141 */
142VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
143{
144 /*
145 * Validate input.
146 */
147 AssertMsg(!(fFlags & (X86_PTE_PAE_PG_MASK | X86_PTE_PAE_MBZ_MASK_NX)), ("fFlags=%#x\n", fFlags));
148 Assert(cb);
149
150 /*
151 * Align the input.
152 */
153 cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
154 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
155 GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);
156
157 /*
158 * Find the mapping.
159 */
160 PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
161 while (pCur)
162 {
163 RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
164 if (off < pCur->cb)
165 {
166 AssertMsgReturn(off + cb <= pCur->cb,
167 ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
168 GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
169 VERR_INVALID_PARAMETER);
170
171 /*
172 * Perform the requested operation.
173 */
174 while (cb > 0)
175 {
176 unsigned iPT = off >> X86_PD_SHIFT;
177 unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
178 while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
179 {
180 /* 32-Bit */
181 pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u &= fMask | X86_PTE_PG_MASK;
182 pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u |= fFlags & ~X86_PTE_PG_MASK;
183
184 /* PAE */
185 PPGMSHWPTEPAE pPtePae = &pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512];
186 PGMSHWPTEPAE_SET(*pPtePae,
187 ( PGMSHWPTEPAE_GET_U(*pPtePae)
188 & (fMask | X86_PTE_PAE_PG_MASK))
189 | (fFlags & ~(X86_PTE_PAE_PG_MASK | X86_PTE_PAE_MBZ_MASK_NX)));
190
191 /* invalidate tlb */
192 PGM_INVL_PG(VMMGetCpu(pVM), (RTGCUINTPTR)pCur->GCPtr + off);
193
194 /* next */
195 iPTE++;
196 cb -= PAGE_SIZE;
197 off += PAGE_SIZE;
198 }
199 }
200
201 return VINF_SUCCESS;
202 }
203 /* next */
204 pCur = pCur->CTX_SUFF(pNext);
205 }
206
207 AssertMsgFailed(("Page range %#x LB%#x not found\n", GCPtr, cb));
208 return VERR_INVALID_PARAMETER;
209}
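
Each PTE is rewritten with an AND/OR pattern that preserves the page-frame address while clearing the flags excluded by fMask and ORing in fFlags; PGMMapSetPage() above is this operation with fMask = 0, so the flags are replaced outright. A hedged standalone sketch, where MY_PAE_PG_MASK restates the conventional PAE bits 12-51 mask as an assumption:

/* Sketch of the AND/OR flag update performed per PTE by PGMMapModifyPage(). */
#include <stdint.h>
#include <stdio.h>

#define MY_PAE_PG_MASK  UINT64_C(0x000ffffffffff000)  /* physical address bits */

static uint64_t modify_pte(uint64_t uPte, uint64_t fFlags, uint64_t fMask)
{
    uPte &= fMask | MY_PAE_PG_MASK;     /* keep masked-in flags plus the address */
    uPte |= fFlags & ~MY_PAE_PG_MASK;   /* OR in the new flags, never the address */
    return uPte;
}

int main(void)
{
    uint64_t uPte = UINT64_C(0x0000000123456067);   /* P, RW, US, A, D set */

    /* PGMMapSetPage() is ModifyPage with fMask=0: flags are replaced outright. */
    uint64_t uNew = modify_pte(uPte, /*fFlags=*/0x1 /*P only*/, /*fMask=*/0);
    printf("old=%#llx new=%#llx\n",
           (unsigned long long)uPte, (unsigned long long)uNew);
    return 0;
}
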
210
211
212/**
213 * Get information about a page in a mapping.
214 *
215 * This differs from PGMShwGetPage and PGMGstGetPage in that it only consults
216 * the page table to calculate the flags.
217 *
218 * @returns VINF_SUCCESS, VERR_PAGE_NOT_PRESENT or VERR_NOT_FOUND.
219 * @param pVM Pointer to the VM.
220 * @param GCPtr The page address.
221 * @param pfFlags Where to return the flags. Optional.
222 * @param pHCPhys Where to return the address. Optional.
223 */
224VMMDECL(int) PGMMapGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
225{
226 /*
227 * Find the mapping.
228 */
229 GCPtr &= PAGE_BASE_GC_MASK;
230 PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
231 while (pCur)
232 {
233 RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
234 if (off < pCur->cb)
235 {
236 /*
237 * Dig out the information.
238 */
239 int rc = VINF_SUCCESS;
240 unsigned iPT = off >> X86_PD_SHIFT;
241 unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
242 PCPGMSHWPTEPAE pPtePae = &pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512];
243 if (PGMSHWPTEPAE_IS_P(*pPtePae))
244 {
245 if (pfFlags)
246 *pfFlags = PGMSHWPTEPAE_GET_U(*pPtePae) & ~X86_PTE_PAE_PG_MASK;
247 if (pHCPhys)
248 *pHCPhys = PGMSHWPTEPAE_GET_HCPHYS(*pPtePae);
249 }
250 else
251 rc = VERR_PAGE_NOT_PRESENT;
252 return rc;
253 }
254 /* next */
255 pCur = pCur->CTX_SUFF(pNext);
256 }
257
258 return VERR_NOT_FOUND;
259}
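
PGMMapGetPage() only consults the PAE shadow table: if the entry is present, the flags are everything outside the page-frame mask and the address is everything inside it, and both outputs are optional. A small illustrative sketch under the same assumed mask value:

/* Sketch of how a present PAE PTE is split into flags and physical address. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MY_PAE_PG_MASK  UINT64_C(0x000ffffffffff000)
#define MY_PTE_P        UINT64_C(0x1)

static bool decode_pte(uint64_t uPte, uint64_t *pfFlags, uint64_t *pPhys)
{
    if (!(uPte & MY_PTE_P))
        return false;                    /* VERR_PAGE_NOT_PRESENT in the code above */
    if (pfFlags)
        *pfFlags = uPte & ~MY_PAE_PG_MASK;
    if (pPhys)
        *pPhys = uPte & MY_PAE_PG_MASK;
    return true;
}

int main(void)
{
    uint64_t fFlags, Phys;
    if (decode_pte(UINT64_C(0x0000000123456063), &fFlags, &Phys))
        printf("flags=%#llx phys=%#llx\n",
               (unsigned long long)fFlags, (unsigned long long)Phys);
    return 0;
}
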
260
261#ifndef PGM_WITHOUT_MAPPINGS
262
263/**
264 * Sets all PDEs involved with the mapping in the shadow page table.
265 *
266 * Ignored if mappings are disabled (i.e. if HM is enabled).
267 *
268 * @param pVM Pointer to the VM.
269 * @param pMap Pointer to the mapping in question.
270 * @param iNewPDE The index of the 32-bit PDE corresponding to the base of the mapping.
271 */
272void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
273{
274 Log4(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(pVM)));
275
276 if (!pgmMapAreMappingsEnabled(pVM))
277 return;
278
279 /* This only applies to raw mode where we only support 1 VCPU. */
280 PVMCPU pVCpu = VMMGetCpu0(pVM);
281 if (!pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
282 return; /* too early */
283
284 PGMMODE enmShadowMode = PGMGetShadowMode(pVCpu);
285 Assert(enmShadowMode <= PGMMODE_PAE_NX);
286
287 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
288
289 /*
290 * Insert the page tables into the shadow page directories.
291 */
292 unsigned i = pMap->cPTs;
293 iNewPDE += i;
294 while (i-- > 0)
295 {
296 iNewPDE--;
297
298 switch (enmShadowMode)
299 {
300 case PGMMODE_32_BIT:
301 {
302 PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(pVCpu);
303 AssertFatal(pShw32BitPd);
304
305 /* Free any previous user, unless it's us. */
306 Assert( (pShw32BitPd->a[iNewPDE].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
307 || (pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPT);
308 if ( pShw32BitPd->a[iNewPDE].n.u1Present
309 && !(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING))
310 pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);
311
312 /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags. */
313 pShw32BitPd->a[iNewPDE].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
314 | (uint32_t)pMap->aPTs[i].HCPhysPT;
315 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShw32BitPd);
316 break;
317 }
318
319 case PGMMODE_PAE:
320 case PGMMODE_PAE_NX:
321 {
322 const uint32_t iPdPt = iNewPDE / 256;
323 unsigned iPaePde = iNewPDE * 2 % 512;
324 PX86PDPT pShwPdpt = pgmShwGetPaePDPTPtr(pVCpu);
325 Assert(pShwPdpt);
326
327 /*
328 * Get the shadow PD.
329 * If no PD, sync it (PAE guest) or fake (not present or 32-bit guest).
330 * Note! The RW, US and A bits are reserved for PAE PDPTEs. Setting the
331 * accessed bit causes invalid VT-x guest state errors.
332 */
333 PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(pVCpu, iPdPt << X86_PDPT_SHIFT);
334 if (!pShwPaePd)
335 {
336 X86PDPE GstPdpe;
337 if (PGMGetGuestMode(pVCpu) < PGMMODE_PAE)
338 GstPdpe.u = X86_PDPE_P;
339 else
340 {
341 PX86PDPE pGstPdpe = pgmGstGetPaePDPEPtr(pVCpu, iPdPt << X86_PDPT_SHIFT);
342 if (pGstPdpe)
343 GstPdpe = *pGstPdpe;
344 else
345 GstPdpe.u = X86_PDPE_P;
346 }
347 int rc = pgmShwSyncPaePDPtr(pVCpu, iPdPt << X86_PDPT_SHIFT, GstPdpe.u, &pShwPaePd);
348 AssertFatalRC(rc);
349 }
350 Assert(pShwPaePd);
351
352 /*
353 * Mark the page as locked; disallow flushing.
354 */
355 PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPage(pPool, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
356 AssertFatal(pPoolPagePd);
357 if (!pgmPoolIsPageLocked(pPoolPagePd))
358 pgmPoolLockPage(pPool, pPoolPagePd);
359# ifdef VBOX_STRICT
360 else if (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING)
361 {
362 Assert(PGMGetGuestMode(pVCpu) >= PGMMODE_PAE); /** @todo We may hit this during reset, will fix later. */
363 AssertFatalMsg( (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0
364 || !PGMMODE_WITH_PAGING(PGMGetGuestMode(pVCpu)),
365 ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde+1].u & X86_PDE_PAE_PG_MASK, pMap->aPTs[i].HCPhysPaePT0));
366 Assert(pShwPaePd->a[iPaePde+1].u & PGM_PDFLAGS_MAPPING);
367 AssertFatalMsg( (pShwPaePd->a[iPaePde+1].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1
368 || !PGMMODE_WITH_PAGING(PGMGetGuestMode(pVCpu)),
369 ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde+1].u & X86_PDE_PAE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1));
370 }
371# endif
372
373 /*
374 * Insert our first PT, freeing anything we might be replacing unless it's a mapping (i.e. us).
375 */
376 Assert( (pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
377 || (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0);
378 if ( pShwPaePd->a[iPaePde].n.u1Present
379 && !(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING))
380 {
381 Assert(!(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
382 pgmPoolFree(pVM, pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK, pPoolPagePd->idx, iPaePde);
383 }
384 pShwPaePd->a[iPaePde].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
385 | pMap->aPTs[i].HCPhysPaePT0;
386
387 /* 2nd 2 MB PDE of the 4 MB region, same as above. */
388 iPaePde++;
389 AssertFatal(iPaePde < 512);
390 Assert( (pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
391 || (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1);
392 if ( pShwPaePd->a[iPaePde].n.u1Present
393 && !(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING))
394 pgmPoolFree(pVM, pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK, pPoolPagePd->idx, iPaePde);
395 pShwPaePd->a[iPaePde].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
396 | pMap->aPTs[i].HCPhysPaePT1;
397
398 /*
399 * Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode)
400 */
401 pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
402
403 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPaePd);
404 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPdpt);
405 break;
406 }
407
408 default:
409 AssertFailed();
410 break;
411 }
412 }
413}
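
The PAE branch converts the 32-bit PDE index (4 MB granularity) into a PDPT index and the first of two consecutive 2 MB PAE PDEs; the second PDE is written a few lines later with HCPhysPaePT1. The conversion is plain arithmetic and can be checked standalone; no VBox types are involved:

/* Sketch of the 32-bit -> PAE directory index conversion used above: a 32-bit
   PDE covers 4 MB, which in PAE is one PDPT slot (1 GB each, hence /256) and
   two consecutive 2 MB PDEs. */
#include <stdio.h>

int main(void)
{
    unsigned iNewPDE = 0x3fe;                 /* e.g. a mapping at 0xFF800000 */

    unsigned iPdPt    = iNewPDE / 256;        /* PAE PDPT index (0..3) */
    unsigned iPaePde  = iNewPDE * 2 % 512;    /* first 2 MB PDE in that PD */
    unsigned iPaePde2 = iPaePde + 1;          /* second half of the 4 MB range */

    printf("32-bit PDE %#x -> PDPT %u, PAE PDEs %u and %u\n",
           iNewPDE, iPdPt, iPaePde, iPaePde2);
    return 0;
}
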
414
415
416/**
417 * Clears all PDEs involved with the mapping in the shadow page table.
418 *
419 * Ignored if mappings are disabled (i.e. if HM is enabled).
420 *
421 * @param pVM Pointer to the VM.
422 * @param pShwPageCR3 CR3 root page
423 * @param pMap Pointer to the mapping in question.
424 * @param iOldPDE The index of the 32-bit PDE corresponding to the base of the mapping.
425 * @param fDeactivateCR3 Set if it's pgmMapDeactivateCR3 calling.
426 */
427void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3)
428{
429 Log(("pgmMapClearShadowPDEs: old pde %x (cPTs=%x) (mappings enabled %d) fDeactivateCR3=%RTbool\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled(pVM), fDeactivateCR3));
430
431 /*
432 * Skip this if it doesn't apply.
433 */
434 if (!pgmMapAreMappingsEnabled(pVM))
435 return;
436
437 Assert(pShwPageCR3);
438
439 /* This only applies to raw mode where we only support 1 VCPU. */
440 PVMCPU pVCpu = VMMGetCpu0(pVM);
441# ifdef IN_RC
442 Assert(pShwPageCR3 != pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
443# endif
444
445 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
446
447 PX86PDPT pCurrentShwPdpt = NULL;
448 if ( PGMGetGuestMode(pVCpu) >= PGMMODE_PAE
449 && pShwPageCR3 != pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
450 pCurrentShwPdpt = pgmShwGetPaePDPTPtr(pVCpu);
451
452 unsigned i = pMap->cPTs;
453 PGMMODE enmShadowMode = PGMGetShadowMode(pVCpu);
454
455 iOldPDE += i;
456 while (i-- > 0)
457 {
458 iOldPDE--;
459
460 switch(enmShadowMode)
461 {
462 case PGMMODE_32_BIT:
463 {
464 PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPageCR3);
465 AssertFatal(pShw32BitPd);
466
467 Assert(!pShw32BitPd->a[iOldPDE].n.u1Present || (pShw32BitPd->a[iOldPDE].u & PGM_PDFLAGS_MAPPING));
468 pShw32BitPd->a[iOldPDE].u = 0;
469 break;
470 }
471
472 case PGMMODE_PAE:
473 case PGMMODE_PAE_NX:
474 {
475 const unsigned iPdpt = iOldPDE / 256; /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
476 unsigned iPaePde = iOldPDE * 2 % 512;
477 PX86PDPT pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPageCR3);
478 PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(pVCpu, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));
479
480 /*
481 * Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode)
482 */
483 if (fDeactivateCR3)
484 pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
485 else if (pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING)
486 {
487 /* See if there are any other mappings here. This is suboptimal code. */
488 pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
489 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
490 if ( pCur != pMap
491 && ( (pCur->GCPtr >> X86_PDPT_SHIFT) == iPdpt
492 || (pCur->GCPtrLast >> X86_PDPT_SHIFT) == iPdpt))
493 {
494 pShwPdpt->a[iPdpt].u |= PGM_PLXFLAGS_MAPPING;
495 break;
496 }
497 }
498
499 /*
500 * If the page directory of the old CR3 is reused in the new one, then don't
501 * clear the hypervisor mappings.
502 */
503 if ( pCurrentShwPdpt
504 && (pCurrentShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) == (pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) )
505 {
506 LogFlow(("pgmMapClearShadowPDEs: Pdpe %d reused -> don't clear hypervisor mappings!\n", iPdpt));
507 break;
508 }
509
510 /*
511 * Clear the mappings in the PD.
512 */
513 AssertFatal(pShwPaePd);
514 Assert(!pShwPaePd->a[iPaePde].n.u1Present || (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
515 pShwPaePd->a[iPaePde].u = 0;
516
517 iPaePde++;
518 AssertFatal(iPaePde < 512);
519 Assert(!pShwPaePd->a[iPaePde].n.u1Present || (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
520 pShwPaePd->a[iPaePde].u = 0;
521
522 /*
523 * Unlock the shadow pool PD page if the PDPTE no longer holds any mappings.
524 */
525 if ( fDeactivateCR3
526 || !(pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
527 {
528 PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPage(pPool, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
529 AssertFatal(pPoolPagePd);
530 if (pgmPoolIsPageLocked(pPoolPagePd))
531 pgmPoolUnlockPage(pPool, pPoolPagePd);
532 }
533 break;
534 }
535
536 default:
537 AssertFailed();
538 break;
539 }
540 }
541
542 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pCurrentShwPdpt);
543}
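
When a mapping is cleared without deactivating CR3, the code rescans the mapping list to decide whether the PGM_PLXFLAGS_MAPPING hint can really be dropped from the PDPT entry: a PDPT entry covers 1 GB, so the slot is still in use if any other mapping's first or last address shifted right by 30 lands on the same index. An illustrative standalone version of that test, with a hypothetical range list:

/* Sketch of the "any other mappings in this PDPT slot?" test above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MY_PDPT_SHIFT 30    /* 1 GB per PAE PDPT entry */

struct range { uint32_t GCPtr, GCPtrLast; };

static bool slot_still_used(const struct range *paRanges, unsigned cRanges,
                            unsigned iSkip, unsigned iPdpt)
{
    for (unsigned i = 0; i < cRanges; i++)
        if (   i != iSkip
            && (   (paRanges[i].GCPtr     >> MY_PDPT_SHIFT) == iPdpt
                || (paRanges[i].GCPtrLast >> MY_PDPT_SHIFT) == iPdpt))
            return true;
    return false;
}

int main(void)
{
    struct range aMaps[] = { { 0xff000000, 0xff3fffff },    /* being removed */
                             { 0xffc00000, 0xffffffff } };  /* stays behind  */
    printf("slot 3 still used: %d\n",
           slot_still_used(aMaps, 2, /*iSkip=*/0, /*iPdpt=*/3));
    return 0;
}
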
544
545#endif /* PGM_WITHOUT_MAPPINGS */
546#if defined(VBOX_STRICT) && !defined(IN_RING0)
547
548/**
549 * Checks all PDEs involved with the mapping in the shadow page table.
550 *
551 * @param pVM Pointer to the VM.
552 * @param pVCpu Pointer to the VMCPU.
553 * @param pShwPageCR3 CR3 root page
554 * @param pMap Pointer to the mapping in question.
555 * @param iPDE The index of the 32-bit PDE corresponding to the base of the mapping.
556 */
557static void pgmMapCheckShadowPDEs(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
558{
559 Assert(pShwPageCR3);
560
561 uint32_t i = pMap->cPTs;
562 PGMMODE enmShadowMode = PGMGetShadowMode(pVCpu);
563 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
564
565 iPDE += i;
566 while (i-- > 0)
567 {
568 iPDE--;
569
570 switch (enmShadowMode)
571 {
572 case PGMMODE_32_BIT:
573 {
574 PCX86PD pShw32BitPd = (PCX86PD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPageCR3);
575 AssertFatal(pShw32BitPd);
576
577 AssertMsg(pShw32BitPd->a[iPDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
578 ("Expected %x vs %x; iPDE=%#x %RGv %s\n",
579 pShw32BitPd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
580 iPDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
581 break;
582 }
583
584 case PGMMODE_PAE:
585 case PGMMODE_PAE_NX:
586 {
587 const unsigned iPdpt = iPDE / 256; /* iPDE * 2 / 512; iPDE is in 4 MB pages */
588 unsigned iPaePDE = iPDE * 2 % 512;
589 PX86PDPT pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPageCR3);
590 PCX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(pVCpu, pShwPdpt, iPdpt << X86_PDPT_SHIFT);
591 AssertFatal(pShwPaePd);
592
593 AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
594 ("Expected %RX64 vs %RX64; iPDE=%#x iPdpt=%#x iPaePDE=%#x %RGv %s\n",
595 pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
596 iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
597
598 iPaePDE++;
599 AssertFatal(iPaePDE < 512);
600
601 AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
602 ("Expected %RX64 vs %RX64; iPDE=%#x iPdpt=%#x iPaePDE=%#x %RGv %s\n",
603 pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
604 iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
605
606 AssertMsg(pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING,
607 ("%RX64; iPdpt=%#x iPDE=%#x iPaePDE=%#x %RGv %s\n",
608 pShwPdpt->a[iPdpt].u,
609 iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
610
611 PCPGMPOOLPAGE pPoolPagePd = pgmPoolGetPage(pPool, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
612 AssertFatal(pPoolPagePd);
613 AssertMsg(pPoolPagePd->cLocked, (".idx=%d .type=%d\n", pPoolPagePd->idx, pPoolPagePd->enmKind));
614 break;
615 }
616
617 default:
618 AssertFailed();
619 break;
620 }
621 }
622}
623
624
625/**
626 * Check the hypervisor mappings in the active CR3.
627 *
628 * Ignored if mappings are disabled (i.e. if HM is enabled).
629 *
630 * @param pVM The virtual machine.
631 */
632VMMDECL(void) PGMMapCheck(PVM pVM)
633{
634 /*
635 * Can skip this if mappings are disabled.
636 */
637 if (!pgmMapAreMappingsEnabled(pVM))
638 return;
639
640 /* This only applies to raw mode where we only support 1 VCPU. */
641 Assert(pVM->cCpus == 1);
642 PVMCPU pVCpu = VMMGetCpu0(pVM);
643 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
644
645 /*
646 * Iterate mappings.
647 */
648 pgmLock(pVM); /* to avoid assertions */
649 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
650 {
651 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
652 pgmMapCheckShadowPDEs(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
653 }
654 pgmUnlock(pVM);
655}
656
657#endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */
658#ifndef PGM_WITHOUT_MAPPINGS
659
660/**
661 * Apply the hypervisor mappings to the active CR3.
662 *
663 * Ignored if mappings are disabled (i.e. if HM is enabled).
664 *
665 * @returns VBox status.
666 * @param pVM The virtual machine.
667 * @param pShwPageCR3 CR3 root page
668 */
669int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
670{
671 /*
672 * Skip this if it doesn't apply.
673 */
674 if (!pgmMapAreMappingsEnabled(pVM))
675 return VINF_SUCCESS;
676
677 /* Note! This might not be logged successfully in RC because we usually
678 cannot flush the log at this point. */
679 Log4(("pgmMapActivateCR3: fixed mappings=%RTbool idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));
680
681#ifdef VBOX_STRICT
682 PVMCPU pVCpu = VMMGetCpu0(pVM);
683 Assert(pShwPageCR3 && pShwPageCR3 == pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
684#endif
685
686 /*
687 * Iterate mappings.
688 */
689 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
690 {
691 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
692 pgmMapSetShadowPDEs(pVM, pCur, iPDE);
693 }
694 return VINF_SUCCESS;
695}
696
697
698/**
699 * Remove the hypervisor mappings from the specified CR3
700 *
701 * Ignored if mappings are disabled (i.e. if HM is enabled).
702 *
703 * @returns VBox status.
704 * @param pVM The virtual machine.
705 * @param pShwPageCR3 CR3 root page
706 */
707int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
708{
709 /*
710 * Skip this if it doesn't apply.
711 */
712 if (!pgmMapAreMappingsEnabled(pVM))
713 return VINF_SUCCESS;
714
715 Assert(pShwPageCR3);
716 Log4(("pgmMapDeactivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));
717
718 /*
719 * Iterate mappings.
720 */
721 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
722 {
723 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
724 pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE, true /*fDeactivateCR3*/);
725 }
726 return VINF_SUCCESS;
727}
728
729
730/**
731 * Checks guest PD for conflicts with VMM GC mappings.
732 *
733 * @returns true if conflict detected.
734 * @returns false if not.
735 * @param pVM The virtual machine.
736 */
737VMMDECL(bool) PGMMapHasConflicts(PVM pVM)
738{
739 /*
740 * Can skip this if mappings are safely fixed.
741 */
742 if (!pgmMapAreMappingsFloating(pVM))
743 return false;
744 AssertReturn(pgmMapAreMappingsEnabled(pVM), false);
745
746 /* This only applies to raw mode where we only support 1 VCPU. */
747 PVMCPU pVCpu = &pVM->aCpus[0];
748
749 PGMMODE const enmGuestMode = PGMGetGuestMode(pVCpu);
750 Assert(enmGuestMode <= PGMMODE_PAE_NX);
751
752 /*
753 * Iterate mappings.
754 */
755 if (enmGuestMode == PGMMODE_32_BIT)
756 {
757 /*
758 * Resolve the page directory.
759 */
760 PX86PD pPD = pgmGstGet32bitPDPtr(pVCpu);
761 Assert(pPD);
762
763 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
764 {
765 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
766 unsigned iPT = pCur->cPTs;
767 while (iPT-- > 0)
768 if ( pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
769 && (EMIsRawRing0Enabled(pVM) || pPD->a[iPDE + iPT].n.u1User))
770 {
771 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);
772
773# ifdef IN_RING3
774 Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
775 " iPDE=%#x iPT=%#x PDE=%RGp.\n",
776 (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
777 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
778# else
779 Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
780 " iPDE=%#x iPT=%#x PDE=%RGp.\n",
781 (iPT + iPDE) << X86_PD_SHIFT,
782 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
783# endif
784 return true;
785 }
786 }
787 }
788 else if ( enmGuestMode == PGMMODE_PAE
789 || enmGuestMode == PGMMODE_PAE_NX)
790 {
791 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
792 {
793 RTGCPTR GCPtr = pCur->GCPtr;
794
795 unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
796 while (iPT-- > 0)
797 {
798 X86PDEPAE Pde = pgmGstGetPaePDE(pVCpu, GCPtr);
799
800 if ( Pde.n.u1Present
801 && (EMIsRawRing0Enabled(pVM) || Pde.n.u1User))
802 {
803 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);
804# ifdef IN_RING3
805 Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
806 " PDE=%016RX64.\n",
807 GCPtr, pCur->pszDesc, Pde.u));
808# else
809 Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
810 " PDE=%016RX64.\n",
811 GCPtr, Pde.u));
812# endif
813 return true;
814 }
815 GCPtr += (1 << X86_PD_PAE_SHIFT);
816 }
817 }
818 }
819 else
820 AssertFailed();
821
822 return false;
823}
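
For a 32-bit guest the conflict check reduces to walking the guest PDEs that back each hypervisor mapping and flagging any entry that is present (and user-accessible when raw ring-0 is disabled). A toy, self-contained version of that scan with assumed constant values:

/* Sketch of the 32-bit conflict scan over a toy guest page directory. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MY_PD_SHIFT 22
#define MY_PDE_P    0x1u
#define MY_PDE_US   0x4u

static bool has_conflict(const uint32_t *paPd, uint32_t GCPtrMap, unsigned cPTs,
                         bool fRawRing0)
{
    unsigned iPDE = GCPtrMap >> MY_PD_SHIFT;
    while (cPTs-- > 0)
        if (   (paPd[iPDE + cPTs] & MY_PDE_P)
            && (fRawRing0 || (paPd[iPDE + cPTs] & MY_PDE_US)))
            return true;
    return false;
}

int main(void)
{
    static uint32_t aPd[1024];            /* toy guest page directory */
    aPd[0x3fe] = 0x00400007;              /* guest uses 0xFF800000: P|RW|US */

    printf("conflict: %d\n", has_conflict(aPd, 0xff800000, 2, false));
    return 0;
}
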
824
825
826/**
827 * Checks and resolves (ring 3 only) guest conflicts with the hypervisor mappings.
828 *
829 * @returns VBox status.
830 * @param pVM The virtual machine.
831 */
832int pgmMapResolveConflicts(PVM pVM)
833{
834 /* The caller is expected to check these two conditions. */
835 Assert(!pVM->pgm.s.fMappingsFixed);
836 Assert(pgmMapAreMappingsEnabled(pVM));
837
838 /* This only applies to raw mode where we only support 1 VCPU. */
839 Assert(pVM->cCpus == 1);
840 PVMCPU pVCpu = &pVM->aCpus[0];
841 PGMMODE const enmGuestMode = PGMGetGuestMode(pVCpu);
842 Assert(enmGuestMode <= PGMMODE_PAE_NX);
843
844 if (enmGuestMode == PGMMODE_32_BIT)
845 {
846 /*
847 * Resolve the page directory.
848 */
849 PX86PD pPD = pgmGstGet32bitPDPtr(pVCpu);
850 Assert(pPD);
851
852 /*
853 * Iterate mappings.
854 */
855 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; )
856 {
857 PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
858 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
859 unsigned iPT = pCur->cPTs;
860 while (iPT-- > 0)
861 {
862 if ( pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
863 && ( EMIsRawRing0Enabled(pVM)
864 || pPD->a[iPDE + iPT].n.u1User))
865 {
866 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);
867
868# ifdef IN_RING3
869 Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
870 " iPDE=%#x iPT=%#x PDE=%RGp.\n",
871 (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
872 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
873 int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT);
874 AssertRCReturn(rc, rc);
875 break;
876# else
877 Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
878 " iPDE=%#x iPT=%#x PDE=%RGp.\n",
879 (iPT + iPDE) << X86_PD_SHIFT,
880 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
881 return VINF_PGM_SYNC_CR3;
882# endif
883 }
884 }
885 pCur = pNext;
886 }
887 }
888 else if ( enmGuestMode == PGMMODE_PAE
889 || enmGuestMode == PGMMODE_PAE_NX)
890 {
891 /*
892 * Iterate mappings.
893 */
894 for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur;)
895 {
896 PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
897 RTGCPTR GCPtr = pCur->GCPtr;
898 unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
899 while (iPT-- > 0)
900 {
901 X86PDEPAE Pde = pgmGstGetPaePDE(pVCpu, GCPtr);
902
903 if ( Pde.n.u1Present
904 && (EMIsRawRing0Enabled(pVM) || Pde.n.u1User))
905 {
906 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);
907#ifdef IN_RING3
908 Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
909 " PDE=%016RX64.\n",
910 GCPtr, pCur->pszDesc, Pde.u));
911 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, pCur->GCPtr);
912 AssertRCReturn(rc, rc);
913 break;
914#else
915 Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
916 " PDE=%016RX64.\n",
917 GCPtr, Pde.u));
918 return VINF_PGM_SYNC_CR3;
919#endif
920 }
921 GCPtr += (1 << X86_PD_PAE_SHIFT);
922 }
923 pCur = pNext;
924 }
925 }
926 else
927 AssertFailed();
928
929 Assert(!PGMMapHasConflicts(pVM));
930 return VINF_SUCCESS;
931}
932
933#endif /* PGM_WITHOUT_MAPPINGS */
934