VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp @ 16524

Last change on this file since 16524 was 16418, checked in by vboxsync, 16 years ago

Updates

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 14.2 KB
/* $Id: PGMAllMap.cpp 16418 2009-01-30 14:49:06Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <VBox/err.h>


/**
 * Maps a range of physical pages at a given virtual address
 * in the guest context.
 *
 * The GC virtual address range must be within an existing mapping.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   GCPtr       Where to map the page(s). Must be page aligned.
 * @param   HCPhys      Start of the range of physical pages. Must be page aligned.
 * @param   cbPages     Number of bytes to map. Must be page aligned.
 * @param   fFlags      Page flags (X86_PTE_*).
 */
VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
{
    AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));

    /*
     * Validate input.
     */
    AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
    AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
    AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));

    /* hypervisor defaults */
    if (!fFlags)
        fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        if (GCPtr - pCur->GCPtr < pCur->cb)
        {
            if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
            {
                AssertMsgFailed(("Invalid range!!\n"));
                return VERR_INVALID_PARAMETER;
            }

            /*
             * Setup PTE.
             */
            X86PTEPAE Pte;
            Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);

            /*
             * Update the page tables.
             */
            for (;;)
            {
                RTGCUINTPTR off = GCPtr - pCur->GCPtr;
                const unsigned iPT = off >> X86_PD_SHIFT;
                const unsigned iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;

                /* 32-bit */
                pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u;      /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */

                /* pae */
                pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512].u = Pte.u;

                /* next */
                cbPages -= PAGE_SIZE;
                if (!cbPages)
                    break;
                GCPtr += PAGE_SIZE;
                Pte.u += PAGE_SIZE;
            }

            return VINF_SUCCESS;
        }

        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("GCPtr=%#x was not found in any mapping ranges!\n", GCPtr));
    return VERR_INVALID_PARAMETER;
}
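
/*
 * Editor's illustrative sketch (not part of the original file): a minimal,
 * hypothetical caller of PGMMap.  The virtual and physical addresses below are
 * invented for illustration and would in reality have to fall inside an
 * existing hypervisor mapping; only PGMMap, the X86_PTE_* flags and the IPRT
 * assertion macros come from the headers included above.
 */
#if 0
static int pgmSampleMapTwoPages(PVM pVM)
{
    RTGCUINTPTR GCPtr  = 0xa0000000;                /* page aligned, assumed to lie inside an existing mapping */
    RTHCPHYS    HCPhys = UINT64_C(0x00100000);      /* page aligned host physical start address (made up) */

    /* Map two pages read/write; passing 0 for fFlags would instead pick the
       hypervisor defaults (P | A | D) applied at the top of PGMMap. */
    int rc = PGMMap(pVM, GCPtr, HCPhys, 2 * PAGE_SIZE,
                    X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D);
    AssertRC(rc);
    return rc;
}
#endif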


/**
 * Sets (replaces) the page flags for a range of pages in a mapping.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags)
{
    return PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in a mapping.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#x\n", fFlags));
    Assert(cb);

    /*
     * Align the input.
     */
    cb    += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb     = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr  = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
        if (off < pCur->cb)
        {
            AssertMsgReturn(off + cb <= pCur->cb,
                            ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
                             GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
                            VERR_INVALID_PARAMETER);

            /*
             * Perform the requested operation.
             */
            while (cb > 0)
            {
                unsigned iPT  = off >> X86_PD_SHIFT;
                unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
                while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
                {
                    /* 32-Bit */
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u &= fMask | X86_PTE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u |= fFlags & ~X86_PTE_PG_MASK;

                    /* PAE */
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u &= fMask | X86_PTE_PAE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u |= fFlags & ~X86_PTE_PAE_PG_MASK;

                    /* invalidate tlb */
                    PGM_INVL_PG((RTGCUINTPTR)pCur->GCPtr + off);

                    /* next */
                    iPTE++;
                    cb -= PAGE_SIZE;
                    off += PAGE_SIZE;
                }
            }

            return VINF_SUCCESS;
        }
        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("Page range %#x LB%#x not found\n", GCPtr, cb));
    return VERR_INVALID_PARAMETER;
}
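
/*
 * Editor's illustrative sketch (not part of the original file): hypothetical
 * callers showing how PGMMapSetPage relates to PGMMapModifyPage.  SetPage
 * replaces the flags outright (fMask = 0), while ModifyPage computes
 * new = (old & fMask) | fFlags for every PTE in the range (the physical
 * address bits are preserved either way).  The helper name and address are
 * invented; only the two APIs above and the X86_PTE_* flags are real.
 */
#if 0
static void pgmSampleAdjustPageFlags(PVM pVM, RTGCPTR GCPtr)
{
    /* Replace all flags on one page: present, writable, accessed, dirty. */
    PGMMapSetPage(pVM, GCPtr, PAGE_SIZE, X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D);

    /* Clear only the RW bit on the same page, leaving all other flags alone:
       fMask keeps everything except RW, fFlags adds nothing. */
    PGMMapModifyPage(pVM, GCPtr, PAGE_SIZE, 0, ~(uint64_t)X86_PTE_RW);
}
#endif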


/**
 * Sets all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pMap        Pointer to the mapping in question.
 * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
{
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    if (!pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        return;    /* too early */
#endif

    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
    Assert(enmShadowMode <= PGMMODE_PAE_NX);

    /*
     * Init the page tables and insert them into the page directories.
     */
    unsigned i = pMap->cPTs;
    iNewPDE += i;
    while (i-- > 0)
    {
        iNewPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
                AssertFatal(pShw32BitPd);

                if (pShw32BitPd->a[iNewPDE].n.u1Present)
                {
                    Assert(!(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING));
                    pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);
                }

                X86PDE Pde;
                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
                Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
                pShw32BitPd->a[iNewPDE] = Pde;
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDPT  pShwPdpt;
                PX86PDPAE pShwPaePd;
                const unsigned iPdPt = iNewPDE / 256;
                unsigned iPDE = iNewPDE * 2 % 512;

                pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
                Assert(pShwPdpt);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
                AssertFatal(pShwPaePd);

                PPGMPOOLPAGE pPoolPagePde = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePde);

                if (pShwPaePd->a[iPDE].n.u1Present)
                {
                    Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
                }

                X86PDEPAE PdePae0;
                PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
                pShwPaePd->a[iPDE] = PdePae0;

                /* 2nd 2 MB PDE of the 4 MB region */
                iPDE++;
                AssertFatal(iPDE < 512);

                if (pShwPaePd->a[iPDE].n.u1Present)
                {
                    Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
                }

                X86PDEPAE PdePae1;
                PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
                pShwPaePd->a[iPDE] = PdePae1;

                /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
                pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}
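
/*
 * Editor's note (not part of the original file): a worked example of the PAE
 * index arithmetic used in pgmMapSetShadowPDEs above.  One 32-bit PDE covers
 * 4 MB, which corresponds to two 2 MB PAE PDEs; a PAE page directory holds
 * 512 entries, i.e. 256 of the 32-bit PDE slots.  For example, iNewPDE = 600:
 *     iPdPt = 600 / 256     = 2      (third PAE page directory, PDPT entry 2)
 *     iPDE  = 600 * 2 % 512 = 176    (HCPhysPaePT0 goes into entry 176,
 *                                     HCPhysPaePT1 into entry 177)
 */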

/**
 * Clears all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pMap        Pointer to the mapping in question.
 * @param   iOldPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapClearShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE)
{
    unsigned i = pMap->cPTs;
    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    if (!pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        return;    /* too early */
#endif

    iOldPDE += i;
    while (i-- > 0)
    {
        iOldPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
                AssertFatal(pShw32BitPd);

                pShw32BitPd->a[iOldPDE].u = 0;
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDPT  pPdpt = NULL;
                PX86PDPAE pShwPaePd = NULL;

                const unsigned iPD = iOldPDE / 256;     /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
                unsigned iPDE = iOldPDE * 2 % 512;
                pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPD << X86_PDPT_SHIFT));
                AssertFatal(pShwPaePd);

                pShwPaePd->a[iPDE].u = 0;

                iPDE++;
                AssertFatal(iPDE < 512);

                pShwPaePd->a[iPDE].u = 0;
                /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
                pPdpt->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}

/**
 * Apply the hypervisor mappings to the active CR3.
 *
 * @returns VBox status.
 * @param   pVM         The virtual machine.
 */
VMMDECL(int) PGMMapActivateAll(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    if (!pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        return VINF_SUCCESS;    /* too early */
#endif

    Assert(PGMGetGuestMode(pVM) >= PGMMODE_32_BIT && PGMGetGuestMode(pVM) <= PGMMODE_PAE_NX);

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;

        pgmMapSetShadowPDEs(pVM, pCur, iPDE);
    }

    return VINF_SUCCESS;
}

/**
 * Remove the hypervisor mappings from the active CR3.
 *
 * @returns VBox status.
 * @param   pVM         The virtual machine.
 */
VMMDECL(int) PGMMapDeactivateAll(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    if (!pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        return VINF_SUCCESS;    /* too early */

    Assert(PGMGetGuestMode(pVM) >= PGMMODE_32_BIT && PGMGetGuestMode(pVM) <= PGMMODE_PAE_NX);
#endif

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;

        pgmMapClearShadowPDEs(pVM, pCur, iPDE);
    }
    return VINF_SUCCESS;
}
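
/*
 * Editor's illustrative sketch (not part of the original file): a hypothetical
 * sequence showing how PGMMapDeactivateAll and PGMMapActivateAll bracket a
 * shadow paging change while the mappings are still floating (not fixed).
 * The wrapper function and the omitted middle step are invented; only the two
 * APIs above and the IPRT assertion macro are real.
 */
#if 0
static int pgmSampleReinstallMappings(PVM pVM)
{
    /* Take the mapping PDEs out of the currently active shadow page tables. */
    int rc = PGMMapDeactivateAll(pVM);
    AssertRCReturn(rc, rc);

    /* ... switch or rebuild the shadow CR3 here (omitted) ... */

    /* Write the mapping PDEs into the newly active shadow page tables. */
    rc = PGMMapActivateAll(pVM);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
#endif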