VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllShw.h@1507

Last change on this file since 1507 was 23, checked in by vboxsync, 18 years ago

string.h & stdio.h + header cleanups.

/* $Id: PGMAllShw.h 23 2007-01-15 14:08:28Z vboxsync $ */
/** @file
 * VBox - Page Manager, Shadow Paging Template - All context code.
 */

/*
 * Copyright (C) 2006 InnoTek Systemberatung GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#undef SHWPT
#undef PSHWPT
#undef SHWPTE
#undef PSHWPTE
#undef SHWPD
#undef PSHWPD
#undef SHWPDE
#undef PSHWPDE
#undef SHW_PDE_PG_MASK
#undef SHW_PD_SHIFT
#undef SHW_PD_MASK
#undef SHW_PTE_PG_MASK
#undef SHW_PT_SHIFT
#undef SHW_PT_MASK
#undef SHW_TOTAL_PD_ENTRIES
#undef SHW_PDPTR_SHIFT
#undef SHW_PDPTR_MASK
#undef SHW_POOL_ROOT_IDX

#if PGM_SHW_TYPE == PGM_TYPE_32BIT
# define SHWPT                  X86PT
# define PSHWPT                 PX86PT
# define SHWPTE                 X86PTE
# define PSHWPTE                PX86PTE
# define SHWPD                  X86PD
# define PSHWPD                 PX86PD
# define SHWPDE                 X86PDE
# define PSHWPDE                PX86PDE
# define SHW_PDE_PG_MASK        X86_PDE_PG_MASK
# define SHW_PD_SHIFT           X86_PD_SHIFT
# define SHW_PD_MASK            X86_PD_MASK
# define SHW_TOTAL_PD_ENTRIES   X86_PG_ENTRIES
# define SHW_PTE_PG_MASK        X86_PTE_PG_MASK
# define SHW_PT_SHIFT           X86_PT_SHIFT
# define SHW_PT_MASK            X86_PT_MASK
# define SHW_POOL_ROOT_IDX      PGMPOOL_IDX_PD
#else
# define SHWPT                  X86PTPAE
# define PSHWPT                 PX86PTPAE
# define SHWPTE                 X86PTEPAE
# define PSHWPTE                PX86PTEPAE
# define SHWPD                  X86PDPAE
# define PSHWPD                 PX86PDPAE
# define SHWPDE                 X86PDEPAE
# define PSHWPDE                PX86PDEPAE
# define SHW_PDE_PG_MASK        X86_PDE_PAE_PG_MASK
# define SHW_PD_SHIFT           X86_PD_PAE_SHIFT
# define SHW_PD_MASK            X86_PD_PAE_MASK
# define SHW_PTE_PG_MASK        X86_PTE_PAE_PG_MASK
# define SHW_PT_SHIFT           X86_PT_PAE_SHIFT
# define SHW_PT_MASK            X86_PT_PAE_MASK
#if PGM_SHW_TYPE == PGM_TYPE_AMD64
# define SHW_PDPTR_SHIFT        X86_PDPTR_SHIFT
# define SHW_PDPTR_MASK         X86_PDPTR_MASK
# define SHW_POOL_ROOT_IDX      PGMPOOL_IDX_PML4
#else /* 32-bit PAE mode */
# define SHW_PDPTR_SHIFT        X86_PDPTR_SHIFT
# define SHW_PDPTR_MASK         X86_PDPTR_MASK_32
# define SHW_TOTAL_PD_ENTRIES   (X86_PG_PAE_ENTRIES*4)
# define SHW_POOL_ROOT_IDX      PGMPOOL_IDX_PAE_PD
#endif
#endif
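
/*
 * Usage sketch (assumption, not verbatim from this tree): the includer
 * (typically PGMAll.cpp and its context siblings) instantiates this template
 * once per shadow paging mode by defining the mode macros before inclusion:
 *
 *     #define PGM_SHW_TYPE        PGM_TYPE_32BIT
 *     #define PGM_SHW_NAME(name)  PGM_SHW_NAME_32BIT(name)
 *     #include "PGMAllShw.h"
 *     #undef  PGM_SHW_TYPE
 *     #undef  PGM_SHW_NAME
 *
 * Only PGM_SHW_TYPE and PGM_SHW_DECL are consumed by this file itself.
 */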



/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
__BEGIN_DECLS
PGM_SHW_DECL(int, GetPage)(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
PGM_SHW_DECL(int, ModifyPage)(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask);
PGM_SHW_DECL(int, GetPDEByIndex)(PVM pVM, uint32_t iPD, PX86PDEPAE pPde);
PGM_SHW_DECL(int, SetPDEByIndex)(PVM pVM, uint32_t iPD, X86PDEPAE Pde);
PGM_SHW_DECL(int, ModifyPDEByIndex)(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask);
__END_DECLS
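
/*
 * Note (assumption based on the usual PGM template pattern): PGM_SHW_DECL()
 * routes the function name through PGM_SHW_NAME(), yielding a mode- and
 * context-specific symbol, so each inclusion of this header emits its own
 * set of the functions declared above.
 */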



/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status.
 * @param   pVM         VM Handle.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*.
 * @param   pHCPhys     Where to store the HC physical address of the page.
 *                      This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
PGM_SHW_DECL(int, GetPage)(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    /*
     * Get the PDE.
     */
#if PGM_SHW_TYPE == PGM_TYPE_AMD64
    /*
     * For the first 4G we have preallocated page directories.
     * Since the two upper levels contain only fixed flags, we skip those when possible.
     */
    X86PDEPAE Pde;
#if GC_ARCH_BITS == 64
    if (GCPtr < _4G)
#endif
    {
        const unsigned iPdptr = (GCPtr >> X86_PDPTR_SHIFT) & X86_PDPTR_MASK;
        const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
        Pde = CTXMID(pVM->pgm.s.ap,PaePDs)[iPdptr]->a[iPd];
    }
#if GC_ARCH_BITS == 64
    else
    {
        /* PML4 */
        const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
        X86PML4E Pml4e = CTXMID(pVM->pgm.s.p,PaePML4)->a[iPml4];
        if (!Pml4e.n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;

        /* PDPTR */
        PX86PDPTR pPdPtr;
        int rc = PGM_HCPHYS_2_PTR(pVM, Pml4e.u & X86_PML4E_PG_MASK, &pPdPtr);
        if (VBOX_FAILURE(rc))
            return rc;
        const unsigned iPdptr = (GCPtr >> X86_PDPTR_SHIFT) & X86_PDPTR_MASK;
        X86PDPE Pdpe = pPdPtr->a[iPdptr];
        if (!Pdpe.n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;

        /* PD */
        PX86PDPAE pPd;
        rc = PGM_HCPHYS_2_PTR(pVM, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
        if (VBOX_FAILURE(rc))
            return rc;
        const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
        Pde = pPd->a[iPd];
    }
#endif /* GC_ARCH_BITS == 64 */

#elif PGM_SHW_TYPE == PGM_TYPE_PAE
    const unsigned iPdptr = (GCPtr >> X86_PDPTR_SHIFT) & X86_PDPTR_MASK;
    const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    X86PDEPAE Pde = CTXMID(pVM->pgm.s.ap,PaePDs)[iPdptr]->a[iPd];

#else /* PGM_TYPE_32BIT */
    const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
    X86PDE Pde = CTXMID(pVM->pgm.s.p,32BitPD)->a[iPd];
#endif
    if (!Pde.n.u1Present)
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /*
     * Get PT entry.
     */
    PSHWPT pPT;
    if (!(Pde.u & PGM_PDFLAGS_MAPPING))
    {
        int rc = PGM_HCPHYS_2_PTR(pVM, Pde.u & SHW_PDE_PG_MASK, &pPT);
        if (VBOX_FAILURE(rc))
            return rc;
    }
    else /* mapping: */
    {
        Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));

        PPGMMAPPING pMap = pgmGetMapping(pVM, (RTGCPTR)GCPtr);
        AssertMsgReturn(pMap, ("GCPtr=%VGv\n", GCPtr), VERR_INTERNAL_ERROR);
#if PGM_SHW_TYPE == PGM_TYPE_32BIT
        pPT = pMap->aPTs[(GCPtr - pMap->GCPtr) >> PGDIR_SHIFT].CTXSUFF(pPT);
#else /* PAE and AMD64: */
        pPT = pMap->aPTs[(GCPtr - pMap->GCPtr) >> PGDIR_SHIFT].CTXSUFF(paPaePTs);
#endif
    }
    const unsigned iPt = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
    SHWPTE Pte = pPT->a[iPt];
    if (!Pte.n.u1Present)
        return VERR_PAGE_NOT_PRESENT;

    /*
     * Store the results.
     * RW and US flags depend on the entire page translation hierarchy - except for
     * legacy PAE which has a simplified PDPE.
     */
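    /*
     * Worked example (illustrative): if the PTE has X86_PTE_RW set but the
     * PDE does not, the returned flags have RW clear; both levels must grant
     * RW (and likewise US) for the bit to survive the AND below.
     */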
    if (pfFlags)
        *pfFlags = (Pte.u & ~SHW_PTE_PG_MASK)
                 & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));
    if (pHCPhys)
        *pHCPhys = Pte.u & SHW_PTE_PG_MASK;

    return VINF_SUCCESS;
}
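
/*
 * Usage sketch (illustrative; pVM and GCPtr come from the caller, and
 * PGM_SHW_NAME() resolves to the current shadow mode's instance of GetPage):
 *
 *     uint64_t fFlags;
 *     RTHCPHYS HCPhys;
 *     int rc = PGM_SHW_NAME(GetPage)(pVM, GCPtr, &fFlags, &HCPhys);
 *     if (VBOX_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *     {
 *         // Present and writable in the shadow tables; HCPhys holds the
 *         // page-aligned host-context physical address.
 *     }
 */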


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range. Page aligned!
 * @param   cb          Size (in bytes) of the range to apply the modification to. Page aligned!
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be extremely CAREFUL with ~'ing values because they can be 32-bit!
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 */
PGM_SHW_DECL(int, ModifyPage)(PVM pVM, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Walk page tables and pages till we're done.
     */
    for (;;)
    {
        /*
         * Get the PDE.
         */
#if PGM_SHW_TYPE == PGM_TYPE_AMD64
        /*
         * For the first 4G we have preallocated page directories.
         * Since the two upper levels contain only fixed flags, we skip those when possible.
         */
        X86PDEPAE Pde;
#if GC_ARCH_BITS == 64
        if (GCPtr < _4G)
#endif
        {
            const unsigned iPdptr = (GCPtr >> X86_PDPTR_SHIFT) & X86_PDPTR_MASK;
            const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            Pde = CTXMID(pVM->pgm.s.ap,PaePDs)[iPdptr]->a[iPd];
        }
#if GC_ARCH_BITS == 64
        else
        {
            /* PML4 */
            const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
            X86PML4E Pml4e = CTXMID(pVM->pgm.s.p,PaePML4)->a[iPml4];
            if (!Pml4e.n.u1Present)
                return VERR_PAGE_TABLE_NOT_PRESENT;

            /* PDPTR */
            PX86PDPTR pPdPtr;
            int rc = PGM_HCPHYS_2_PTR(pVM, Pml4e.u & X86_PML4E_PG_MASK, &pPdPtr);
            if (VBOX_FAILURE(rc))
                return rc;
            const unsigned iPdptr = (GCPtr >> X86_PDPTR_SHIFT) & X86_PDPTR_MASK;
            X86PDPE Pdpe = pPdPtr->a[iPdptr];
            if (!Pdpe.n.u1Present)
                return VERR_PAGE_TABLE_NOT_PRESENT;

            /* PD */
            PX86PDPAE pPd;
            rc = PGM_HCPHYS_2_PTR(pVM, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
            if (VBOX_FAILURE(rc))
                return rc;
            const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            Pde = pPd->a[iPd];
        }
#endif /* GC_ARCH_BITS == 64 */

#elif PGM_SHW_TYPE == PGM_TYPE_PAE
        const unsigned iPdptr = (GCPtr >> X86_PDPTR_SHIFT) & X86_PDPTR_MASK;
        const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
        X86PDEPAE Pde = CTXMID(pVM->pgm.s.ap,PaePDs)[iPdptr]->a[iPd];

#else /* PGM_TYPE_32BIT */
        const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
        X86PDE Pde = CTXMID(pVM->pgm.s.p,32BitPD)->a[iPd];
#endif
        if (!Pde.n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;


        /*
         * Map the page table.
         */
        PSHWPT pPT;
        int rc = PGM_HCPHYS_2_PTR(pVM, Pde.u & SHW_PDE_PG_MASK, &pPT);
        if (VBOX_FAILURE(rc))
            return rc;

        unsigned iPTE = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
        while (iPTE < ELEMENTS(pPT->a))
        {
            if (pPT->a[iPTE].n.u1Present)
            {
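                /* Apply fFlags/fMask to the flag bits only; the physical
                   address bits (SHW_PTE_PG_MASK) are always preserved. */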
                pPT->a[iPTE].u = (pPT->a[iPTE].u & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK);
                Assert(pPT->a[iPTE].n.u1Present);
                PGM_INVL_PG(GCPtr);
            }

            /* next page */
            cb -= PAGE_SIZE;
            if (!cb)
                return VINF_SUCCESS;
            GCPtr += PAGE_SIZE;
            iPTE++;
        }
    }
}
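
/*
 * Usage sketch (illustrative): write-protecting a single page without
 * touching its other flag bits. fMask is an AND mask, so every bit to keep
 * must remain set; note the uint64_t cast before ~ to avoid truncation:
 *
 *     // fFlags = 0: nothing to OR in; fMask clears RW and keeps the rest.
 *     rc = PGM_SHW_NAME(ModifyPage)(pVM, GCPtr, PAGE_SIZE, 0, ~(uint64_t)X86_PTE_RW);
 */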

/**
 * Retrieve shadow PDE
 *
 * @returns VBox status code.
 * @param   pVM     The virtual machine.
 * @param   iPD     Shadow PDE index.
 * @param   pPde    Where to store the shadow PDE entry.
 */
PGM_SHW_DECL(int, GetPDEByIndex)(PVM pVM, unsigned iPD, PX86PDEPAE pPde)
{
#if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE
    /*
     * Get page directory addresses.
     */
    Assert(iPD < SHW_TOTAL_PD_ENTRIES);
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
    PX86PDE pPdeSrc = &CTXMID(pVM->pgm.s.p,32BitPD)->a[iPD];
# else
    PX86PDEPAE pPdeSrc = &CTXMID(pVM->pgm.s.ap,PaePDs)[0]->a[iPD];  /* We treat this as a PD with 2048 entries. */
# endif

    pPde->u = (X86PGPAEUINT)pPdeSrc->u;
    return VINF_SUCCESS;

#else
    AssertFailed();
    return VERR_NOT_IMPLEMENTED;
#endif
}

/**
 * Set shadow PDE
 *
 * @returns VBox status code.
 * @param   pVM     The virtual machine.
 * @param   iPD     Shadow PDE index.
 * @param   Pde     Shadow PDE.
 */
PGM_SHW_DECL(int, SetPDEByIndex)(PVM pVM, unsigned iPD, X86PDEPAE Pde)
{
#if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE
    /*
     * Get page directory addresses and update the specified entry.
     */
    Assert(iPD < SHW_TOTAL_PD_ENTRIES);
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
    Assert(Pde.au32[1] == 0); /* First uint32_t is backwards compatible. */
    Assert(Pde.n.u1Size == 0);
    PX86PDE pPdeDst = &CTXMID(pVM->pgm.s.p,32BitPD)->a[iPD];
    pPdeDst->u = Pde.au32[0];
# else
    PX86PDEPAE pPdeDst = &CTXMID(pVM->pgm.s.ap,PaePDs)[0]->a[iPD];  /* We treat this as a PD with 2048 entries. */
    pPdeDst->u = Pde.u;
# endif
    Assert(pPdeDst->n.u1Present);

    return VINF_SUCCESS;
#else
    AssertFailed();
    return VERR_NOT_IMPLEMENTED;
#endif
}

/**
 * Modify shadow PDE
 *
 * @returns VBox status code.
 * @param   pVM     The virtual machine.
 * @param   iPD     Shadow PDE index.
 * @param   fFlags  The OR mask - page flags X86_PDE_*, excluding the page mask of course.
 * @param   fMask   The AND mask - page flags X86_PDE_*.
 *                  Be extremely CAREFUL with ~'ing values because they can be 32-bit!
 */
PGM_SHW_DECL(int, ModifyPDEByIndex)(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask)
{
#if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE
    /*
     * Get page directory addresses and update the specified entry.
     */
    Assert(iPD < SHW_TOTAL_PD_ENTRIES);
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
    PX86PDE pPdeDst = &CTXMID(pVM->pgm.s.p,32BitPD)->a[iPD];

    pPdeDst->u = ((pPdeDst->u & ((X86PGUINT)fMask | SHW_PDE_PG_MASK)) | ((X86PGUINT)fFlags & ~SHW_PDE_PG_MASK));
    Assert(!pPdeDst->n.u1Size);
# else
    PX86PDEPAE pPdeDst = &CTXMID(pVM->pgm.s.ap,PaePDs)[0]->a[iPD];  /* We treat this as a PD with 2048 entries. */

    pPdeDst->u = (pPdeDst->u & (fMask | SHW_PDE_PG_MASK)) | (fFlags & ~SHW_PDE_PG_MASK);
# endif
    Assert(pPdeDst->n.u1Present);

    return VINF_SUCCESS;
#else
    AssertFailed();
    return VERR_NOT_IMPLEMENTED;
#endif
}
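
/*
 * Usage sketch (illustrative) for the *PDEByIndex helpers: clearing the RW
 * bit of one shadow PDE while keeping everything else, mirroring the
 * fFlags/fMask convention of ModifyPage above:
 *
 *     rc = PGM_SHW_NAME(ModifyPDEByIndex)(pVM, iPD, 0, ~(uint64_t)X86_PDE_RW);
 */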