VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMMap.cpp@ 16210

Last change on this file since 16210 was 16203, checked in by vboxsync, 16 years ago

Updates in preparation for PGM pool based paging everywhere.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 52.5 KB
1/* $Id: PGMMap.cpp 16203 2009-01-23 16:36:23Z vboxsync $ */
2/** @file
3 * PGM - Page Manager, Guest Context Mappings.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/dbgf.h>
28#include <VBox/pgm.h>
29#include "PGMInternal.h"
30#include <VBox/vm.h>
31
32#include <VBox/log.h>
33#include <VBox/err.h>
34#include <iprt/asm.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37
38
39/*******************************************************************************
40* Internal Functions *
41*******************************************************************************/
42static void pgmR3MapClearPDEs(PPGM pPGM, PPGMMAPPING pMap, unsigned iOldPDE);
43static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
44static int pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
45static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
46
47
48
49/**
50 * Creates a page table based mapping in GC.
51 *
52 * @returns VBox status code.
53 * @param pVM VM Handle.
54 * @param GCPtr Virtual Address. (Page table aligned!)
55 * @param cb Size of the range. Must be 4MB aligned!
56 * @param pfnRelocate Relocation callback function.
57 * @param pvUser User argument to the callback.
58 * @param pszDesc Pointer to description string. This must not be freed.
59 */
60VMMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc)
61{
62 LogFlow(("PGMR3MapPT: GCPtr=%#x cb=%d pfnRelocate=%p pvUser=%p pszDesc=%s\n", GCPtr, cb, pfnRelocate, pvUser, pszDesc));
63 AssertMsg(pVM->pgm.s.pInterPD && pVM->pgm.s.pShwNestedRootR3, ("Paging isn't initialized, init order problems!\n"));
64
65 /*
66 * Validate input.
67 */
68 if (cb < _2M || cb > 64 * _1M)
69 {
70 AssertMsgFailed(("Serious? cb=%d\n", cb));
71 return VERR_INVALID_PARAMETER;
72 }
73 cb = RT_ALIGN_32(cb, _4M);
74 RTGCPTR GCPtrLast = GCPtr + cb - 1;
75 if (GCPtrLast < GCPtr)
76 {
77 AssertMsgFailed(("Range wraps! GCPtr=%x GCPtrLast=%x\n", GCPtr, GCPtrLast));
78 return VERR_INVALID_PARAMETER;
79 }
80 if (pVM->pgm.s.fMappingsFixed)
81 {
82 AssertMsgFailed(("Mappings are fixed! It's not possible to add new mappings at this time!\n"));
83 return VERR_PGM_MAPPINGS_FIXED;
84 }
85 if (!pfnRelocate)
86 {
87 AssertMsgFailed(("Callback is required\n"));
88 return VERR_INVALID_PARAMETER;
89 }
90
91 /*
92 * Find list location.
93 */
94 PPGMMAPPING pPrev = NULL;
95 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
96 while (pCur)
97 {
98 if (pCur->GCPtrLast >= GCPtr && pCur->GCPtr <= GCPtrLast)
99 {
100 AssertMsgFailed(("Address is already in use by %s. req %#x-%#x take %#x-%#x\n",
101 pCur->pszDesc, GCPtr, GCPtrLast, pCur->GCPtr, pCur->GCPtrLast));
102 LogRel(("VERR_PGM_MAPPING_CONFLICT: Address is already in use by %s. req %#x-%#x take %#x-%#x\n",
103 pCur->pszDesc, GCPtr, GCPtrLast, pCur->GCPtr, pCur->GCPtrLast));
104 return VERR_PGM_MAPPING_CONFLICT;
105 }
106 if (pCur->GCPtr > GCPtr)
107 break;
108 pPrev = pCur;
109 pCur = pCur->pNextR3;
110 }
111
112 /*
113 * Check for conflicts with intermediate mappings.
114 */
115 const unsigned iPageDir = GCPtr >> X86_PD_SHIFT;
116 const unsigned cPTs = cb >> X86_PD_SHIFT;
117 if (pVM->pgm.s.fFinalizedMappings)
118 {
119 for (unsigned i = 0; i < cPTs; i++)
120 if (pVM->pgm.s.pInterPD->a[iPageDir + i].n.u1Present)
121 {
122 AssertMsgFailed(("Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << X86_PD_SHIFT)));
123 LogRel(("VERR_PGM_MAPPING_CONFLICT: Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << X86_PD_SHIFT)));
124 return VERR_PGM_MAPPING_CONFLICT;
125 }
126 /** @todo AMD64: add check in PAE structures too, so we can remove all the 32-Bit paging stuff there. */
127 }
128
129 /*
130 * Allocate and initialize the new list node.
131 */
132 PPGMMAPPING pNew;
133 int rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM, (void **)&pNew);
134 if (RT_FAILURE(rc))
135 return rc;
136 pNew->GCPtr = GCPtr;
137 pNew->GCPtrLast = GCPtrLast;
138 pNew->cb = cb;
139 pNew->pszDesc = pszDesc;
140 pNew->pfnRelocate = pfnRelocate;
141 pNew->pvUser = pvUser;
142 pNew->cPTs = cPTs;
143
144 /*
145 * Allocate page tables and insert them into the page directories.
146 * (One 32-bit PT and two PAE PTs.)
147 */
148 uint8_t *pbPTs;
149 rc = MMHyperAlloc(pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM, (void **)&pbPTs);
150 if (RT_FAILURE(rc))
151 {
152 MMHyperFree(pVM, pNew);
153 return VERR_NO_MEMORY;
154 }
155
156 /*
157 * Init the page tables and insert them into the page directories.
158 */
159 Log4(("PGMR3MapPT: GCPtr=%RGv cPTs=%u pbPTs=%p\n", GCPtr, cPTs, pbPTs));
160 for (unsigned i = 0; i < cPTs; i++)
161 {
162 /*
163 * 32-bit.
164 */
165 pNew->aPTs[i].pPTR3 = (PX86PT)pbPTs;
166 pNew->aPTs[i].pPTRC = MMHyperR3ToRC(pVM, pNew->aPTs[i].pPTR3);
167 pNew->aPTs[i].pPTR0 = MMHyperR3ToR0(pVM, pNew->aPTs[i].pPTR3);
168 pNew->aPTs[i].HCPhysPT = MMR3HyperHCVirt2HCPhys(pVM, pNew->aPTs[i].pPTR3);
169 pbPTs += PAGE_SIZE;
170 Log4(("PGMR3MapPT: i=%d: pPTR3=%RHv pPTRC=%RRv pPTR0=%RHv HCPhysPT=%RHp\n",
171 i, pNew->aPTs[i].pPTR3, pNew->aPTs[i].pPTRC, pNew->aPTs[i].pPTR0, pNew->aPTs[i].HCPhysPT));
172
173 /*
174 * PAE.
175 */
176 pNew->aPTs[i].HCPhysPaePT0 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs);
177 pNew->aPTs[i].HCPhysPaePT1 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs + PAGE_SIZE);
178 pNew->aPTs[i].paPaePTsR3 = (PX86PTPAE)pbPTs;
179 pNew->aPTs[i].paPaePTsRC = MMHyperR3ToRC(pVM, pbPTs);
180 pNew->aPTs[i].paPaePTsR0 = MMHyperR3ToR0(pVM, pbPTs);
181 pbPTs += PAGE_SIZE * 2;
182 Log4(("PGMR3MapPT: i=%d: paPaePTsR3=%RHv paPaePTsRC=%RRv paPaePTsR0=%RHv HCPhysPaePT0=%RHp HCPhysPaePT1=%RHp\n",
183 i, pNew->aPTs[i].paPaePTsR3, pNew->aPTs[i].paPaePTsRC, pNew->aPTs[i].paPaePTsR0, pNew->aPTs[i].HCPhysPaePT0, pNew->aPTs[i].HCPhysPaePT1));
184 }
185 if (pVM->pgm.s.fFinalizedMappings)
186 pgmR3MapSetPDEs(pVM, pNew, iPageDir);
187 /* else PGMR3FinalizeMappings() */
188
189 /*
190 * Insert the new mapping.
191 */
192 pNew->pNextR3 = pCur;
193 pNew->pNextRC = pCur ? MMHyperR3ToRC(pVM, pCur) : NIL_RTRCPTR;
194 pNew->pNextR0 = pCur ? MMHyperR3ToR0(pVM, pCur) : NIL_RTR0PTR;
195 if (pPrev)
196 {
197 pPrev->pNextR3 = pNew;
198 pPrev->pNextRC = MMHyperR3ToRC(pVM, pNew);
199 pPrev->pNextR0 = MMHyperR3ToR0(pVM, pNew);
200 }
201 else
202 {
203 pVM->pgm.s.pMappingsR3 = pNew;
204 pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pNew);
205 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pNew);
206 }
207
208 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
209 return VINF_SUCCESS;
210}
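/*
 * A minimal usage sketch for PGMR3MapPT (not taken from this file; the caller,
 * callback name and addresses are illustrative assumptions, using the
 * PFNPGMRELOCATE callback signature). A component registers a floating mapping
 * together with a relocation callback that accepts or rejects suggested
 * addresses and reacts to the actual relocation:
 *
 *     static DECLCALLBACK(bool) exampleR3Relocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
 *                                                 PGMRELOCATECALL enmMode, void *pvUser)
 *     {
 *         switch (enmMode)
 *         {
 *             case PGMRELOCATECALL_SUGGEST:
 *                 return true;                    // any 4MB aligned address is fine for this caller
 *             case PGMRELOCATECALL_RELOCATE:
 *                 // update any cached pointers that referred to GCPtrOld here
 *                 return true;
 *             default:
 *                 return false;
 *         }
 *     }
 *
 *     RTGCPTR GCPtrHint = 0xa0000000;             // initial, 4MB aligned hint; may be relocated
 *     int rc = PGMR3MapPT(pVM, GCPtrHint, _4M, exampleR3Relocate, NULL, "Example mapping");
 *     AssertRCReturn(rc, rc);
 */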
211
212
213/**
214 * Removes a page table based mapping.
215 *
216 * @returns VBox status code.
217 * @param pVM VM Handle.
218 * @param GCPtr Virtual Address. (Page table aligned!)
219 */
220VMMR3DECL(int) PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr)
221{
222 LogFlow(("PGMR3UnmapPT: GCPtr=%#x\n", GCPtr));
223 AssertReturn(pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
224
225 /*
226 * Find it.
227 */
228 PPGMMAPPING pPrev = NULL;
229 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
230 while (pCur)
231 {
232 if (pCur->GCPtr == GCPtr)
233 {
234 /*
235 * Unlink it.
236 */
237 if (pPrev)
238 {
239 pPrev->pNextR3 = pCur->pNextR3;
240 pPrev->pNextRC = pCur->pNextRC;
241 pPrev->pNextR0 = pCur->pNextR0;
242 }
243 else
244 {
245 pVM->pgm.s.pMappingsR3 = pCur->pNextR3;
246 pVM->pgm.s.pMappingsRC = pCur->pNextRC;
247 pVM->pgm.s.pMappingsR0 = pCur->pNextR0;
248 }
249
250 /*
251 * Free the page table memory, clear page directory entries
252 * and free the page tables and node memory.
253 */
254 MMHyperFree(pVM, pCur->aPTs[0].pPTR3);
255 pgmR3MapClearPDEs(&pVM->pgm.s, pCur, pCur->GCPtr >> X86_PD_SHIFT);
256 MMHyperFree(pVM, pCur);
257
258 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
259 return VINF_SUCCESS;
260 }
261
262 /* done? */
263 if (pCur->GCPtr > GCPtr)
264 break;
265
266 /* next */
267 pPrev = pCur;
268 pCur = pCur->pNextR3;
269 }
270
271 AssertMsgFailed(("No mapping for %#x found!\n", GCPtr));
272 return VERR_INVALID_PARAMETER;
273}
274
275
276/**
277 * Checks whether a range of PDEs in the intermediate
278 * memory context are unused.
279 *
280 * We're talking 32-bit PDEs here.
281 *
282 * @returns true/false.
283 * @param pVM Pointer to the shared VM structure.
284 * @param iPD The first PDE in the range.
285 * @param cPTs The number of PDEs in the range.
286 */
287DECLINLINE(bool) pgmR3AreIntermediatePDEsUnused(PVM pVM, unsigned iPD, unsigned cPTs)
288{
289 if (pVM->pgm.s.pInterPD->a[iPD].n.u1Present)
290 return false;
291 while (cPTs > 1)
292 {
293 iPD++;
294 if (pVM->pgm.s.pInterPD->a[iPD].n.u1Present)
295 return false;
296 cPTs--;
297 }
298 return true;
299}
300
301
302/**
303 * Unlinks the mapping.
304 *
305 * The mapping *must* be in the list.
306 *
307 * @param pVM Pointer to the shared VM structure.
308 * @param pMapping The mapping to unlink.
309 */
310static void pgmR3MapUnlink(PVM pVM, PPGMMAPPING pMapping)
311{
312 PPGMMAPPING pAfterThis = pVM->pgm.s.pMappingsR3;
313 if (pAfterThis == pMapping)
314 {
315 /* head */
316 pVM->pgm.s.pMappingsR3 = pMapping->pNextR3;
317 pVM->pgm.s.pMappingsRC = pMapping->pNextRC;
318 pVM->pgm.s.pMappingsR0 = pMapping->pNextR0;
319 }
320 else
321 {
322 /* in the list */
323 while (pAfterThis->pNextR3 != pMapping)
324 {
325 pAfterThis = pAfterThis->pNextR3;
326 AssertReleaseReturnVoid(pAfterThis);
327 }
328
329 pAfterThis->pNextR3 = pMapping->pNextR3;
330 pAfterThis->pNextRC = pMapping->pNextRC;
331 pAfterThis->pNextR0 = pMapping->pNextR0;
332 }
333}
334
335
336/**
337 * Links the mapping.
338 *
339 * @param pVM Pointer to the shared VM structure.
340 * @param pMapping The mapping to link.
341 */
342static void pgmR3MapLink(PVM pVM, PPGMMAPPING pMapping)
343{
344 /*
345 * Find the list location (it's sorted by GCPtr) and link it in.
346 */
347 if ( !pVM->pgm.s.pMappingsR3
348 || pVM->pgm.s.pMappingsR3->GCPtr > pMapping->GCPtr)
349 {
350 /* head */
351 pMapping->pNextR3 = pVM->pgm.s.pMappingsR3;
352 pMapping->pNextRC = pVM->pgm.s.pMappingsRC;
353 pMapping->pNextR0 = pVM->pgm.s.pMappingsR0;
354 pVM->pgm.s.pMappingsR3 = pMapping;
355 pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pMapping);
356 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pMapping);
357 }
358 else
359 {
360 /* in the list */
361 PPGMMAPPING pAfterThis = pVM->pgm.s.pMappingsR3;
362 PPGMMAPPING pBeforeThis = pAfterThis->pNextR3;
363 while (pBeforeThis && pBeforeThis->GCPtr <= pMapping->GCPtr)
364 {
365 pAfterThis = pBeforeThis;
366 pBeforeThis = pBeforeThis->pNextR3;
367 }
368
369 pMapping->pNextR3 = pAfterThis->pNextR3;
370 pMapping->pNextRC = pAfterThis->pNextRC;
371 pMapping->pNextR0 = pAfterThis->pNextR0;
372 pAfterThis->pNextR3 = pMapping;
373 pAfterThis->pNextRC = MMHyperR3ToRC(pVM, pMapping);
374 pAfterThis->pNextR0 = MMHyperR3ToR0(pVM, pMapping);
375 }
376}
377
378
379/**
380 * Finalizes the intermediate context.
381 *
382 * This is called at the end of the ring-3 init and will construct the
383 * intermediate paging structures, relocating all the mappings in the process.
384 *
385 * @returns VBox status code.
386 * @param pVM Pointer to the shared VM structure.
387 * @thread EMT(0)
388 */
389VMMR3DECL(int) PGMR3FinalizeMappings(PVM pVM)
390{
391 AssertReturn(!pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
392 pVM->pgm.s.fFinalizedMappings = true;
393
394 /*
395 * Loop until all mappings have been finalized.
396 */
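 /* For each unfinalized mapping we first try the running hint iPDNext; if the
    PDEs there are taken or the owner's callback rejects the suggestion, we
    scan downwards from just below 4GB for a free run of PDEs it accepts.
    After each relocation the scan restarts from the list head, because the
    relocation may have reordered the list. */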
397 /*unsigned iPDNext = UINT32_C(0xc0000000) >> X86_PD_SHIFT;*/ /* makes CSAM/PATM freak out booting linux. :-/ */
398#if 0
399 unsigned iPDNext = MM_HYPER_AREA_ADDRESS >> X86_PD_SHIFT;
400#else
401 unsigned iPDNext = 1 << X86_PD_SHIFT; /* no hint, map them from the top. */
402#endif
403 PPGMMAPPING pCur;
404 do
405 {
406 pCur = pVM->pgm.s.pMappingsR3;
407 while (pCur)
408 {
409 if (!pCur->fFinalized)
410 {
411 /*
412 * Find a suitable location.
413 */
414 RTGCPTR const GCPtrOld = pCur->GCPtr;
415 const unsigned cPTs = pCur->cPTs;
416 unsigned iPDNew = iPDNext;
417 if ( iPDNew + cPTs >= X86_PG_ENTRIES /* exclude the last PD */
418 || !pgmR3AreIntermediatePDEsUnused(pVM, iPDNew, cPTs)
419 || !pCur->pfnRelocate(pVM, GCPtrOld, (RTGCPTR)iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
420 {
421 /* No luck, just scan down from 4GB-4MB, giving up at 4MB. */
422 iPDNew = X86_PG_ENTRIES - cPTs - 1;
423 while ( iPDNew > 0
424 && ( !pgmR3AreIntermediatePDEsUnused(pVM, iPDNew, cPTs)
425 || !pCur->pfnRelocate(pVM, GCPtrOld, (RTGCPTR)iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
426 )
427 iPDNew--;
428 AssertLogRelReturn(iPDNew != 0, VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
429 }
430
431 /*
432 * Relocate it (something akin to pgmR3MapRelocate).
433 */
434 pgmR3MapSetPDEs(pVM, pCur, iPDNew);
435
436 /* unlink the mapping, update the entry and relink it. */
437 pgmR3MapUnlink(pVM, pCur);
438
439 RTGCPTR const GCPtrNew = (RTGCPTR)iPDNew << X86_PD_SHIFT;
440 pCur->GCPtr = GCPtrNew;
441 pCur->GCPtrLast = GCPtrNew + pCur->cb - 1;
442 pCur->fFinalized = true;
443
444 pgmR3MapLink(pVM, pCur);
445
446 /* Finally work the callback. */
447 pCur->pfnRelocate(pVM, GCPtrOld, GCPtrNew, PGMRELOCATECALL_RELOCATE, pCur->pvUser);
448
449 /*
450 * The list order might have changed, start from the beginning again.
451 */
452 iPDNext = iPDNew + cPTs;
453 break;
454 }
455
456 /* next */
457 pCur = pCur->pNextR3;
458 }
459 } while (pCur);
460
461 return VINF_SUCCESS;
462}
463
464
465/**
466 * Gets the size of the current guest mappings if they were to be
467 * put next to one another.
468 *
469 * @returns VBox status code.
470 * @param pVM The VM.
471 * @param pcb Where to store the size.
472 */
473VMMR3DECL(int) PGMR3MappingsSize(PVM pVM, uint32_t *pcb)
474{
475 RTGCPTR cb = 0;
476 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
477 cb += pCur->cb;
478
479 *pcb = cb;
480 AssertReturn(*pcb == cb, VERR_NUMBER_TOO_BIG);
481 Log(("PGMR3MappingsSize: return %d (%#x) bytes\n", cb, cb));
482 return VINF_SUCCESS;
483}
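/*
 * Sketch of how PGMR3MappingsSize is typically paired with PGMR3MappingsFix
 * below (the reserved base address is an illustrative assumption; in practice
 * the guest, e.g. via the Guest Additions, supplies a region it has reserved):
 *
 *     uint32_t cb;
 *     int rc = PGMR3MappingsSize(pVM, &cb);              // total size the mappings need
 *     AssertRCReturn(rc, rc);
 *     RTGCPTR GCPtrBase = GCPtrGuestReserved;            // 4MB aligned, at least cb bytes
 *     rc = PGMR3MappingsFix(pVM, GCPtrBase, cb);         // may return VERR_PGM_MAPPINGS_FIX_*
 */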
484
485
486/**
487 * Fixes the guest context mappings in a range reserved from the Guest OS.
488 *
489 * @returns VBox status code.
490 * @param pVM The VM.
491 * @param GCPtrBase The address of the reserved range of guest memory.
492 * @param cb The size of the range starting at GCPtrBase.
493 */
494VMMR3DECL(int) PGMR3MappingsFix(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb)
495{
496 Log(("PGMR3MappingsFix: GCPtrBase=%#x cb=%#x\n", GCPtrBase, cb));
497
498 /* Ignore the Guest Additions mapping fix call when VT-x/AMD-V is active. */
499 if ( pVM->pgm.s.fMappingsFixed
500 && HWACCMR3IsActive(pVM))
501 return VINF_SUCCESS;
502
503 /*
504 * This is all or nothing at all. So, a tiny bit of paranoia first.
505 */
506 if (GCPtrBase & X86_PAGE_4M_OFFSET_MASK)
507 {
508 AssertMsgFailed(("GCPtrBase (%#x) has to be aligned on a 4MB address!\n", GCPtrBase));
509 return VERR_INVALID_PARAMETER;
510 }
511 if (!cb || (cb & X86_PAGE_4M_OFFSET_MASK))
512 {
513 AssertMsgFailed(("cb (%#x) is 0 or not aligned on a 4MB address!\n", cb));
514 return VERR_INVALID_PARAMETER;
515 }
516
517 /*
518 * Before we do anything we'll do a forced PD sync to try to make sure any
519 * pending relocations caused by these mappings have been resolved.
520 */
521 PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM), true);
522
523 /*
524 * Check that it's not conflicting with a core code mapping in the intermediate page table.
525 */
526 unsigned iPDNew = GCPtrBase >> X86_PD_SHIFT;
527 unsigned i = cb >> X86_PD_SHIFT;
528 while (i-- > 0)
529 {
530 if (pVM->pgm.s.pInterPD->a[iPDNew + i].n.u1Present)
531 {
532 /* Check that it's not one of our mappings. */
533 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
534 while (pCur)
535 {
536 if (iPDNew + i - (pCur->GCPtr >> X86_PD_SHIFT) < (pCur->cb >> X86_PD_SHIFT))
537 break;
538 pCur = pCur->pNextR3;
539 }
540 if (!pCur)
541 {
542 LogRel(("PGMR3MappingsFix: Conflicts with intermediate PDE %#x (GCPtrBase=%RGv cb=%#zx). The guest should retry.\n",
543 iPDNew + i, GCPtrBase, cb));
544 return VERR_PGM_MAPPINGS_FIX_CONFLICT;
545 }
546 }
547 }
548
549 /*
550 * In PAE guest / PAE shadow mode, make sure we don't cross page directories.
551 */
552 if ( ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
553 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX)
554 && ( pVM->pgm.s.enmShadowMode == PGMMODE_PAE
555 || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX))
556 {
557 unsigned iPdptBase = GCPtrBase >> X86_PDPT_SHIFT;
558 unsigned iPdptLast = (GCPtrBase + cb - 1) >> X86_PDPT_SHIFT;
559 if (iPdptBase != iPdptLast)
560 {
561 LogRel(("PGMR3MappingsFix: Crosses PD boundary; iPdptBase=%#x iPdptLast=%#x (GCPtrBase=%RGv cb=%#zx). The guest should retry.\n",
562 iPdptBase, iPdptLast, GCPtrBase, cb));
563 return VERR_PGM_MAPPINGS_FIX_CONFLICT;
564 }
565 }
566
567 /*
568 * Loop the mappings and check that they all agree on their new locations.
569 */
570 RTGCPTR GCPtrCur = GCPtrBase;
571 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
572 while (pCur)
573 {
574 if (!pCur->pfnRelocate(pVM, pCur->GCPtr, GCPtrCur, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
575 {
576 AssertMsgFailed(("The suggested fixed address %#x was rejected by '%s'!\n", GCPtrCur, pCur->pszDesc));
577 return VERR_PGM_MAPPINGS_FIX_REJECTED;
578 }
579 /* next */
580 GCPtrCur += pCur->cb;
581 pCur = pCur->pNextR3;
582 }
583 if (GCPtrCur > GCPtrBase + cb)
584 {
585 AssertMsgFailed(("cb (%#x) is less than the required range %#x!\n", cb, GCPtrCur - GCPtrBase));
586 return VERR_PGM_MAPPINGS_FIX_TOO_SMALL;
587 }
588
589 /*
590 * Loop the table assigning the mappings to the passed in memory
591 * and call their relocator callback.
592 */
593 GCPtrCur = GCPtrBase;
594 pCur = pVM->pgm.s.pMappingsR3;
595 while (pCur)
596 {
597 unsigned iPDOld = pCur->GCPtr >> X86_PD_SHIFT;
598 iPDNew = GCPtrCur >> X86_PD_SHIFT;
599
600 /*
601 * Relocate the page table(s).
602 */
603 pgmR3MapClearPDEs(&pVM->pgm.s, pCur, iPDOld);
604 pgmR3MapSetPDEs(pVM, pCur, iPDNew);
605
606 /*
607 * Update the entry.
608 */
609 pCur->GCPtr = GCPtrCur;
610 pCur->GCPtrLast = GCPtrCur + pCur->cb - 1;
611
612 /*
613 * Callback to execute the relocation.
614 */
615 pCur->pfnRelocate(pVM, iPDOld << X86_PD_SHIFT, iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_RELOCATE, pCur->pvUser);
616
617 /*
618 * Advance.
619 */
620 GCPtrCur += pCur->cb;
621 pCur = pCur->pNextR3;
622 }
623
624#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
625 /*
626 * Turn off CR3 update monitoring.
627 */
628 int rc2 = PGM_GST_PFN(UnmonitorCR3, pVM)(pVM);
629 AssertRC(rc2);
630#endif
631
632 /*
633 * Mark the mappings as fixed and return.
634 */
635 pVM->pgm.s.fMappingsFixed = true;
636 pVM->pgm.s.GCPtrMappingFixed = GCPtrBase;
637 pVM->pgm.s.cbMappingFixed = cb;
638 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
639 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
640 return VINF_SUCCESS;
641}
642
643
644/**
645 * Unfixes the mappings.
646 * After calling this function, mapping conflict detection will be enabled.
647 *
648 * @returns VBox status code.
649 * @param pVM The VM.
650 */
651VMMR3DECL(int) PGMR3MappingsUnfix(PVM pVM)
652{
653 Log(("PGMR3MappingsUnfix: fMappingsFixed=%d\n", pVM->pgm.s.fMappingsFixed));
654
655 /* Refuse in VT-x/AMD-V mode. */
656 if (HWACCMR3IsActive(pVM))
657 return VINF_SUCCESS;
658
659 pVM->pgm.s.fMappingsFixed = false;
660 pVM->pgm.s.GCPtrMappingFixed = 0;
661 pVM->pgm.s.cbMappingFixed = 0;
662 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
663
664 /*
665 * Re-enable the CR3 monitoring.
666 *
667 * Paranoia: We flush the page pool before doing that because Windows
668 * is using the CR3 page both as a PD and a PT, e.g. the pool may
669 * be monitoring it.
670 */
671#ifdef PGMPOOL_WITH_MONITORING
672 pgmPoolFlushAll(pVM);
673#endif
674 /* Remap CR3 as we have just flushed the CR3 shadow PML4 in case we're in long mode. */
675 int rc = PGM_GST_PFN(MapCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
676 AssertRCSuccess(rc);
677
678#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
679 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
680 AssertRCSuccess(rc);
681#endif
682 return VINF_SUCCESS;
683}
684
685
686/**
687 * Map pages into the intermediate context (switcher code).
688 * These pages are mapped at both the given virtual address and at
689 * the physical address (for identity mapping).
690 *
691 * @returns VBox status code.
692 * @param pVM The virtual machine.
693 * @param Addr Intermediate context address of the mapping.
694 * @param HCPhys Start of the range of physical pages. This must be entirely below 4GB!
695 * @param cbPages Number of bytes to map.
696 *
697 * @remark This API shall not be used for anything but mapping the switcher code.
698 */
699VMMR3DECL(int) PGMR3MapIntermediate(PVM pVM, RTUINTPTR Addr, RTHCPHYS HCPhys, unsigned cbPages)
700{
701 LogFlow(("PGMR3MapIntermediate: Addr=%RTptr HCPhys=%RHp cbPages=%#x\n", Addr, HCPhys, cbPages));
702
703 /*
704 * Adjust input.
705 */
706 cbPages += (uint32_t)HCPhys & PAGE_OFFSET_MASK;
707 cbPages = RT_ALIGN(cbPages, PAGE_SIZE);
708 HCPhys &= X86_PTE_PAE_PG_MASK;
709 Addr &= PAGE_BASE_MASK;
710 /* We only care about the first 4GB, because on AMD64 we'll be repeating them all over the address space. */
711 uint32_t uAddress = (uint32_t)Addr;
712
713 /*
714 * Assert input and state.
715 */
716 AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));
717 AssertMsg(pVM->pgm.s.pInterPD, ("Bad init order, paging.\n"));
718 AssertMsg(cbPages <= (512 << PAGE_SHIFT), ("The mapping is too big %d bytes\n", cbPages));
719 AssertMsg(HCPhys < _4G && HCPhys + cbPages < _4G, ("Addr=%RTptr HCPhys=%RHp cbPages=%d\n", Addr, HCPhys, cbPages));
720 AssertReturn(!pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
721
722 /*
723 * Check for internal conflicts between the virtual address and the physical address.
724 * A 1:1 mapping is fine, but partial overlapping is a no-no.
725 */
726 if ( uAddress != HCPhys
727 && ( uAddress < HCPhys
728 ? HCPhys - uAddress < cbPages
729 : uAddress - HCPhys < cbPages
730 )
731 )
732 AssertLogRelMsgFailedReturn(("Addr=%RTptr HCPhys=%RHp cbPages=%d\n", Addr, HCPhys, cbPages),
733 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
734
735 const unsigned cPages = cbPages >> PAGE_SHIFT;
736 int rc = pgmR3MapIntermediateCheckOne(pVM, uAddress, cPages, pVM->pgm.s.apInterPTs[0], pVM->pgm.s.apInterPaePTs[0]);
737 if (RT_FAILURE(rc))
738 return rc;
739 rc = pgmR3MapIntermediateCheckOne(pVM, (uintptr_t)HCPhys, cPages, pVM->pgm.s.apInterPTs[1], pVM->pgm.s.apInterPaePTs[1]);
740 if (RT_FAILURE(rc))
741 return rc;
742
743 /*
744 * Everything's fine, do the mapping.
745 */
746 pgmR3MapIntermediateDoOne(pVM, uAddress, HCPhys, cPages, pVM->pgm.s.apInterPTs[0], pVM->pgm.s.apInterPaePTs[0]);
747 pgmR3MapIntermediateDoOne(pVM, (uintptr_t)HCPhys, HCPhys, cPages, pVM->pgm.s.apInterPTs[1], pVM->pgm.s.apInterPaePTs[1]);
748
749 return VINF_SUCCESS;
750}
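/*
 * Illustrative call of PGMR3MapIntermediate (the values are assumptions; per
 * the remark above the only real caller is the switcher setup during VMM
 * init, before PGMR3FinalizeMappings):
 *
 *     // Maps cbCore bytes at Addr and identity-maps the same pages at HCPhysCore.
 *     int rc = PGMR3MapIntermediate(pVM, Addr, HCPhysCore, cbCore);
 *     AssertRCReturn(rc, rc);
 */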
751
752
753/**
754 * Validates that there are no conflicts for this mapping into the intermediate context.
755 *
756 * @returns VBox status code.
757 * @param pVM VM handle.
758 * @param uAddress Address of the mapping.
759 * @param cPages Number of pages.
760 * @param pPTDefault Pointer to the default page table for this mapping.
761 * @param pPTPaeDefault Pointer to the default PAE page table for this mapping.
762 */
763static int pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault)
764{
765 AssertMsg((uAddress >> X86_PD_SHIFT) + cPages <= 1024, ("64-bit fixme\n"));
766
767 /*
768 * Check that the ranges are available.
769 * (This code doesn't have to be fast.)
770 */
771 while (cPages > 0)
772 {
773 /*
774 * 32-Bit.
775 */
776 unsigned iPDE = (uAddress >> X86_PD_SHIFT) & X86_PD_MASK;
777 unsigned iPTE = (uAddress >> X86_PT_SHIFT) & X86_PT_MASK;
778 PX86PT pPT = pPTDefault;
779 if (pVM->pgm.s.pInterPD->a[iPDE].u)
780 {
781 RTHCPHYS HCPhysPT = pVM->pgm.s.pInterPD->a[iPDE].u & X86_PDE_PG_MASK;
782 if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[0]))
783 pPT = pVM->pgm.s.apInterPTs[0];
784 else if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[1]))
785 pPT = pVM->pgm.s.apInterPTs[1];
786 else
787 {
788 /** @todo this must be handled with a relocation of the conflicting mapping!
789 * Which of course cannot be done because we're in the middle of the initialization. bad design! */
790 AssertLogRelMsgFailedReturn(("Conflict between core code and PGMR3Mapping(). uAddress=%RHv\n", uAddress),
791 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
792 }
793 }
794 if (pPT->a[iPTE].u)
795 AssertLogRelMsgFailedReturn(("Conflict iPTE=%#x iPDE=%#x uAddress=%RHv pPT->a[iPTE].u=%RX32\n", iPTE, iPDE, uAddress, pPT->a[iPTE].u),
796 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
797
798 /*
799 * PAE.
800 */
801 const unsigned iPDPE= (uAddress >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
802 iPDE = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
803 iPTE = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;
804 Assert(iPDPE < 4);
805 Assert(pVM->pgm.s.apInterPaePDs[iPDPE]);
806 PX86PTPAE pPTPae = pPTPaeDefault;
807 if (pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u)
808 {
809 RTHCPHYS HCPhysPT = pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u & X86_PDE_PAE_PG_MASK;
810 if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[0]))
811 pPTPae = pVM->pgm.s.apInterPaePTs[0];
812 else if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[1]))
813 pPTPae = pVM->pgm.s.apInterPaePTs[1];
814 else
815 {
816 /** @todo this must be handled with a relocation of the conflicting mapping!
817 * Which of course cannot be done because we're in the middle of the initialization. bad design! */
818 AssertLogRelMsgFailedReturn(("Conflict between core code and PGMR3Mapping(). uAddress=%RHv\n", uAddress),
819 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
820 }
821 }
822 if (pPTPae->a[iPTE].u)
823 AssertLogRelMsgFailedReturn(("Conflict iPTE=%#x iPDE=%#x uAddress=%RHv pPTPae->a[iPTE].u=%#RX64\n", iPTE, iPDE, uAddress, pPTPae->a[iPTE].u),
824 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
825
826 /* next */
827 uAddress += PAGE_SIZE;
828 cPages--;
829 }
830
831 return VINF_SUCCESS;
832}
833
834
835
836/**
837 * Sets up the intermediate page tables for a verified mapping.
838 *
839 * @param pVM VM handle.
840 * @param uAddress Address of the mapping.
841 * @param HCPhys The physical address of the page range.
842 * @param cPages Number of pages.
843 * @param pPTDefault Pointer to the default page table for this mapping.
844 * @param pPTPaeDefault Pointer to the default PAE page table for this mapping.
845 */
846static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault)
847{
848 while (cPages > 0)
849 {
850 /*
851 * 32-Bit.
852 */
853 unsigned iPDE = (uAddress >> X86_PD_SHIFT) & X86_PD_MASK;
854 unsigned iPTE = (uAddress >> X86_PT_SHIFT) & X86_PT_MASK;
855 PX86PT pPT;
856 if (pVM->pgm.s.pInterPD->a[iPDE].u)
857 pPT = (PX86PT)MMPagePhys2Page(pVM, pVM->pgm.s.pInterPD->a[iPDE].u & X86_PDE_PG_MASK);
858 else
859 {
860 pVM->pgm.s.pInterPD->a[iPDE].u = X86_PDE_P | X86_PDE_A | X86_PDE_RW
861 | (uint32_t)MMPage2Phys(pVM, pPTDefault);
862 pPT = pPTDefault;
863 }
864 pPT->a[iPTE].u = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D | (uint32_t)HCPhys;
865
866 /*
867 * PAE
868 */
869 const unsigned iPDPE= (uAddress >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
870 iPDE = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
871 iPTE = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;
872 Assert(iPDPE < 4);
873 Assert(pVM->pgm.s.apInterPaePDs[iPDPE]);
874 PX86PTPAE pPTPae;
875 if (pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u)
876 pPTPae = (PX86PTPAE)MMPagePhys2Page(pVM, pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u & X86_PDE_PAE_PG_MASK);
877 else
878 {
879 pPTPae = pPTPaeDefault;
880 pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u = X86_PDE_P | X86_PDE_A | X86_PDE_RW
881 | MMPage2Phys(pVM, pPTPaeDefault);
882 }
883 pPTPae->a[iPTE].u = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D | HCPhys;
884
885 /* next */
886 cPages--;
887 HCPhys += PAGE_SIZE;
888 uAddress += PAGE_SIZE;
889 }
890}
891
892
893/**
894 * Clears all PDEs involved with the mapping.
895 *
896 * @param pPGM Pointer to the PGM instance data.
897 * @param pMap Pointer to the mapping in question.
898 * @param iOldPDE The index of the 32-bit PDE corresponding to the base of the mapping.
899 */
900static void pgmR3MapClearPDEs(PPGM pPGM, PPGMMAPPING pMap, unsigned iOldPDE)
901{
902 unsigned i = pMap->cPTs;
903 iOldPDE += i;
904 while (i-- > 0)
905 {
906 iOldPDE--;
907
908 /*
909 * 32-bit.
910 */
911 pPGM->pInterPD->a[iOldPDE].u = 0;
912#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
913 pPGM->pShw32BitPdR3->a[iOldPDE].u = 0;
914#endif
915 /*
916 * PAE.
917 */
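 /* Each 32-bit PDE covers 4MB, i.e. two 2MB PAE PDEs, and the four 512-entry
    PAE page directories span the same 4GB as the single 1024-entry 32-bit
    directory; hence iPD = iOldPDE / 256 selects the PAE PD and
    iPDE = iOldPDE * 2 % 512 the first of the two PAE PDEs. */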
918 const unsigned iPD = iOldPDE / 256;
919 unsigned iPDE = iOldPDE * 2 % 512;
920 pPGM->apInterPaePDs[iPD]->a[iPDE].u = 0;
921#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
922 pPGM->apShwPaePDsR3[iPD]->a[iPDE].u = 0;
923#endif
924 iPDE++;
925 pPGM->apInterPaePDs[iPD]->a[iPDE].u = 0;
926#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
927 pPGM->apShwPaePDsR3[iPD]->a[iPDE].u = 0;
928
929 /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
930 pPGM->pShwPaePdptR3->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
931#endif
932 }
933}
934
935
936/**
937 * Sets all PDEs involved with the mapping.
938 *
939 * @param pVM The VM handle.
940 * @param pMap Pointer to the mapping in question.
941 * @param iNewPDE The index of the 32-bit PDE corresponding to the base of the mapping.
942 */
943static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
944{
945 PPGM pPGM = &pVM->pgm.s;
946
947 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s) || PGMGetGuestMode(pVM) <= PGMMODE_PAE_NX);
948
949 /*
950 * Init the page tables and insert them into the page directories.
951 */
952 unsigned i = pMap->cPTs;
953 iNewPDE += i;
954 while (i-- > 0)
955 {
956 iNewPDE--;
957
958 /*
959 * 32-bit.
960 */
961#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
962 if ( pgmMapAreMappingsEnabled(&pVM->pgm.s)
963 && pPGM->pShw32BitPdR3->a[iNewPDE].n.u1Present)
964 pgmPoolFree(pVM, pPGM->pShw32BitPdR3->a[iNewPDE].u & X86_PDE_PG_MASK, PGMPOOL_IDX_PD, iNewPDE);
965#endif
966 X86PDE Pde;
967 /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
968 Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
969 pPGM->pInterPD->a[iNewPDE] = Pde;
970#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
971 if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
972 pPGM->pShw32BitPdR3->a[iNewPDE] = Pde;
973#endif
974 /*
975 * PAE.
976 */
977 const unsigned iPD = iNewPDE / 256;
978 unsigned iPDE = iNewPDE * 2 % 512;
979#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
980 if ( pgmMapAreMappingsEnabled(&pVM->pgm.s)
981 && pPGM->apShwPaePDsR3[iPD]->a[iPDE].n.u1Present)
982 pgmPoolFree(pVM, pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & X86_PDE_PAE_PG_MASK, PGMPOOL_IDX_PAE_PD, iNewPDE * 2);
983#endif
984 X86PDEPAE PdePae0;
985 PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
986 pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae0;
987#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
988 if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
989 pPGM->apShwPaePDsR3[iPD]->a[iPDE] = PdePae0;
990#endif
991 iPDE++;
992#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
993 if ( pgmMapAreMappingsEnabled(&pVM->pgm.s)
994 && pPGM->apShwPaePDsR3[iPD]->a[iPDE].n.u1Present)
995 pgmPoolFree(pVM, pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & X86_PDE_PAE_PG_MASK, PGMPOOL_IDX_PAE_PD, iNewPDE * 2 + 1);
996#endif
997 X86PDEPAE PdePae1;
998 PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
999 pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae1;
1000#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1001 if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
1002 {
1003 pPGM->apShwPaePDsR3[iPD]->a[iPDE] = PdePae1;
1004
1005 /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
1006 pPGM->pShwPaePdptR3->a[iPD].u |= PGM_PLXFLAGS_MAPPING;
1007 }
1008#endif
1009 }
1010}
1011
1012
1013/**
1014 * Relocates a mapping to a new address.
1015 *
1016 * @param pVM VM handle.
1017 * @param pMapping The mapping to relocate.
1018 * @param GCPtrOldMapping The address of the start of the old mapping.
1019 * @param GCPtrNewMapping The address of the start of the new mapping.
1020 */
1021void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping, RTGCPTR GCPtrNewMapping)
1022{
1023 unsigned iPDOld = GCPtrOldMapping >> X86_PD_SHIFT;
1024 unsigned iPDNew = GCPtrNewMapping >> X86_PD_SHIFT;
1025
1026 Log(("PGM: Relocating %s from %RGv to %RGv\n", pMapping->pszDesc, GCPtrOldMapping, GCPtrNewMapping));
1027 Assert(((unsigned)iPDOld << X86_PD_SHIFT) == pMapping->GCPtr);
1028
1029 /*
1030 * Relocate the page table(s).
1031 */
1032 pgmR3MapClearPDEs(&pVM->pgm.s, pMapping, iPDOld);
1033 pgmR3MapSetPDEs(pVM, pMapping, iPDNew);
1034
1035 /*
1036 * Update and resort the mapping list.
1037 */
1038
1039 /* Find previous mapping for pMapping, put result into pPrevMap. */
1040 PPGMMAPPING pPrevMap = NULL;
1041 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
1042 while (pCur && pCur != pMapping)
1043 {
1044 /* next */
1045 pPrevMap = pCur;
1046 pCur = pCur->pNextR3;
1047 }
1048 Assert(pCur);
1049
1050 /* Find the first mapping whose address is >= the new address of pMapping. */
1051 RTGCPTR GCPtrNew = iPDNew << X86_PD_SHIFT;
1052 PPGMMAPPING pPrev = NULL;
1053 pCur = pVM->pgm.s.pMappingsR3;
1054 while (pCur && pCur->GCPtr < GCPtrNew)
1055 {
1056 /* next */
1057 pPrev = pCur;
1058 pCur = pCur->pNextR3;
1059 }
1060
1061 if (pCur != pMapping && pPrev != pMapping)
1062 {
1063 /*
1064 * Unlink.
1065 */
1066 if (pPrevMap)
1067 {
1068 pPrevMap->pNextR3 = pMapping->pNextR3;
1069 pPrevMap->pNextRC = pMapping->pNextRC;
1070 pPrevMap->pNextR0 = pMapping->pNextR0;
1071 }
1072 else
1073 {
1074 pVM->pgm.s.pMappingsR3 = pMapping->pNextR3;
1075 pVM->pgm.s.pMappingsRC = pMapping->pNextRC;
1076 pVM->pgm.s.pMappingsR0 = pMapping->pNextR0;
1077 }
1078
1079 /*
1080 * Link
1081 */
1082 pMapping->pNextR3 = pCur;
1083 if (pPrev)
1084 {
1085 pMapping->pNextRC = pPrev->pNextRC;
1086 pMapping->pNextR0 = pPrev->pNextR0;
1087 pPrev->pNextR3 = pMapping;
1088 pPrev->pNextRC = MMHyperR3ToRC(pVM, pMapping);
1089 pPrev->pNextR0 = MMHyperR3ToR0(pVM, pMapping);
1090 }
1091 else
1092 {
1093 pMapping->pNextRC = pVM->pgm.s.pMappingsRC;
1094 pMapping->pNextR0 = pVM->pgm.s.pMappingsR0;
1095 pVM->pgm.s.pMappingsR3 = pMapping;
1096 pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pMapping);
1097 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pMapping);
1098 }
1099 }
1100
1101 /*
1102 * Update the entry.
1103 */
1104 pMapping->GCPtr = GCPtrNew;
1105 pMapping->GCPtrLast = GCPtrNew + pMapping->cb - 1;
1106
1107 /*
1108 * Callback to execute the relocation.
1109 */
1110 pMapping->pfnRelocate(pVM, iPDOld << X86_PD_SHIFT, iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_RELOCATE, pMapping->pvUser);
1111}
1112
1113
1114/**
1115 * Resolves a conflict between a page table based GC mapping and
1116 * the Guest OS page tables. (32 bits version)
1117 *
1118 * @returns VBox status code.
1119 * @param pVM VM Handle.
1120 * @param pMapping The mapping which conflicts.
1121 * @param pPDSrc The page directory of the guest OS.
1122 * @param GCPtrOldMapping The address of the start of the current mapping.
1123 */
1124int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping)
1125{
1126 STAM_PROFILE_START(&pVM->pgm.s.StatR3ResolveConflict, a);
1127
1128 /*
1129 * Scan for free page directory entries.
1130 *
1131 * Note that we do not support mappings at the very end of the
1132 * address space since that will break our GCPtrEnd assumptions.
1133 */
1134 const unsigned cPTs = pMapping->cPTs;
1135 unsigned iPDNew = RT_ELEMENTS(pPDSrc->a) - cPTs; /* (+ 1 - 1) */
1136 while (iPDNew-- > 0)
1137 {
1138 if (pPDSrc->a[iPDNew].n.u1Present)
1139 continue;
1140 if (cPTs > 1)
1141 {
1142 bool fOk = true;
1143 for (unsigned i = 1; fOk && i < cPTs; i++)
1144 if (pPDSrc->a[iPDNew + i].n.u1Present)
1145 fOk = false;
1146 if (!fOk)
1147 continue;
1148 }
1149
1150 /*
1151 * Check that it's not conflicting with an intermediate page table mapping.
1152 */
1153 bool fOk = true;
1154 unsigned i = cPTs;
1155 while (fOk && i-- > 0)
1156 fOk = !pVM->pgm.s.pInterPD->a[iPDNew + i].n.u1Present;
1157 if (!fOk)
1158 continue;
1159 /** @todo AMD64 should check the PAE directories and skip the 32bit stuff. */
1160
1161 /*
1162 * Ask for the mapping.
1163 */
1164 RTGCPTR GCPtrNewMapping = iPDNew << X86_PD_SHIFT;
1165
1166 if (pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))
1167 {
1168 pgmR3MapRelocate(pVM, pMapping, GCPtrOldMapping, GCPtrNewMapping);
1169 STAM_PROFILE_STOP(&pVM->pgm.s.StatR3ResolveConflict, a);
1170 return VINF_SUCCESS;
1171 }
1172 }
1173
1174 STAM_PROFILE_STOP(&pVM->pgm.s.StatR3ResolveConflict, a);
1175 AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, GCPtrOldMapping, cPTs));
1176 return VERR_PGM_NO_HYPERVISOR_ADDRESS;
1177}
1178
1179
1180/**
1181 * Resolves a conflict between a page table based GC mapping and
1182 * the Guest OS page tables. (PAE bits version)
1183 *
1184 * @returns VBox status code.
1185 * @param pVM VM Handle.
1186 * @param pMapping The mapping which conflicts.
1187 * @param GCPtrOldMapping The address of the start of the current mapping.
1188 */
1189int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping)
1190{
1191 STAM_PROFILE_START(&pVM->pgm.s.StatR3ResolveConflict, a);
1192
1193 for (int iPDPTE = X86_PG_PAE_PDPE_ENTRIES - 1; iPDPTE >= 0; iPDPTE--)
1194 {
1195 unsigned iPDSrc;
1196 PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, (RTGCPTR32)iPDPTE << X86_PDPT_SHIFT, &iPDSrc, NULL);
1197
1198 /*
1199 * Scan for free page directory entries.
1200 *
1201 * Note that we do not support mappings at the very end of the
1202 * address space since that will break our GCPtrEnd assumptions.
1203 * Nor do we support mappings crossing page directories.
1204 */
1205 const unsigned cPTs = pMapping->cb >> X86_PD_PAE_SHIFT;
1206 unsigned iPDNew = RT_ELEMENTS(pPDSrc->a) - cPTs; /* (+ 1 - 1) */
1207
1208 while (iPDNew-- > 0)
1209 {
1210 /* Ugly assumption that mappings start on a 4 MB boundary. */
1211 if (iPDNew & 1)
1212 continue;
1213
1214 if (pPDSrc)
1215 {
1216 if (pPDSrc->a[iPDNew].n.u1Present)
1217 continue;
1218 if (cPTs > 1)
1219 {
1220 bool fOk = true;
1221 for (unsigned i = 1; fOk && i < cPTs; i++)
1222 if (pPDSrc->a[iPDNew + i].n.u1Present)
1223 fOk = false;
1224 if (!fOk)
1225 continue;
1226 }
1227 }
1228 /*
1229 * Check that it's not conflicting with an intermediate page table mapping.
1230 */
1231 bool fOk = true;
1232 unsigned i = cPTs;
1233 while (fOk && i-- > 0)
1234 fOk = !pVM->pgm.s.apInterPaePDs[iPDPTE]->a[iPDNew + i].n.u1Present;
1235 if (!fOk)
1236 continue;
1237
1238 /*
1239 * Ask for the mapping.
1240 */
1241 RTGCPTR GCPtrNewMapping = ((RTGCPTR32)iPDPTE << X86_PDPT_SHIFT) + (iPDNew << X86_PD_PAE_SHIFT);
1242
1243 if (pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))
1244 {
1245 pgmR3MapRelocate(pVM, pMapping, GCPtrOldMapping, GCPtrNewMapping);
1246 STAM_PROFILE_STOP(&pVM->pgm.s.StatR3ResolveConflict, a);
1247 return VINF_SUCCESS;
1248 }
1249 }
1250 }
1251 STAM_PROFILE_STOP(&pVM->pgm.s.StatR3ResolveConflict, a);
1252 AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, GCPtrOldMapping, pMapping->cb >> X86_PD_PAE_SHIFT));
1253 return VERR_PGM_NO_HYPERVISOR_ADDRESS;
1254}
1255
1256
1257/**
1258 * Checks guest PD for conflicts with VMM GC mappings.
1259 *
1260 * @returns true if conflict detected.
1261 * @returns false if not.
1262 * @param pVM The virtual machine.
1263 * @param cr3 Guest context CR3 register.
1264 * @param fRawR0 Whether RawR0 is enabled or not.
1265 */
1266VMMR3DECL(bool) PGMR3MapHasConflicts(PVM pVM, uint64_t cr3, bool fRawR0) /** @todo how many HasConflict constructs do we really need? */
1267{
1268 /*
1269 * Can skip this if mappings are safely fixed.
1270 */
1271 if (pVM->pgm.s.fMappingsFixed)
1272 return false;
1273
1274 PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
1275 Assert(enmGuestMode <= PGMMODE_PAE_NX);
1276
1277 /*
1278 * Iterate mappings.
1279 */
1280 if (enmGuestMode == PGMMODE_32_BIT)
1281 {
1282 /*
1283 * Resolve the page directory.
1284 */
1285 PX86PD pPD = pVM->pgm.s.pGst32BitPdR3;
1286 Assert(pPD);
1287 Assert(pPD == (PX86PD)PGMPhysGCPhys2R3PtrAssert(pVM, cr3 & X86_CR3_PAGE_MASK, sizeof(*pPD)));
1288
1289 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1290 {
1291 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
1292 unsigned iPT = pCur->cPTs;
1293 while (iPT-- > 0)
1294 if ( pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
1295 && (fRawR0 || pPD->a[iPDE + iPT].n.u1User))
1296 {
1297 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
1298 Log(("PGMR3MapHasConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
1299 " iPDE=%#x iPT=%#x PDE=%RGp.\n",
1300 (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
1301 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
1302 return true;
1303 }
1304 }
1305 }
1306 else if ( enmGuestMode == PGMMODE_PAE
1307 || enmGuestMode == PGMMODE_PAE_NX)
1308 {
1309 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1310 {
1311 RTGCPTR GCPtr = pCur->GCPtr;
1312
1313 unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
1314 while (iPT-- > 0)
1315 {
1316 X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
1317
1318 if ( Pde.n.u1Present
1319 && (fRawR0 || Pde.n.u1User))
1320 {
1321 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
1322 Log(("PGMR3MapHasConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
1323 " PDE=%016RX64.\n",
1324 GCPtr, pCur->pszDesc, Pde.u));
1325 return true;
1326 }
1327 GCPtr += (1 << X86_PD_PAE_SHIFT);
1328 }
1329 }
1330 }
1331 else
1332 AssertFailed();
1333
1334 return false;
1335}
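/*
 * Hypothetical caller sketch (the real call sites are elsewhere in PGM; this
 * only illustrates the intent): a detected conflict means the mappings must
 * be relocated before reentering the guest, which the VM_FF_PGM_SYNC_CR3
 * force-action flag requests.
 *
 *     if (PGMR3MapHasConflicts(pVM, CPUMGetGuestCR3(pVM), fRawR0))
 *         VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
 */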
1336
1337#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1338/**
1339 * Apply the hypervisor mappings to the active CR3.
1340 *
1341 * @returns VBox status.
1342 * @param pVM The virtual machine.
1343 */
1344VMMR3DECL(int) PGMR3MapActivate(PVM pVM)
1345{
1346 /*
1347 * Can skip this if mappings are safely fixed.
1348 */
1349 if (pVM->pgm.s.fMappingsFixed)
1350 return VINF_SUCCESS;
1351
1352 PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
1353 Assert(enmGuestMode <= PGMMODE_PAE_NX);
1354
1355 /*
1356 * Iterate mappings.
1357 */
1358 if (enmGuestMode == PGMMODE_32_BIT)
1359 {
1360 /*
1361 * Resolve the page directory.
1362 */
1363 PX86PD pPD = (PX86PD)pVM->pgm.s.pShwPageCR3R3;
1364 Assert(pPD);
1365
1366 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1367 {
1368 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
1369 unsigned iPT = pCur->cPTs;
1370 while (iPT-- > 0)
1371 pPD->a[iPDE + iPT].u = 0;
1372 }
1373 }
1374 else if ( enmGuestMode == PGMMODE_PAE
1375 || enmGuestMode == PGMMODE_PAE_NX)
1376 {
1377 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1378 {
1379 RTGCPTR GCPtr = pCur->GCPtr;
1380 unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1381
1382 unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
1383 while (iPT-- > 0)
1384 {
1385 PX86PDEPAE pPDE = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtr);
1386 pPDE->u = 0;
1387
1388 GCPtr += (1 << X86_PD_PAE_SHIFT);
1389 }
1390 }
1391 }
1392 else
1393 AssertFailed();
1394
1395 return VINF_SUCCESS;
1396}
1397
1398/**
1399 * Remove the hypervisor mappings from the active CR3
1400 *
1401 * @returns VBox status.
1402 * @param pVM The virtual machine.
1403 */
1404VMMR3DECL(int) PGMR3MapDeactivate(PVM pVM)
1405{
1406 /*
1407 * Can skip this if mappings are safely fixed.
1408 */
1409 if (pVM->pgm.s.fMappingsFixed)
1410 return VINF_SUCCESS;
1411
1412 PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
1413 Assert(enmGuestMode <= PGMMODE_PAE_NX);
1414
1415 /*
1416 * Iterate mappings.
1417 */
1418 if (enmGuestMode == PGMMODE_32_BIT)
1419 {
1420 /*
1421 * Resolve the page directory.
1422 */
1423 PX86PD pPD = (PX86PD)pVM->pgm.s.pShwPageCR3R3;
1424 Assert(pPD);
1425
1426 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1427 {
1428 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
1429 unsigned iPT = pCur->cPTs;
1430 while (iPT-- > 0)
1431 pPD->a[iPDE + iPT].u = 0;
1432 }
1433 }
1434 else if ( enmGuestMode == PGMMODE_PAE
1435 || enmGuestMode == PGMMODE_PAE_NX)
1436 {
1437 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1438 {
1439 RTGCPTR GCPtr = pCur->GCPtr;
1440
1441 unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
1442 while (iPT-- > 0)
1443 {
1444 PX86PDEPAE pPDE = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtr);
1445 pPDE->u = 0;
1446
1447 GCPtr += (1 << X86_PD_PAE_SHIFT);
1448 }
1449 }
1450
1451 /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entries. (legacy PAE guest mode) */
1452 PX86PDPT pPdpt = (PX86PDPT)pVM->pgm.s.pShwPageCR3R3;
1453 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
1454 pPdpt->a[i].u &= ~PGM_PLXFLAGS_MAPPING;
1455 }
1456 else
1457 AssertFailed();
1458
1459 return VINF_SUCCESS;
1460}
1461#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
1462
1463/**
1464 * Read memory from the guest mappings.
1465 *
1466 * This will use the page tables associated with the mappings to
1467 * read the memory. This means that not all kinds of memory are readable
1468 * since we don't necessarily know how to convert that physical address
1469 * to a HC virtual one.
1470 *
1471 * @returns VBox status.
1472 * @param pVM VM handle.
1473 * @param pvDst The destination address (HC of course).
1474 * @param GCPtrSrc The source address (GC virtual address).
1475 * @param cb Number of bytes to read.
1476 *
1477 * @remarks This is indirectly for DBGF use only.
1478 * @todo Consider renaming it to indicate its special usage, or just
1479 * reimplement it in MMR3HyperReadGCVirt.
1480 */
1481VMMR3DECL(int) PGMR3MapRead(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
1482{
1483 /*
1484 * Simplicity over speed... Chop the request up into chunks
1485 * which don't cross pages.
1486 */
1487 if (cb + (GCPtrSrc & PAGE_OFFSET_MASK) > PAGE_SIZE)
1488 {
1489 for (;;)
1490 {
1491 size_t cbRead = RT_MIN(cb, PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK));
1492 int rc = PGMR3MapRead(pVM, pvDst, GCPtrSrc, cbRead);
1493 if (RT_FAILURE(rc))
1494 return rc;
1495 cb -= cbRead;
1496 if (!cb)
1497 break;
1498 pvDst = (char *)pvDst + cbRead;
1499 GCPtrSrc += cbRead;
1500 }
1501 return VINF_SUCCESS;
1502 }
1503
1504 /*
1505 * Find the mapping.
1506 */
1507 PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
1508 while (pCur)
1509 {
1510 RTGCPTR off = GCPtrSrc - pCur->GCPtr;
1511 if (off < pCur->cb)
1512 {
1513 if (off + cb > pCur->cb)
1514 {
1515 AssertMsgFailed(("Invalid page range %RGv LB%#x. mapping '%s' %RGv to %RGv\n",
1516 GCPtrSrc, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast));
1517 return VERR_INVALID_PARAMETER;
1518 }
1519
1520 unsigned iPT = off >> X86_PD_SHIFT;
1521 unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
1522 while (cb > 0 && iPTE < RT_ELEMENTS(CTXALLSUFF(pCur->aPTs[iPT].pPT)->a))
1523 {
1524 if (!CTXALLSUFF(pCur->aPTs[iPT].paPaePTs)[iPTE / 512].a[iPTE % 512].n.u1Present)
1525 return VERR_PAGE_NOT_PRESENT;
1526 RTHCPHYS HCPhys = CTXALLSUFF(pCur->aPTs[iPT].paPaePTs)[iPTE / 512].a[iPTE % 512].u & X86_PTE_PAE_PG_MASK;
1527
1528 /*
1529 * Get the virtual page from the physical one.
1530 */
1531 void *pvPage;
1532 int rc = MMR3HCPhys2HCVirt(pVM, HCPhys, &pvPage);
1533 if (RT_FAILURE(rc))
1534 return rc;
1535
1536 memcpy(pvDst, (char *)pvPage + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
1537 return VINF_SUCCESS;
1538 }
1539 }
1540
1541 /* next */
1542 pCur = CTXALLSUFF(pCur->pNext);
1543 }
1544
1545 return VERR_INVALID_POINTER;
1546}
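/*
 * Minimal read sketch (the address is an illustrative assumption; as noted
 * above this is effectively a DBGF helper):
 *
 *     uint32_t u32;
 *     int rc = PGMR3MapRead(pVM, &u32, GCPtrInMapping, sizeof(u32));
 *     if (RT_SUCCESS(rc))
 *         Log(("PGMR3MapRead: %RGv -> %#x\n", GCPtrInMapping, u32));
 */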
1547
1548
1549/**
1550 * Info callback for dumping the guest context mappings.
1551 *
1552 * @param pHlp The output helpers.
1553 * @param pszArgs The arguments (ignored).
1554 */
1555DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1556{
1557 pHlp->pfnPrintf(pHlp, pVM->pgm.s.fMappingsFixed
1558 ? "\nThe mappings are FIXED.\n"
1559 : "\nThe mappings are FLOATING.\n");
1560 PPGMMAPPING pCur;
1561 for (pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1562 pHlp->pfnPrintf(pHlp, "%RGv - %RGv %s\n", pCur->GCPtr, pCur->GCPtrLast, pCur->pszDesc);
1563}
1564