VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMMap.cpp@17639

Last change on this file since 17639 was 17622, checked in by vboxsync, 16 years ago

Simple check added to make sure we don't bounce back and forth between hypervisor mappings that have caused conflicts in the past.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 45.9 KB
1/* $Id: PGMMap.cpp 17622 2009-03-10 12:32:23Z vboxsync $ */
2/** @file
3 * PGM - Page Manager, Guest Context Mappings.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/dbgf.h>
28#include <VBox/pgm.h>
29#include "PGMInternal.h"
30#include <VBox/vm.h>
31
32#include <VBox/log.h>
33#include <VBox/err.h>
34#include <iprt/asm.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37
38
39/*******************************************************************************
40* Internal Functions *
41*******************************************************************************/
42static void pgmR3MapClearPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE);
43static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
44static int pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
45static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
46
47
48/**
49 * Creates a page table based mapping in GC.
50 *
51 * @returns VBox status code.
52 * @param pVM VM Handle.
53 * @param GCPtr Virtual Address. (Page table aligned!)
54 * @param cb Size of the range. Must be 4MB aligned!
55 * @param pfnRelocate Relocation callback function.
56 * @param pvUser User argument to the callback.
57 * @param pszDesc Pointer to description string. This must not be freed.
58 */
59VMMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc)
60{
61 LogFlow(("PGMR3MapPT: GCPtr=%#x cb=%d pfnRelocate=%p pvUser=%p pszDesc=%s\n", GCPtr, cb, pfnRelocate, pvUser, pszDesc));
62 AssertMsg(pVM->pgm.s.pInterPD, ("Paging isn't initialized, init order problems!\n"));
63
64 /*
65 * Validate input.
66 */
67 if (cb < _2M || cb > 64 * _1M)
68 {
69 AssertMsgFailed(("Serious? cb=%d\n", cb));
70 return VERR_INVALID_PARAMETER;
71 }
72 cb = RT_ALIGN_32(cb, _4M);
73 RTGCPTR GCPtrLast = GCPtr + cb - 1;
74 if (GCPtrLast < GCPtr)
75 {
76 AssertMsgFailed(("Range wraps! GCPtr=%x GCPtrLast=%x\n", GCPtr, GCPtrLast));
77 return VERR_INVALID_PARAMETER;
78 }
79 if (pVM->pgm.s.fMappingsFixed)
80 {
81 AssertMsgFailed(("Mappings are fixed! It's not possible to add new mappings at this time!\n"));
82 return VERR_PGM_MAPPINGS_FIXED;
83 }
84 if (!pfnRelocate)
85 {
86 AssertMsgFailed(("Callback is required\n"));
87 return VERR_INVALID_PARAMETER;
88 }
89
90 /*
91 * Find list location.
92 */
93 PPGMMAPPING pPrev = NULL;
94 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
95 while (pCur)
96 {
97 if (pCur->GCPtrLast >= GCPtr && pCur->GCPtr <= GCPtrLast)
98 {
99 AssertMsgFailed(("Address is already in use by %s. req %#x-%#x take %#x-%#x\n",
100 pCur->pszDesc, GCPtr, GCPtrLast, pCur->GCPtr, pCur->GCPtrLast));
101 LogRel(("VERR_PGM_MAPPING_CONFLICT: Address is already in use by %s. req %#x-%#x take %#x-%#x\n",
102 pCur->pszDesc, GCPtr, GCPtrLast, pCur->GCPtr, pCur->GCPtrLast));
103 return VERR_PGM_MAPPING_CONFLICT;
104 }
105 if (pCur->GCPtr > GCPtr)
106 break;
107 pPrev = pCur;
108 pCur = pCur->pNextR3;
109 }
110
111 /*
112 * Check for conflicts with intermediate mappings.
113 */
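 /* Note: with 32-bit paging each page directory entry covers 4 MB (X86_PD_SHIFT is 22),
  * so iPageDir below is the first 32-bit PDE index of the range and cPTs the number of
  * 4 MB slots (i.e. page tables) the range spans. */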
114 const unsigned iPageDir = GCPtr >> X86_PD_SHIFT;
115 const unsigned cPTs = cb >> X86_PD_SHIFT;
116 if (pVM->pgm.s.fFinalizedMappings)
117 {
118 for (unsigned i = 0; i < cPTs; i++)
119 if (pVM->pgm.s.pInterPD->a[iPageDir + i].n.u1Present)
120 {
121 AssertMsgFailed(("Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << PAGE_SHIFT)));
122 LogRel(("VERR_PGM_MAPPING_CONFLICT: Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << PAGE_SHIFT)));
123 return VERR_PGM_MAPPING_CONFLICT;
124 }
125 /** @todo AMD64: add check in PAE structures too, so we can remove all the 32-Bit paging stuff there. */
126 }
127
128 /*
129 * Allocate and initialize the new list node.
130 */
131 PPGMMAPPING pNew;
132 int rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM, (void **)&pNew);
133 if (RT_FAILURE(rc))
134 return rc;
135 pNew->GCPtr = GCPtr;
136 pNew->GCPtrLast = GCPtrLast;
137 pNew->cb = cb;
138 pNew->pszDesc = pszDesc;
139 pNew->pfnRelocate = pfnRelocate;
140 pNew->pvUser = pvUser;
141 pNew->cPTs = cPTs;
142
143 /*
144 * Allocate page tables and insert them into the page directories.
145 * (One 32-bit PT and two PAE PTs.)
146 */
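 /* Layout note: each 4 MB slot gets three consecutive pages - one 32-bit page table
  * followed by the two PAE page tables covering the same 4 MB - which is why pbPTs is
  * advanced by PAGE_SIZE and then by PAGE_SIZE * 2 in the loop below. */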
147 uint8_t *pbPTs;
148 rc = MMHyperAlloc(pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM, (void **)&pbPTs);
149 if (RT_FAILURE(rc))
150 {
151 MMHyperFree(pVM, pNew);
152 return VERR_NO_MEMORY;
153 }
154
155 /*
156 * Init the page tables and insert them into the page directories.
157 */
158 Log4(("PGMR3MapPT: GCPtr=%RGv cPTs=%u pbPTs=%p\n", GCPtr, cPTs, pbPTs));
159 for (unsigned i = 0; i < cPTs; i++)
160 {
161 /*
162 * 32-bit.
163 */
164 pNew->aPTs[i].pPTR3 = (PX86PT)pbPTs;
165 pNew->aPTs[i].pPTRC = MMHyperR3ToRC(pVM, pNew->aPTs[i].pPTR3);
166 pNew->aPTs[i].pPTR0 = MMHyperR3ToR0(pVM, pNew->aPTs[i].pPTR3);
167 pNew->aPTs[i].HCPhysPT = MMR3HyperHCVirt2HCPhys(pVM, pNew->aPTs[i].pPTR3);
168 pbPTs += PAGE_SIZE;
169 Log4(("PGMR3MapPT: i=%d: pPTR3=%RHv pPTRC=%RRv pPTR0=%RHv HCPhysPT=%RHp\n",
170 i, pNew->aPTs[i].pPTR3, pNew->aPTs[i].pPTRC, pNew->aPTs[i].pPTR0, pNew->aPTs[i].HCPhysPT));
171
172 /*
173 * PAE.
174 */
175 pNew->aPTs[i].HCPhysPaePT0 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs);
176 pNew->aPTs[i].HCPhysPaePT1 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs + PAGE_SIZE);
177 pNew->aPTs[i].paPaePTsR3 = (PX86PTPAE)pbPTs;
178 pNew->aPTs[i].paPaePTsRC = MMHyperR3ToRC(pVM, pbPTs);
179 pNew->aPTs[i].paPaePTsR0 = MMHyperR3ToR0(pVM, pbPTs);
180 pbPTs += PAGE_SIZE * 2;
181 Log4(("PGMR3MapPT: i=%d: paPaePTsR3=%RHv paPaePTsRC=%RRv paPaePTsR0=%RHv HCPhysPaePT0=%RHp HCPhysPaePT1=%RHp\n",
182 i, pNew->aPTs[i].paPaePTsR3, pNew->aPTs[i].paPaePTsRC, pNew->aPTs[i].paPaePTsR0, pNew->aPTs[i].HCPhysPaePT0, pNew->aPTs[i].HCPhysPaePT1));
183 }
184 if (pVM->pgm.s.fFinalizedMappings)
185 pgmR3MapSetPDEs(pVM, pNew, iPageDir);
186 /* else PGMR3FinalizeMappings() */
187
188 /*
189 * Insert the new mapping.
190 */
191 pNew->pNextR3 = pCur;
192 pNew->pNextRC = pCur ? MMHyperR3ToRC(pVM, pCur) : NIL_RTRCPTR;
193 pNew->pNextR0 = pCur ? MMHyperR3ToR0(pVM, pCur) : NIL_RTR0PTR;
194 if (pPrev)
195 {
196 pPrev->pNextR3 = pNew;
197 pPrev->pNextRC = MMHyperR3ToRC(pVM, pNew);
198 pPrev->pNextR0 = MMHyperR3ToR0(pVM, pNew);
199 }
200 else
201 {
202 pVM->pgm.s.pMappingsR3 = pNew;
203 pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pNew);
204 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pNew);
205 }
206
207 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
208 return VINF_SUCCESS;
209}
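
/* A minimal usage sketch, not taken from this file: a ring-3 component would typically
 * register a floating mapping with a relocation callback roughly as shown below. The
 * callback, user data and address names are hypothetical, and the callback signature is
 * assumed from the pfnRelocate invocations and PGMRELOCATECALL_* constants used in this
 * file.
 *
 *   static DECLCALLBACK(bool) exampleRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
 *                                             PGMRELOCATECALL enmMode, void *pvUser)
 *   {
 *       if (enmMode == PGMRELOCATECALL_SUGGEST)
 *           return true;    // accept any suggested address
 *       // PGMRELOCATECALL_RELOCATE: update any cached pointers into the mapping here.
 *       return true;
 *   }
 *
 *   int rc = PGMR3MapPT(pVM, GCPtrHint, _4M, exampleRelocate, NULL, "Example mapping");
 */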
210
211
212/**
213 * Removes a page table based mapping.
214 *
215 * @returns VBox status code.
216 * @param pVM VM Handle.
217 * @param GCPtr Virtual Address. (Page table aligned!)
218 */
219VMMR3DECL(int) PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr)
220{
221 LogFlow(("PGMR3UnmapPT: GCPtr=%#x\n", GCPtr));
222 AssertReturn(pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
223
224 /*
225 * Find it.
226 */
227 PPGMMAPPING pPrev = NULL;
228 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
229 while (pCur)
230 {
231 if (pCur->GCPtr == GCPtr)
232 {
233 /*
234 * Unlink it.
235 */
236 if (pPrev)
237 {
238 pPrev->pNextR3 = pCur->pNextR3;
239 pPrev->pNextRC = pCur->pNextRC;
240 pPrev->pNextR0 = pCur->pNextR0;
241 }
242 else
243 {
244 pVM->pgm.s.pMappingsR3 = pCur->pNextR3;
245 pVM->pgm.s.pMappingsRC = pCur->pNextRC;
246 pVM->pgm.s.pMappingsR0 = pCur->pNextR0;
247 }
248
249 /*
250 * Free the page table memory, clear page directory entries
251 * and free the page tables and node memory.
252 */
253 MMHyperFree(pVM, pCur->aPTs[0].pPTR3);
254 pgmR3MapClearPDEs(pVM, pCur, pCur->GCPtr >> X86_PD_SHIFT);
255 MMHyperFree(pVM, pCur);
256
257 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
258 return VINF_SUCCESS;
259 }
260
261 /* done? */
262 if (pCur->GCPtr > GCPtr)
263 break;
264
265 /* next */
266 pPrev = pCur;
267 pCur = pCur->pNextR3;
268 }
269
270 AssertMsgFailed(("No mapping for %#x found!\n", GCPtr));
271 return VERR_INVALID_PARAMETER;
272}
273
274
275/**
276 * Checks whether a range of PDEs in the intermediate
277 * memory context is unused.
278 *
279 * We're talking 32-bit PDEs here.
280 *
281 * @returns true/false.
282 * @param pVM Pointer to the shared VM structure.
283 * @param iPD The first PDE in the range.
284 * @param cPTs The number of PDEs in the range.
285 */
286DECLINLINE(bool) pgmR3AreIntermediatePDEsUnused(PVM pVM, unsigned iPD, unsigned cPTs)
287{
288 if (pVM->pgm.s.pInterPD->a[iPD].n.u1Present)
289 return false;
290 while (cPTs > 1)
291 {
292 iPD++;
293 if (pVM->pgm.s.pInterPD->a[iPD].n.u1Present)
294 return false;
295 cPTs--;
296 }
297 return true;
298}
299
300
301/**
302 * Unlinks the mapping.
303 *
304 * The mapping *must* be in the list.
305 *
306 * @param pVM Pointer to the shared VM structure.
307 * @param pMapping The mapping to unlink.
308 */
309static void pgmR3MapUnlink(PVM pVM, PPGMMAPPING pMapping)
310{
311 PPGMMAPPING pAfterThis = pVM->pgm.s.pMappingsR3;
312 if (pAfterThis == pMapping)
313 {
314 /* head */
315 pVM->pgm.s.pMappingsR3 = pMapping->pNextR3;
316 pVM->pgm.s.pMappingsRC = pMapping->pNextRC;
317 pVM->pgm.s.pMappingsR0 = pMapping->pNextR0;
318 }
319 else
320 {
321 /* in the list */
322 while (pAfterThis->pNextR3 != pMapping)
323 {
324 pAfterThis = pAfterThis->pNextR3;
325 AssertReleaseReturnVoid(pAfterThis);
326 }
327
328 pAfterThis->pNextR3 = pMapping->pNextR3;
329 pAfterThis->pNextRC = pMapping->pNextRC;
330 pAfterThis->pNextR0 = pMapping->pNextR0;
331 }
332}
333
334
335/**
336 * Links the mapping.
337 *
338 * @param pVM Pointer to the shared VM structure.
339 * @param pMapping The mapping to link.
340 */
341static void pgmR3MapLink(PVM pVM, PPGMMAPPING pMapping)
342{
343 /*
344 * Find the list location (it's sorted by GCPtr) and link it in.
345 */
346 if ( !pVM->pgm.s.pMappingsR3
347 || pVM->pgm.s.pMappingsR3->GCPtr > pMapping->GCPtr)
348 {
349 /* head */
350 pMapping->pNextR3 = pVM->pgm.s.pMappingsR3;
351 pMapping->pNextRC = pVM->pgm.s.pMappingsRC;
352 pMapping->pNextR0 = pVM->pgm.s.pMappingsR0;
353 pVM->pgm.s.pMappingsR3 = pMapping;
354 pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pMapping);
355 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pMapping);
356 }
357 else
358 {
359 /* in the list */
360 PPGMMAPPING pAfterThis = pVM->pgm.s.pMappingsR3;
361 PPGMMAPPING pBeforeThis = pAfterThis->pNextR3;
362 while (pBeforeThis && pBeforeThis->GCPtr <= pMapping->GCPtr)
363 {
364 pAfterThis = pBeforeThis;
365 pBeforeThis = pBeforeThis->pNextR3;
366 }
367
368 pMapping->pNextR3 = pAfterThis->pNextR3;
369 pMapping->pNextRC = pAfterThis->pNextRC;
370 pMapping->pNextR0 = pAfterThis->pNextR0;
371 pAfterThis->pNextR3 = pMapping;
372 pAfterThis->pNextRC = MMHyperR3ToRC(pVM, pMapping);
373 pAfterThis->pNextR0 = MMHyperR3ToR0(pVM, pMapping);
374 }
375}
376
377
378/**
379 * Finalizes the intermediate context.
380 *
381 * This is called at the end of the ring-3 init and will construct the
382 * intermediate paging structures, relocating all the mappings in the process.
383 *
384 * @returns VBox status code.
385 * @param pVM Pointer to the shared VM structure.
386 * @thread EMT(0)
387 */
388VMMR3DECL(int) PGMR3FinalizeMappings(PVM pVM)
389{
390 AssertReturn(!pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
391 pVM->pgm.s.fFinalizedMappings = true;
392
393 /*
394 * Loop until all mappings have been finalized.
395 */
396 /*unsigned iPDNext = UINT32_C(0xc0000000) >> X86_PD_SHIFT;*/ /* makes CSAM/PATM freak out booting linux. :-/ */
397#if 0
398 unsigned iPDNext = MM_HYPER_AREA_ADDRESS >> X86_PD_SHIFT;
399#else
400 unsigned iPDNext = 1 << X86_PD_SHIFT; /* no hint, map them from the top. */
401#endif
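 /* Note: this initial value is deliberately out of range (>= X86_PG_ENTRIES), so the
  * placement logic below does not treat it as a usable hint and falls back to scanning
  * downwards from the top of the 4 GB space, as the comment above says. */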
402 PPGMMAPPING pCur;
403 do
404 {
405 pCur = pVM->pgm.s.pMappingsR3;
406 while (pCur)
407 {
408 if (!pCur->fFinalized)
409 {
410 /*
411 * Find a suitable location.
412 */
413 RTGCPTR const GCPtrOld = pCur->GCPtr;
414 const unsigned cPTs = pCur->cPTs;
415 unsigned iPDNew = iPDNext;
416 if ( iPDNew + cPTs >= X86_PG_ENTRIES /* exclude the last PD */
417 || !pgmR3AreIntermediatePDEsUnused(pVM, iPDNew, cPTs)
418 || !pCur->pfnRelocate(pVM, GCPtrOld, (RTGCPTR)iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
419 {
420 /* No luck, just scan down from 4GB-4MB, giving up at 4MB. */
421 iPDNew = X86_PG_ENTRIES - cPTs - 1;
422 while ( iPDNew > 0
423 && ( !pgmR3AreIntermediatePDEsUnused(pVM, iPDNew, cPTs)
424 || !pCur->pfnRelocate(pVM, GCPtrOld, (RTGCPTR)iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
425 )
426 iPDNew--;
427 AssertLogRelReturn(iPDNew != 0, VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
428 }
429
430 /*
431 * Relocate it (something akin to pgmR3MapRelocate).
432 */
433 pgmR3MapSetPDEs(pVM, pCur, iPDNew);
434
435 /* unlink the mapping, update the entry and relink it. */
436 pgmR3MapUnlink(pVM, pCur);
437
438 RTGCPTR const GCPtrNew = (RTGCPTR)iPDNew << X86_PD_SHIFT;
439 pCur->GCPtr = GCPtrNew;
440 pCur->GCPtrLast = GCPtrNew + pCur->cb - 1;
441 pCur->fFinalized = true;
442
443 pgmR3MapLink(pVM, pCur);
444
445 /* Finally work the callback. */
446 pCur->pfnRelocate(pVM, GCPtrOld, GCPtrNew, PGMRELOCATECALL_RELOCATE, pCur->pvUser);
447
448 /*
449 * The list order might have changed, start from the beginning again.
450 */
451 iPDNext = iPDNew + cPTs;
452 break;
453 }
454
455 /* next */
456 pCur = pCur->pNextR3;
457 }
458 } while (pCur);
459
460 return VINF_SUCCESS;
461}
462
463
464/**
465 * Gets the size of the current guest mappings if they were to be
466 * put next to one another.
467 *
468 * @returns VBox status code.
469 * @param pVM The VM.
470 * @param pcb Where to store the size.
471 */
472VMMR3DECL(int) PGMR3MappingsSize(PVM pVM, uint32_t *pcb)
473{
474 RTGCPTR cb = 0;
475 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
476 cb += pCur->cb;
477
478 *pcb = cb;
479 AssertReturn(*pcb == cb, VERR_NUMBER_TOO_BIG);
480 Log(("PGMR3MappingsSize: return %d (%#x) bytes\n", cb, cb));
481 return VINF_SUCCESS;
482}
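
/* A minimal sketch, not from this file, of how the size query is typically paired with
 * PGMR3MappingsFix below: the caller reserves a 4 MB aligned guest range of at least
 * this size and then asks PGM to fix the mappings into it. GCPtrReserved is a
 * hypothetical address supplied by the guest.
 *
 *   uint32_t cb;
 *   int rc = PGMR3MappingsSize(pVM, &cb);
 *   if (RT_SUCCESS(rc))
 *       rc = PGMR3MappingsFix(pVM, GCPtrReserved, RT_ALIGN_32(cb, _4M));
 */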
483
484
485/**
486 * Fixes the guest context mappings in a range reserved from the Guest OS.
487 *
488 * @returns VBox status code.
489 * @param pVM The VM.
490 * @param GCPtrBase The address of the reserved range of guest memory.
491 * @param cb The size of the range starting at GCPtrBase.
492 */
493VMMR3DECL(int) PGMR3MappingsFix(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb)
494{
495 Log(("PGMR3MappingsFix: GCPtrBase=%#x cb=%#x\n", GCPtrBase, cb));
496
497 /* Ignore the Guest Additions mapping fix call when running in VT-x/AMD-V mode. */
498 if ( pVM->pgm.s.fMappingsFixed
499 && HWACCMR3IsActive(pVM))
500 return VINF_SUCCESS;
501
502 /*
503 * This is all or nothing at all. So, a tiny bit of paranoia first.
504 */
505 if (GCPtrBase & X86_PAGE_4M_OFFSET_MASK)
506 {
507 AssertMsgFailed(("GCPtrBase (%#x) has to be aligned on a 4MB address!\n", GCPtrBase));
508 return VERR_INVALID_PARAMETER;
509 }
510 if (!cb || (cb & X86_PAGE_4M_OFFSET_MASK))
511 {
512 AssertMsgFailed(("cb (%#x) is 0 or not aligned on a 4MB address!\n", cb));
513 return VERR_INVALID_PARAMETER;
514 }
515
516 /*
517 * Before we do anything we'll do a forced PD sync to try to make sure any
518 * pending relocations caused by these mappings have been resolved.
519 */
520 PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM), true);
521
522 /*
523 * Check that it's not conflicting with a core code mapping in the intermediate page table.
524 */
525 unsigned iPDNew = GCPtrBase >> X86_PD_SHIFT;
526 unsigned i = cb >> X86_PD_SHIFT;
527 while (i-- > 0)
528 {
529 if (pVM->pgm.s.pInterPD->a[iPDNew + i].n.u1Present)
530 {
531 /* Check that it's not one of our own mappings. */
532 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
533 while (pCur)
534 {
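 /* Unsigned arithmetic trick: if iPDNew + i is below the mapping's first PDE index the
  * subtraction below wraps to a huge value, so this single compare is true exactly when
  * the PDE falls inside pCur's range. */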
535 if (iPDNew + i - (pCur->GCPtr >> X86_PD_SHIFT) < (pCur->cb >> X86_PD_SHIFT))
536 break;
537 pCur = pCur->pNextR3;
538 }
539 if (!pCur)
540 {
541 LogRel(("PGMR3MappingsFix: Conflicts with intermediate PDE %#x (GCPtrBase=%RGv cb=%#zx). The guest should retry.\n",
542 iPDNew + i, GCPtrBase, cb));
543 return VERR_PGM_MAPPINGS_FIX_CONFLICT;
544 }
545 }
546 }
547
548 /*
549 * In PAE / PAE mode, make sure we don't cross page directories.
550 */
551 if ( ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
552 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX)
553 && ( pVM->pgm.s.enmShadowMode == PGMMODE_PAE
554 || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX))
555 {
556 unsigned iPdptBase = GCPtrBase >> X86_PDPT_SHIFT;
557 unsigned iPdptLast = (GCPtrBase + cb - 1) >> X86_PDPT_SHIFT;
558 if (iPdptBase != iPdptLast)
559 {
560 LogRel(("PGMR3MappingsFix: Crosses PD boundary; iPdptBase=%#x iPdptLast=%#x (GCPtrBase=%RGv cb=%#zx). The guest should retry.\n",
561 iPdptBase, iPdptLast, GCPtrBase, cb));
562 return VERR_PGM_MAPPINGS_FIX_CONFLICT;
563 }
564 }
565
566 /*
567 * Loop the mappings and check that they all agree on their new locations.
568 */
569 RTGCPTR GCPtrCur = GCPtrBase;
570 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
571 while (pCur)
572 {
573 if (!pCur->pfnRelocate(pVM, pCur->GCPtr, GCPtrCur, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
574 {
575 AssertMsgFailed(("The suggested fixed address %#x was rejected by '%s'!\n", GCPtrCur, pCur->pszDesc));
576 return VERR_PGM_MAPPINGS_FIX_REJECTED;
577 }
578 /* next */
579 GCPtrCur += pCur->cb;
580 pCur = pCur->pNextR3;
581 }
582 if (GCPtrCur > GCPtrBase + cb)
583 {
584 AssertMsgFailed(("cb (%#x) is less than the required range %#x!\n", cb, GCPtrCur - GCPtrBase));
585 return VERR_PGM_MAPPINGS_FIX_TOO_SMALL;
586 }
587
588 /*
589 * Loop the table assigning the mappings to the passed in memory
590 * and call their relocator callback.
591 */
592 GCPtrCur = GCPtrBase;
593 pCur = pVM->pgm.s.pMappingsR3;
594 while (pCur)
595 {
596 unsigned iPDOld = pCur->GCPtr >> X86_PD_SHIFT;
597 iPDNew = GCPtrCur >> X86_PD_SHIFT;
598
599 /*
600 * Relocate the page table(s).
601 */
602 pgmR3MapClearPDEs(pVM, pCur, iPDOld);
603 pgmR3MapSetPDEs(pVM, pCur, iPDNew);
604
605 /*
606 * Update the entry.
607 */
608 pCur->GCPtr = GCPtrCur;
609 pCur->GCPtrLast = GCPtrCur + pCur->cb - 1;
610
611 /*
612 * Callback to execute the relocation.
613 */
614 pCur->pfnRelocate(pVM, iPDOld << X86_PD_SHIFT, iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_RELOCATE, pCur->pvUser);
615
616 /*
617 * Advance.
618 */
619 GCPtrCur += pCur->cb;
620 pCur = pCur->pNextR3;
621 }
622
623 /*
624 * Mark the mappings as fixed and return.
625 */
626 pVM->pgm.s.fMappingsFixed = true;
627 pVM->pgm.s.GCPtrMappingFixed = GCPtrBase;
628 pVM->pgm.s.cbMappingFixed = cb;
629 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
630 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
631 return VINF_SUCCESS;
632}
633
634/**
635 * Disable the hypervisor mappings in the shadow page tables (doesn't touch the intermediate table!)
636 *
637 * @returns VBox status code.
638 * @param pVM The VM.
639 */
640VMMR3DECL(int) PGMR3MappingsDisable(PVM pVM)
641{
642 uint32_t cb;
643 int rc = PGMR3MappingsSize(pVM, &cb);
644 AssertRCReturn(rc, rc);
645
646 rc = pgmMapDeactivateCR3(pVM, pVM->pgm.s.pShwPageCR3R3);
647 AssertRCReturn(rc, rc);
648
649 /*
650 * Mark the mappings as fixed (using fake values) and disabled.
651 */
652 pVM->pgm.s.fDisableMappings = true;
653 pVM->pgm.s.fMappingsFixed = true;
654 pVM->pgm.s.GCPtrMappingFixed = MM_HYPER_AREA_ADDRESS;
655 pVM->pgm.s.cbMappingFixed = cb;
656 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
657 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
658 return VINF_SUCCESS;
659}
660
661
662/**
663 * Unfixes the mappings.
664 * After calling this function mapping conflict detection will be enabled.
665 *
666 * @returns VBox status code.
667 * @param pVM The VM.
668 */
669VMMR3DECL(int) PGMR3MappingsUnfix(PVM pVM)
670{
671 Log(("PGMR3MappingsUnfix: fMappingsFixed=%d\n", pVM->pgm.s.fMappingsFixed));
672
673 /* Ignore in VT-x/AMD-V mode. */
674 if (HWACCMR3IsActive(pVM))
675 return VINF_SUCCESS;
676
677 pVM->pgm.s.fMappingsFixed = false;
678 pVM->pgm.s.GCPtrMappingFixed = 0;
679 pVM->pgm.s.cbMappingFixed = 0;
680 return VINF_SUCCESS;
681}
682
683
684/**
685 * Map pages into the intermediate context (switcher code).
686 * These pages are mapped at both the given virtual address and at
687 * the physical address (for identity mapping).
688 *
689 * @returns VBox status code.
690 * @param pVM The virtual machine.
691 * @param Addr Intermediate context address of the mapping.
692 * @param HCPhys Start of the range of physical pages. This must be entirely below 4GB!
693 * @param cbPages Number of bytes to map.
694 *
695 * @remark This API shall not be used for anything but mapping the switcher code.
696 */
697VMMR3DECL(int) PGMR3MapIntermediate(PVM pVM, RTUINTPTR Addr, RTHCPHYS HCPhys, unsigned cbPages)
698{
699 LogFlow(("PGMR3MapIntermediate: Addr=%RTptr HCPhys=%RHp cbPages=%#x\n", Addr, HCPhys, cbPages));
700
701 /*
702 * Adjust input.
703 */
704 cbPages += (uint32_t)HCPhys & PAGE_OFFSET_MASK;
705 cbPages = RT_ALIGN(cbPages, PAGE_SIZE);
706 HCPhys &= X86_PTE_PAE_PG_MASK;
707 Addr &= PAGE_BASE_MASK;
708 /* We only care about the first 4GB, because on AMD64 we'll be repeating them all over the address space. */
709 uint32_t uAddress = (uint32_t)Addr;
710
711 /*
712 * Assert input and state.
713 */
714 AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));
715 AssertMsg(pVM->pgm.s.pInterPD, ("Bad init order, paging.\n"));
716 AssertMsg(cbPages <= (512 << PAGE_SHIFT), ("The mapping is too big %d bytes\n", cbPages));
717 AssertMsg(HCPhys < _4G && HCPhys + cbPages < _4G, ("Addr=%RTptr HCPhys=%RHp cbPages=%d\n", Addr, HCPhys, cbPages));
718 AssertReturn(!pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
719
720 /*
721 * Check for internal conflicts between the virtual address and the physical address.
722 * A 1:1 mapping is fine, but partial overlapping is a no-no.
723 */
724 if ( uAddress != HCPhys
725 && ( uAddress < HCPhys
726 ? HCPhys - uAddress < cbPages
727 : uAddress - HCPhys < cbPages
728 )
729 )
730 AssertLogRelMsgFailedReturn(("Addr=%RTptr HCPhys=%RHp cbPages=%d\n", Addr, HCPhys, cbPages),
731 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
732
733 const unsigned cPages = cbPages >> PAGE_SHIFT;
734 int rc = pgmR3MapIntermediateCheckOne(pVM, uAddress, cPages, pVM->pgm.s.apInterPTs[0], pVM->pgm.s.apInterPaePTs[0]);
735 if (RT_FAILURE(rc))
736 return rc;
737 rc = pgmR3MapIntermediateCheckOne(pVM, (uintptr_t)HCPhys, cPages, pVM->pgm.s.apInterPTs[1], pVM->pgm.s.apInterPaePTs[1]);
738 if (RT_FAILURE(rc))
739 return rc;
740
741 /*
742 * Everything's fine, do the mapping.
743 */
744 pgmR3MapIntermediateDoOne(pVM, uAddress, HCPhys, cPages, pVM->pgm.s.apInterPTs[0], pVM->pgm.s.apInterPaePTs[0]);
745 pgmR3MapIntermediateDoOne(pVM, (uintptr_t)HCPhys, HCPhys, cPages, pVM->pgm.s.apInterPTs[1], pVM->pgm.s.apInterPaePTs[1]);
746
747 return VINF_SUCCESS;
748}
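
/* Illustrative note: because the range is mapped both at Addr and 1:1 at its physical
 * address, switcher code residing at, say, host physical 0x00345000 (an example address
 * only) stays addressable while CR3 is switched to and from the intermediate context,
 * which is what allows the world switch code to survive the transition. */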
749
750
751/**
752 * Validates that there are no conflicts for this mapping into the intermediate context.
753 *
754 * @returns VBox status code.
755 * @param pVM VM handle.
756 * @param uAddress Address of the mapping.
757 * @param cPages Number of pages.
758 * @param pPTDefault Pointer to the default 32-bit page table for this mapping.
759 * @param pPTPaeDefault Pointer to the default PAE page table for this mapping.
760 */
761static int pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault)
762{
763 AssertMsg((uAddress >> X86_PD_SHIFT) + cPages <= 1024, ("64-bit fixme\n"));
764
765 /*
766 * Check that the ranges are available.
767 * (This code doesn't have to be fast.)
768 */
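 /* For each page we work out which page table currently backs it in both the 32-bit and
  * the PAE layout. A PDE pointing at anything other than one of our two dedicated
  * intermediate page tables, or a PTE that is already in use, counts as a conflict. */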
769 while (cPages > 0)
770 {
771 /*
772 * 32-Bit.
773 */
774 unsigned iPDE = (uAddress >> X86_PD_SHIFT) & X86_PD_MASK;
775 unsigned iPTE = (uAddress >> X86_PT_SHIFT) & X86_PT_MASK;
776 PX86PT pPT = pPTDefault;
777 if (pVM->pgm.s.pInterPD->a[iPDE].u)
778 {
779 RTHCPHYS HCPhysPT = pVM->pgm.s.pInterPD->a[iPDE].u & X86_PDE_PG_MASK;
780 if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[0]))
781 pPT = pVM->pgm.s.apInterPTs[0];
782 else if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[1]))
783 pPT = pVM->pgm.s.apInterPTs[1];
784 else
785 {
786 /** @todo this must be handled with a relocation of the conflicting mapping!
787 * Which of course cannot be done because we're in the middle of the initialization. bad design! */
788 AssertLogRelMsgFailedReturn(("Conflict between core code and PGMR3Mapping(). uAddress=%RHv\n", uAddress),
789 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
790 }
791 }
792 if (pPT->a[iPTE].u)
793 AssertLogRelMsgFailedReturn(("Conflict iPTE=%#x iPDE=%#x uAddress=%RHv pPT->a[iPTE].u=%RX32\n", iPTE, iPDE, uAddress, pPT->a[iPTE].u),
794 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
795
796 /*
797 * PAE.
798 */
799 const unsigned iPDPE= (uAddress >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
800 iPDE = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
801 iPTE = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;
802 Assert(iPDPE < 4);
803 Assert(pVM->pgm.s.apInterPaePDs[iPDPE]);
804 PX86PTPAE pPTPae = pPTPaeDefault;
805 if (pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u)
806 {
807 RTHCPHYS HCPhysPT = pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u & X86_PDE_PAE_PG_MASK;
808 if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[0]))
809 pPTPae = pVM->pgm.s.apInterPaePTs[0];
810 else if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[1]))
811 pPTPae = pVM->pgm.s.apInterPaePTs[1];
812 else
813 {
814 /** @todo this must be handled with a relocation of the conflicting mapping!
815 * Which of course cannot be done because we're in the middle of the initialization. bad design! */
816 AssertLogRelMsgFailedReturn(("Conflict between core code and PGMR3Mapping(). uAddress=%RHv\n", uAddress),
817 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
818 }
819 }
820 if (pPTPae->a[iPTE].u)
821 AssertLogRelMsgFailedReturn(("Conflict iPTE=%#x iPDE=%#x uAddress=%RHv pPTPae->a[iPTE].u=%#RX64\n", iPTE, iPDE, uAddress, pPTPae->a[iPTE].u),
822 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
823
824 /* next */
825 uAddress += PAGE_SIZE;
826 cPages--;
827 }
828
829 return VINF_SUCCESS;
830}
831
832
833
834/**
835 * Sets up the intermediate page tables for a verified mapping.
836 *
837 * @param pVM VM handle.
838 * @param uAddress Address of the mapping.
839 * @param HCPhys The physical address of the page range.
840 * @param cPages Number of pages.
841 * @param pPTDefault Pointer to the default 32-bit page table for this mapping.
842 * @param pPTPaeDefault Pointer to the default PAE page table for this mapping.
843 */
844static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault)
845{
846 while (cPages > 0)
847 {
848 /*
849 * 32-Bit.
850 */
851 unsigned iPDE = (uAddress >> X86_PD_SHIFT) & X86_PD_MASK;
852 unsigned iPTE = (uAddress >> X86_PT_SHIFT) & X86_PT_MASK;
853 PX86PT pPT;
854 if (pVM->pgm.s.pInterPD->a[iPDE].u)
855 pPT = (PX86PT)MMPagePhys2Page(pVM, pVM->pgm.s.pInterPD->a[iPDE].u & X86_PDE_PG_MASK);
856 else
857 {
858 pVM->pgm.s.pInterPD->a[iPDE].u = X86_PDE_P | X86_PDE_A | X86_PDE_RW
859 | (uint32_t)MMPage2Phys(pVM, pPTDefault);
860 pPT = pPTDefault;
861 }
862 pPT->a[iPTE].u = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D | (uint32_t)HCPhys;
863
864 /*
865 * PAE
866 */
867 const unsigned iPDPE= (uAddress >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
868 iPDE = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
869 iPTE = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;
870 Assert(iPDPE < 4);
871 Assert(pVM->pgm.s.apInterPaePDs[iPDPE]);
872 PX86PTPAE pPTPae;
873 if (pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u)
874 pPTPae = (PX86PTPAE)MMPagePhys2Page(pVM, pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u & X86_PDE_PAE_PG_MASK);
875 else
876 {
877 pPTPae = pPTPaeDefault;
878 pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u = X86_PDE_P | X86_PDE_A | X86_PDE_RW
879 | MMPage2Phys(pVM, pPTPaeDefault);
880 }
881 pPTPae->a[iPTE].u = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D | HCPhys;
882
883 /* next */
884 cPages--;
885 HCPhys += PAGE_SIZE;
886 uAddress += PAGE_SIZE;
887 }
888}
889
890
891/**
892 * Clears all PDEs involved with the mapping in the shadow and intermediate page tables.
893 *
894 * @param pVM The VM handle.
895 * @param pMap Pointer to the mapping in question.
896 * @param iOldPDE The index of the 32-bit PDE corresponding to the base of the mapping.
897 */
898static void pgmR3MapClearPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE)
899{
900 unsigned i = pMap->cPTs;
901
902 pgmMapClearShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pMap, iOldPDE);
903
904 iOldPDE += i;
905 while (i-- > 0)
906 {
907 iOldPDE--;
908
909 /*
910 * 32-bit.
911 */
912 pVM->pgm.s.pInterPD->a[iOldPDE].u = 0;
913 /*
914 * PAE.
915 */
916 const unsigned iPD = iOldPDE / 256; /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
917 unsigned iPDE = iOldPDE * 2 % 512;
918 pVM->pgm.s.apInterPaePDs[iPD]->a[iPDE].u = 0;
919 iPDE++;
920 AssertFatal(iPDE < 512);
921 pVM->pgm.s.apInterPaePDs[iPD]->a[iPDE].u = 0;
922 }
923}
924
925/**
926 * Sets all PDEs involved with the mapping in the shadow and intermediate page tables.
927 *
928 * @param pVM The VM handle.
929 * @param pMap Pointer to the mapping in question.
930 * @param iNewPDE The index of the 32-bit PDE corresponding to the base of the mapping.
931 */
932static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
933{
934 PPGM pPGM = &pVM->pgm.s;
935
936 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s) || PGMGetGuestMode(pVM) <= PGMMODE_PAE_NX);
937
938 pgmMapSetShadowPDEs(pVM, pMap, iNewPDE);
939
940 /*
941 * Init the page tables and insert them into the page directories.
942 */
943 unsigned i = pMap->cPTs;
944 iNewPDE += i;
945 while (i-- > 0)
946 {
947 iNewPDE--;
948
949 /*
950 * 32-bit.
951 */
952 X86PDE Pde;
953 /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
954 Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
955 pPGM->pInterPD->a[iNewPDE] = Pde;
956 /*
957 * PAE.
958 */
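 /* Index math: a 32-bit PDE index (0..1023, 4 MB each) maps to PAE page directory
  * iNewPDE / 256 (each PAE PD covers 1 GB) and to the entry pair starting at
  * (iNewPDE * 2) % 512, since one 4 MB slot corresponds to two 2 MB PAE PDEs. */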
959 const unsigned iPD = iNewPDE / 256;
960 unsigned iPDE = iNewPDE * 2 % 512;
961 X86PDEPAE PdePae0;
962 PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
963 pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae0;
964 iPDE++;
965 AssertFatal(iPDE < 512);
966 X86PDEPAE PdePae1;
967 PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
968 pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae1;
969 }
970}
971
972/**
973 * Relocates a mapping to a new address.
974 *
975 * @param pVM VM handle.
976 * @param pMapping The mapping to relocate.
977 * @param GCPtrOldMapping The address of the start of the old mapping.
978 * @param GCPtrNewMapping The address of the start of the new mapping.
979 */
980void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping, RTGCPTR GCPtrNewMapping)
981{
982 unsigned iPDOld = GCPtrOldMapping >> X86_PD_SHIFT;
983 unsigned iPDNew = GCPtrNewMapping >> X86_PD_SHIFT;
984
985 Log(("PGM: Relocating %s from %RGv to %RGv\n", pMapping->pszDesc, GCPtrOldMapping, GCPtrNewMapping));
986 AssertMsg(((unsigned)iPDOld << X86_PD_SHIFT) == pMapping->GCPtr, ("%RGv vs %RGv\n", (RTGCPTR)((unsigned)iPDOld << X86_PD_SHIFT), pMapping->GCPtr));
987
988 /*
989 * Relocate the page table(s).
990 */
991 pgmR3MapClearPDEs(pVM, pMapping, iPDOld);
992 pgmR3MapSetPDEs(pVM, pMapping, iPDNew);
993
994 /*
995 * Update and resort the mapping list.
996 */
997
998 /* Find previous mapping for pMapping, put result into pPrevMap. */
999 PPGMMAPPING pPrevMap = NULL;
1000 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
1001 while (pCur && pCur != pMapping)
1002 {
1003 /* next */
1004 pPrevMap = pCur;
1005 pCur = pCur->pNextR3;
1006 }
1007 Assert(pCur);
1008
1009 /* Find the first mapping with an address >= the new mapping address. */
1010 RTGCPTR GCPtrNew = iPDNew << X86_PD_SHIFT;
1011 PPGMMAPPING pPrev = NULL;
1012 pCur = pVM->pgm.s.pMappingsR3;
1013 while (pCur && pCur->GCPtr < GCPtrNew)
1014 {
1015 /* next */
1016 pPrev = pCur;
1017 pCur = pCur->pNextR3;
1018 }
1019
1020 if (pCur != pMapping && pPrev != pMapping)
1021 {
1022 /*
1023 * Unlink.
1024 */
1025 if (pPrevMap)
1026 {
1027 pPrevMap->pNextR3 = pMapping->pNextR3;
1028 pPrevMap->pNextRC = pMapping->pNextRC;
1029 pPrevMap->pNextR0 = pMapping->pNextR0;
1030 }
1031 else
1032 {
1033 pVM->pgm.s.pMappingsR3 = pMapping->pNextR3;
1034 pVM->pgm.s.pMappingsRC = pMapping->pNextRC;
1035 pVM->pgm.s.pMappingsR0 = pMapping->pNextR0;
1036 }
1037
1038 /*
1039 * Link
1040 */
1041 pMapping->pNextR3 = pCur;
1042 if (pPrev)
1043 {
1044 pMapping->pNextRC = pPrev->pNextRC;
1045 pMapping->pNextR0 = pPrev->pNextR0;
1046 pPrev->pNextR3 = pMapping;
1047 pPrev->pNextRC = MMHyperR3ToRC(pVM, pMapping);
1048 pPrev->pNextR0 = MMHyperR3ToR0(pVM, pMapping);
1049 }
1050 else
1051 {
1052 pMapping->pNextRC = pVM->pgm.s.pMappingsRC;
1053 pMapping->pNextR0 = pVM->pgm.s.pMappingsR0;
1054 pVM->pgm.s.pMappingsR3 = pMapping;
1055 pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pMapping);
1056 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pMapping);
1057 }
1058 }
1059
1060 /*
1061 * Update the entry.
1062 */
1063 pMapping->GCPtr = GCPtrNew;
1064 pMapping->GCPtrLast = GCPtrNew + pMapping->cb - 1;
1065
1066 /*
1067 * Callback to execute the relocation.
1068 */
1069 pMapping->pfnRelocate(pVM, iPDOld << X86_PD_SHIFT, iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_RELOCATE, pMapping->pvUser);
1070}
1071
1072/**
1073 * Checks whether a new mapping address has previously been tried and caused a conflict with the guest mappings.
1074 *
1075 * @returns true if GCPtr is a known conflict address for this mapping, false otherwise.
1076 * @param pMapping The mapping which conflicts.
1077 * @param GCPtr New mapping address to try.
1078 */
1079bool pgmR3MapIsKnownConflictAddress(PPGMMAPPING pMapping, RTGCPTR GCPtr)
1080{
1081 for (int i = 0; i < RT_ELEMENTS(pMapping->GCPtrConflict); i++)
1082 {
1083 if (GCPtr == pMapping->GCPtrConflict[i])
1084 return true;
1085 }
1086 return false;
1087}
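
/* This is the "simple check" mentioned in the change description at the top of the page:
 * GCPtrConflict remembers the last few conflicting addresses (filled round-robin via
 * cConflicts in the resolvers below), so a mapping is never bounced back to an address
 * that has already caused a conflict. */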
1088
1089/**
1090 * Resolves a conflict between a page table based GC mapping and
1091 * the Guest OS page tables. (32-bit version)
1092 *
1093 * @returns VBox status code.
1094 * @param pVM VM Handle.
1095 * @param pMapping The mapping which conflicts.
1096 * @param pPDSrc The page directory of the guest OS.
1097 * @param GCPtrOldMapping The address of the start of the current mapping.
1098 */
1099int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping)
1100{
1101 STAM_REL_COUNTER_INC(&pVM->pgm.s.cRelocations);
1102 STAM_PROFILE_START(&pVM->pgm.s.StatR3ResolveConflict, a);
1103
1104 pMapping->GCPtrConflict[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;
1105 pMapping->cConflicts++;
1106
1107 /*
1108 * Scan for free page directory entries.
1109 *
1110 * Note that we do not support mappings at the very end of the
1111 * address space since that will break our GCPtrEnd assumptions.
1112 */
1113 const unsigned cPTs = pMapping->cPTs;
1114 unsigned iPDNew = RT_ELEMENTS(pPDSrc->a) - cPTs; /* (+ 1 - 1) */
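 /* iPDNew is pre-decremented by the loop, so the first candidate is
  * RT_ELEMENTS(pPDSrc->a) - cPTs - 1, the highest start index that still leaves the very
  * last PDE untouched (hence the "+ 1 - 1" note); the scan then works downwards. */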
1115 while (iPDNew-- > 0)
1116 {
1117 if (pPDSrc->a[iPDNew].n.u1Present)
1118 continue;
1119
1120 if (pgmR3MapIsKnownConflictAddress(pMapping, iPDNew << X86_PD_SHIFT))
1121 continue;
1122
1123 if (cPTs > 1)
1124 {
1125 bool fOk = true;
1126 for (unsigned i = 1; fOk && i < cPTs; i++)
1127 if (pPDSrc->a[iPDNew + i].n.u1Present)
1128 fOk = false;
1129 if (!fOk)
1130 continue;
1131 }
1132
1133 /*
1134 * Check that it's not conflicting with an intermediate page table mapping.
1135 */
1136 bool fOk = true;
1137 unsigned i = cPTs;
1138 while (fOk && i-- > 0)
1139 fOk = !pVM->pgm.s.pInterPD->a[iPDNew + i].n.u1Present;
1140 if (!fOk)
1141 continue;
1142 /** @todo AMD64 should check the PAE directories and skip the 32bit stuff. */
1143
1144 /*
1145 * Ask for the mapping.
1146 */
1147 RTGCPTR GCPtrNewMapping = iPDNew << X86_PD_SHIFT;
1148
1149 if (pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))
1150 {
1151 pgmR3MapRelocate(pVM, pMapping, GCPtrOldMapping, GCPtrNewMapping);
1152 STAM_PROFILE_STOP(&pVM->pgm.s.StatR3ResolveConflict, a);
1153 return VINF_SUCCESS;
1154 }
1155 }
1156
1157 STAM_PROFILE_STOP(&pVM->pgm.s.StatR3ResolveConflict, a);
1158 AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, GCPtrOldMapping, cPTs));
1159 return VERR_PGM_NO_HYPERVISOR_ADDRESS;
1160}
1161
1162
1163/**
1164 * Resolves a conflict between a page table based GC mapping and
1165 * the Guest OS page tables. (PAE version)
1166 *
1167 * @returns VBox status code.
1168 * @param pVM VM Handle.
1169 * @param pMapping The mapping which conflicts.
1170 * @param GCPtrOldMapping The address of the start of the current mapping.
1171 */
1172int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping)
1173{
1174 STAM_REL_COUNTER_INC(&pVM->pgm.s.cRelocations);
1175 STAM_PROFILE_START(&pVM->pgm.s.StatR3ResolveConflict, a);
1176
1177 pMapping->GCPtrConflict[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;
1178 pMapping->cConflicts++;
1179
1180 for (int iPDPTE = X86_PG_PAE_PDPE_ENTRIES - 1; iPDPTE >= 0; iPDPTE--)
1181 {
1182 unsigned iPDSrc;
1183 PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, (RTGCPTR32)iPDPTE << X86_PDPT_SHIFT, &iPDSrc, NULL);
1184
1185 /*
1186 * Scan for free page directory entries.
1187 *
1188 * Note that we do not support mappings at the very end of the
1189 * address space since that will break our GCPtrEnd assumptions.
1190 * Nor do we support mappings crossing page directories.
1191 */
1192 const unsigned cPTs = pMapping->cb >> X86_PD_PAE_SHIFT;
1193 unsigned iPDNew = RT_ELEMENTS(pPDSrc->a) - cPTs; /* (+ 1 - 1) */
1194
1195 while (iPDNew-- > 0)
1196 {
1197 /* Ugly assumption that mappings start on a 4 MB boundary. */
1198 if (iPDNew & 1)
1199 continue;
1200
1201 if (pgmR3MapIsKnownConflictAddress(pMapping, ((RTGCPTR32)iPDPTE << X86_PDPT_SHIFT) + (iPDNew << X86_PD_PAE_SHIFT)))
1202 continue;
1203
1204 if (pPDSrc)
1205 {
1206 if (pPDSrc->a[iPDNew].n.u1Present)
1207 continue;
1208 if (cPTs > 1)
1209 {
1210 bool fOk = true;
1211 for (unsigned i = 1; fOk && i < cPTs; i++)
1212 if (pPDSrc->a[iPDNew + i].n.u1Present)
1213 fOk = false;
1214 if (!fOk)
1215 continue;
1216 }
1217 }
1218 /*
1219 * Check that it's not conflicting with an intermediate page table mapping.
1220 */
1221 bool fOk = true;
1222 unsigned i = cPTs;
1223 while (fOk && i-- > 0)
1224 fOk = !pVM->pgm.s.apInterPaePDs[iPDPTE]->a[iPDNew + i].n.u1Present;
1225 if (!fOk)
1226 continue;
1227
1228 /*
1229 * Ask for the mapping.
1230 */
1231 RTGCPTR GCPtrNewMapping = ((RTGCPTR32)iPDPTE << X86_PDPT_SHIFT) + (iPDNew << X86_PD_PAE_SHIFT);
1232
1233 if (pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))
1234 {
1235 pgmR3MapRelocate(pVM, pMapping, GCPtrOldMapping, GCPtrNewMapping);
1236 STAM_PROFILE_STOP(&pVM->pgm.s.StatR3ResolveConflict, a);
1237 return VINF_SUCCESS;
1238 }
1239 }
1240 }
1241 STAM_PROFILE_STOP(&pVM->pgm.s.StatR3ResolveConflict, a);
1242 AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, GCPtrOldMapping, pMapping->cb >> X86_PD_PAE_SHIFT));
1243 return VERR_PGM_NO_HYPERVISOR_ADDRESS;
1244}
1245
1246/**
1247 * Read memory from the guest mappings.
1248 *
1249 * This will use the page tables associated with the mappings to
1250 * read the memory. This means that not all kinds of memory are readable
1251 * since we don't necessarily know how to convert that physical address
1252 * to an HC virtual one.
1253 *
1254 * @returns VBox status.
1255 * @param pVM VM handle.
1256 * @param pvDst The destination address (HC of course).
1257 * @param GCPtrSrc The source address (GC virtual address).
1258 * @param cb Number of bytes to read.
1259 *
1260 * @remarks This is indirectly for DBGF only.
1261 * @todo Consider renaming it to indicate it's special usage, or just
1262 * reimplement it in MMR3HyperReadGCVirt.
1263 */
1264VMMR3DECL(int) PGMR3MapRead(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
1265{
1266 /*
1267 * Simplicity over speed... Chop the request up into chunks
1268 * which don't cross pages.
1269 */
1270 if (cb + (GCPtrSrc & PAGE_OFFSET_MASK) > PAGE_SIZE)
1271 {
1272 for (;;)
1273 {
1274 size_t cbRead = RT_MIN(cb, PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK));
1275 int rc = PGMR3MapRead(pVM, pvDst, GCPtrSrc, cbRead);
1276 if (RT_FAILURE(rc))
1277 return rc;
1278 cb -= cbRead;
1279 if (!cb)
1280 break;
1281 pvDst = (char *)pvDst + cbRead;
1282 GCPtrSrc += cbRead;
1283 }
1284 return VINF_SUCCESS;
1285 }
1286
1287 /*
1288 * Find the mapping.
1289 */
1290 PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
1291 while (pCur)
1292 {
1293 RTGCPTR off = GCPtrSrc - pCur->GCPtr;
1294 if (off < pCur->cb)
1295 {
1296 if (off + cb > pCur->cb)
1297 {
1298 AssertMsgFailed(("Invalid page range %RGv LB%#x. mapping '%s' %RGv to %RGv\n",
1299 GCPtrSrc, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast));
1300 return VERR_INVALID_PARAMETER;
1301 }
1302
1303 unsigned iPT = off >> X86_PD_SHIFT;
1304 unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
1305 while (cb > 0 && iPTE < RT_ELEMENTS(CTXALLSUFF(pCur->aPTs[iPT].pPT)->a))
1306 {
1307 if (!CTXALLSUFF(pCur->aPTs[iPT].paPaePTs)[iPTE / 512].a[iPTE % 512].n.u1Present)
1308 return VERR_PAGE_NOT_PRESENT;
1309 RTHCPHYS HCPhys = CTXALLSUFF(pCur->aPTs[iPT].paPaePTs)[iPTE / 512].a[iPTE % 512].u & X86_PTE_PAE_PG_MASK;
1310
1311 /*
1312 * Get the virtual page from the physical one.
1313 */
1314 void *pvPage;
1315 int rc = MMR3HCPhys2HCVirt(pVM, HCPhys, &pvPage);
1316 if (RT_FAILURE(rc))
1317 return rc;
1318
1319 memcpy(pvDst, (char *)pvPage + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
1320 return VINF_SUCCESS;
1321 }
1322 }
1323
1324 /* next */
1325 pCur = CTXALLSUFF(pCur->pNext);
1326 }
1327
1328 return VERR_INVALID_POINTER;
1329}
1330
1331
1332/**
1333 * Info callback for 'pgmhandlers'.
1334 *
1335 * @param pHlp The output helpers.
1336 * @param pszArgs The arguments. phys or virt.
1337 */
1338DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1339{
1340 pHlp->pfnPrintf(pHlp, pVM->pgm.s.fMappingsFixed
1341 ? "\nThe mappings are FIXED.\n"
1342 : "\nThe mappings are FLOATING.\n");
1343 PPGMMAPPING pCur;
1344 for (pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1345 pHlp->pfnPrintf(pHlp, "%RGv - %RGv %s\n", pCur->GCPtr, pCur->GCPtrLast, pCur->pszDesc);
1346}
1347