VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMMap.cpp@19472

Last change on this file since 19472 was 19141, checked in by vboxsync, 16 years ago

Action flags breakup.
Fixed PGM saved state loading of 2.2.2 images.
Reduced hacks in PATM state loading (fixups).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 47.7 KB
1/* $Id: PGMMap.cpp 19141 2009-04-23 13:52:18Z vboxsync $ */
2/** @file
3 * PGM - Page Manager, Guest Context Mappings.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/dbgf.h>
28#include <VBox/pgm.h>
29#include "PGMInternal.h"
30#include <VBox/vm.h>
31
32#include <VBox/log.h>
33#include <VBox/err.h>
34#include <iprt/asm.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37
38
39/*******************************************************************************
40* Internal Functions *
41*******************************************************************************/
42static void pgmR3MapClearPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE);
43static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
44static int pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
45static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
46
47
48/**
49 * Creates a page table based mapping in GC.
50 *
51 * @returns VBox status code.
52 * @param pVM VM Handle.
53 * @param GCPtr Virtual Address. (Page table aligned!)
54 * @param cb Size of the range. Must be 4MB aligned!
55 * @param fFlags PGMR3MAPPT_FLAGS_UNMAPPABLE or 0.
56 * @param pfnRelocate Relocation callback function.
57 * @param pvUser User argument to the callback.
58 * @param pszDesc Pointer to description string. This must not be freed.
59 */
60VMMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, uint32_t fFlags, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc)
61{
62 LogFlow(("PGMR3MapPT: GCPtr=%#x cb=%d fFlags=%#x pfnRelocate=%p pvUser=%p pszDesc=%s\n", GCPtr, cb, fFlags, pfnRelocate, pvUser, pszDesc));
63 AssertMsg(pVM->pgm.s.pInterPD, ("Paging isn't initialized, init order problems!\n"));
64
65 /*
66 * Validate input.
67 */
68 Assert(!fFlags || fFlags == PGMR3MAPPT_FLAGS_UNMAPPABLE);
69 if (cb < _2M || cb > 64 * _1M)
70 {
71 AssertMsgFailed(("Serious? cb=%d\n", cb));
72 return VERR_INVALID_PARAMETER;
73 }
74 cb = RT_ALIGN_32(cb, _4M);
75 RTGCPTR GCPtrLast = GCPtr + cb - 1;
76 if (GCPtrLast < GCPtr)
77 {
78 AssertMsgFailed(("Range wraps! GCPtr=%x GCPtrLast=%x\n", GCPtr, GCPtrLast));
79 return VERR_INVALID_PARAMETER;
80 }
81 if (pVM->pgm.s.fMappingsFixed)
82 {
83 AssertMsgFailed(("Mappings are fixed! It's not possible to add new mappings at this time!\n"));
84 return VERR_PGM_MAPPINGS_FIXED;
85 }
86 if (!pfnRelocate)
87 {
88 AssertMsgFailed(("Callback is required\n"));
89 return VERR_INVALID_PARAMETER;
90 }
91
92 /*
93 * Find list location.
94 */
95 PPGMMAPPING pPrev = NULL;
96 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
97 while (pCur)
98 {
99 if (pCur->GCPtrLast >= GCPtr && pCur->GCPtr <= GCPtrLast)
100 {
101 AssertMsgFailed(("Address is already in use by %s. req %#x-%#x take %#x-%#x\n",
102 pCur->pszDesc, GCPtr, GCPtrLast, pCur->GCPtr, pCur->GCPtrLast));
103 LogRel(("VERR_PGM_MAPPING_CONFLICT: Address is already in use by %s. req %#x-%#x take %#x-%#x\n",
104 pCur->pszDesc, GCPtr, GCPtrLast, pCur->GCPtr, pCur->GCPtrLast));
105 return VERR_PGM_MAPPING_CONFLICT;
106 }
107 if (pCur->GCPtr > GCPtr)
108 break;
109 pPrev = pCur;
110 pCur = pCur->pNextR3;
111 }
112
113 /*
114 * Check for conflicts with intermediate mappings.
115 */
116 const unsigned iPageDir = GCPtr >> X86_PD_SHIFT;
117 const unsigned cPTs = cb >> X86_PD_SHIFT;
118 if (pVM->pgm.s.fFinalizedMappings)
119 {
120 for (unsigned i = 0; i < cPTs; i++)
121 if (pVM->pgm.s.pInterPD->a[iPageDir + i].n.u1Present)
122 {
123 AssertMsgFailed(("Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << PAGE_SHIFT)));
124 LogRel(("VERR_PGM_MAPPING_CONFLICT: Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << PAGE_SHIFT)));
125 return VERR_PGM_MAPPING_CONFLICT;
126 }
127 /** @todo AMD64: add check in PAE structures too, so we can remove all the 32-Bit paging stuff there. */
128 }
129
130 /*
131 * Allocate and initialize the new list node.
132 */
133 PPGMMAPPING pNew;
134 int rc;
135 if (fFlags & PGMR3MAPPT_FLAGS_UNMAPPABLE)
136 rc = MMHyperAlloc( pVM, RT_OFFSETOF(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM_MAPPINGS, (void **)&pNew);
137 else
138 rc = MMR3HyperAllocOnceNoRel(pVM, RT_OFFSETOF(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM_MAPPINGS, (void **)&pNew);
139 if (RT_FAILURE(rc))
140 return rc;
141 pNew->GCPtr = GCPtr;
142 pNew->GCPtrLast = GCPtrLast;
143 pNew->cb = cb;
144 pNew->pszDesc = pszDesc;
145 pNew->pfnRelocate = pfnRelocate;
146 pNew->pvUser = pvUser;
147 pNew->cPTs = cPTs;
148
149 /*
150 * Allocate page tables and insert them into the page directories.
151 * (One 32-bit PT and two PAE PTs.)
152 */
153 uint8_t *pbPTs;
154 if (fFlags & PGMR3MAPPT_FLAGS_UNMAPPABLE)
155 rc = MMHyperAlloc( pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM_MAPPINGS, (void **)&pbPTs);
156 else
157 rc = MMR3HyperAllocOnceNoRel(pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM_MAPPINGS, (void **)&pbPTs);
158 if (RT_FAILURE(rc))
159 {
160 MMHyperFree(pVM, pNew);
161 return VERR_NO_MEMORY;
162 }
163
164 /*
165 * Init the page tables and insert them into the page directories.
166 */
167 Log4(("PGMR3MapPT: GCPtr=%RGv cPTs=%u pbPTs=%p\n", GCPtr, cPTs, pbPTs));
168 for (unsigned i = 0; i < cPTs; i++)
169 {
170 /*
171 * 32-bit.
172 */
173 pNew->aPTs[i].pPTR3 = (PX86PT)pbPTs;
174 pNew->aPTs[i].pPTRC = MMHyperR3ToRC(pVM, pNew->aPTs[i].pPTR3);
175 pNew->aPTs[i].pPTR0 = MMHyperR3ToR0(pVM, pNew->aPTs[i].pPTR3);
176 pNew->aPTs[i].HCPhysPT = MMR3HyperHCVirt2HCPhys(pVM, pNew->aPTs[i].pPTR3);
177 pbPTs += PAGE_SIZE;
178 Log4(("PGMR3MapPT: i=%d: pPTR3=%RHv pPTRC=%RRv pPRTR0=%RHv HCPhysPT=%RHp\n",
179 i, pNew->aPTs[i].pPTR3, pNew->aPTs[i].pPTRC, pNew->aPTs[i].pPTR0, pNew->aPTs[i].HCPhysPT));
180
181 /*
182 * PAE.
183 */
184 pNew->aPTs[i].HCPhysPaePT0 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs);
185 pNew->aPTs[i].HCPhysPaePT1 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs + PAGE_SIZE);
186 pNew->aPTs[i].paPaePTsR3 = (PX86PTPAE)pbPTs;
187 pNew->aPTs[i].paPaePTsRC = MMHyperR3ToRC(pVM, pbPTs);
188 pNew->aPTs[i].paPaePTsR0 = MMHyperR3ToR0(pVM, pbPTs);
189 pbPTs += PAGE_SIZE * 2;
190 Log4(("PGMR3MapPT: i=%d: paPaePTsR#=%RHv paPaePTsRC=%RRv paPaePTsR#=%RHv HCPhysPaePT0=%RHp HCPhysPaePT1=%RHp\n",
191 i, pNew->aPTs[i].paPaePTsR3, pNew->aPTs[i].paPaePTsRC, pNew->aPTs[i].paPaePTsR0, pNew->aPTs[i].HCPhysPaePT0, pNew->aPTs[i].HCPhysPaePT1));
192 }
193 if (pVM->pgm.s.fFinalizedMappings)
194 pgmR3MapSetPDEs(pVM, pNew, iPageDir);
195 /* else PGMR3FinalizeMappings() */
196
197 /*
198 * Insert the new mapping.
199 */
200 pNew->pNextR3 = pCur;
201 pNew->pNextRC = pCur ? MMHyperR3ToRC(pVM, pCur) : NIL_RTRCPTR;
202 pNew->pNextR0 = pCur ? MMHyperR3ToR0(pVM, pCur) : NIL_RTR0PTR;
203 if (pPrev)
204 {
205 pPrev->pNextR3 = pNew;
206 pPrev->pNextRC = MMHyperR3ToRC(pVM, pNew);
207 pPrev->pNextR0 = MMHyperR3ToR0(pVM, pNew);
208 }
209 else
210 {
211 pVM->pgm.s.pMappingsR3 = pNew;
212 pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pNew);
213 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pNew);
214 }
215
216 for (unsigned i=0;i<pVM->cCPUs;i++)
217 {
218 PVMCPU pVCpu = &pVM->aCpus[i];
219 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
220 }
221 return VINF_SUCCESS;
222}
223
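/*
 * Illustrative sketch (names, address and size are assumptions, not VirtualBox
 * code): how a VMM component might reserve a floating page-table based mapping
 * with PGMR3MapPT during init. The callback parameter order follows the
 * pfnRelocate(pVM, GCPtrOld, GCPtrNew, PGMRELOCATECALL_*, pvUser) usage seen
 * elsewhere in this file.
 */
#if 0
static DECLCALLBACK(bool) exampleMapRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
                                             PGMRELOCATECALL enmMode, void *pvUser)
{
    /* Accept any suggested address; a real callback would update cached pointers
       when enmMode == PGMRELOCATECALL_RELOCATE. */
    NOREF(pVM); NOREF(GCPtrOld); NOREF(GCPtrNew); NOREF(enmMode); NOREF(pvUser);
    return true;
}

static int exampleReserveMapping(PVM pVM)
{
    /* 8 MB, 4 MB aligned initial placement; PGMR3FinalizeMappings() may move it
       later through the callback. Pass PGMR3MAPPT_FLAGS_UNMAPPABLE instead of 0
       if the mapping must be removable with PGMR3UnmapPT(). */
    return PGMR3MapPT(pVM, UINT32_C(0xa0000000), 8 * _1M, 0 /* fFlags */,
                      exampleMapRelocate, NULL /* pvUser */, "Example mapping");
}
#endif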
224
225/**
226 * Removes a page table based mapping.
227 *
228 * @returns VBox status code.
229 * @param pVM VM Handle.
230 * @param GCPtr Virtual Address. (Page table aligned!)
231 *
232 * @remarks Don't call this without passing PGMR3MAPPT_FLAGS_UNMAPPABLE to
233 * PGMR3MapPT or you'll burn in the heap.
234 */
235VMMR3DECL(int) PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr)
236{
237 LogFlow(("PGMR3UnmapPT: GCPtr=%#x\n", GCPtr));
238 AssertReturn(pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
239
240 /*
241 * Find it.
242 */
243 PPGMMAPPING pPrev = NULL;
244 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
245 while (pCur)
246 {
247 if (pCur->GCPtr == GCPtr)
248 {
249 /*
250 * Unlink it.
251 */
252 if (pPrev)
253 {
254 pPrev->pNextR3 = pCur->pNextR3;
255 pPrev->pNextRC = pCur->pNextRC;
256 pPrev->pNextR0 = pCur->pNextR0;
257 }
258 else
259 {
260 pVM->pgm.s.pMappingsR3 = pCur->pNextR3;
261 pVM->pgm.s.pMappingsRC = pCur->pNextRC;
262 pVM->pgm.s.pMappingsR0 = pCur->pNextR0;
263 }
264
265 /*
266 * Free the page table memory, clear page directory entries
267 * and free the page tables and node memory.
268 */
269 MMHyperFree(pVM, pCur->aPTs[0].pPTR3);
270 pgmR3MapClearPDEs(pVM, pCur, pCur->GCPtr >> X86_PD_SHIFT);
271 MMHyperFree(pVM, pCur);
272
273 for (unsigned i=0;i<pVM->cCPUs;i++)
274 {
275 PVMCPU pVCpu = &pVM->aCpus[i];
276 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
277 }
278 return VINF_SUCCESS;
279 }
280
281 /* done? */
282 if (pCur->GCPtr > GCPtr)
283 break;
284
285 /* next */
286 pPrev = pCur;
287 pCur = pCur->pNextR3;
288 }
289
290 AssertMsgFailed(("No mapping for %#x found!\n", GCPtr));
291 return VERR_INVALID_PARAMETER;
292}
293
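/*
 * Illustrative sketch (the flow and GCPtrCurrent are assumptions): removing a
 * mapping that was created with PGMR3MAPPT_FLAGS_UNMAPPABLE. The address must
 * be the mapping's *current* base, which a caller would normally have tracked
 * through its relocation callback, and the call is only valid after
 * PGMR3FinalizeMappings().
 */
#if 0
static int exampleRemoveMapping(PVM pVM, RTGCPTR GCPtrCurrent)
{
    return PGMR3UnmapPT(pVM, GCPtrCurrent);
}
#endif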
294
295/**
296 * Checks whether a range of PDEs in the intermediate
297 * memory context are unused.
298 *
299 * We're talking 32-bit PDEs here.
300 *
301 * @returns true/false.
302 * @param pVM Pointer to the shared VM structure.
303 * @param iPD The first PDE in the range.
304 * @param cPTs The number of PDEs in the range.
305 */
306DECLINLINE(bool) pgmR3AreIntermediatePDEsUnused(PVM pVM, unsigned iPD, unsigned cPTs)
307{
308 if (pVM->pgm.s.pInterPD->a[iPD].n.u1Present)
309 return false;
310 while (cPTs > 1)
311 {
312 iPD++;
313 if (pVM->pgm.s.pInterPD->a[iPD].n.u1Present)
314 return false;
315 cPTs--;
316 }
317 return true;
318}
319
320
321/**
322 * Unlinks the mapping.
323 *
324 * The mapping *must* be in the list.
325 *
326 * @param pVM Pointer to the shared VM structure.
327 * @param pMapping The mapping to unlink.
328 */
329static void pgmR3MapUnlink(PVM pVM, PPGMMAPPING pMapping)
330{
331 PPGMMAPPING pAfterThis = pVM->pgm.s.pMappingsR3;
332 if (pAfterThis == pMapping)
333 {
334 /* head */
335 pVM->pgm.s.pMappingsR3 = pMapping->pNextR3;
336 pVM->pgm.s.pMappingsRC = pMapping->pNextRC;
337 pVM->pgm.s.pMappingsR0 = pMapping->pNextR0;
338 }
339 else
340 {
341 /* in the list */
342 while (pAfterThis->pNextR3 != pMapping)
343 {
344 pAfterThis = pAfterThis->pNextR3;
345 AssertReleaseReturnVoid(pAfterThis);
346 }
347
348 pAfterThis->pNextR3 = pMapping->pNextR3;
349 pAfterThis->pNextRC = pMapping->pNextRC;
350 pAfterThis->pNextR0 = pMapping->pNextR0;
351 }
352}
353
354
355/**
356 * Links the mapping.
357 *
358 * @param pVM Pointer to the shared VM structure.
359 * @param pMapping The mapping to link.
360 */
361static void pgmR3MapLink(PVM pVM, PPGMMAPPING pMapping)
362{
363 /*
364 * Find the list location (the list is sorted by GCPtr) and link it in.
365 */
366 if ( !pVM->pgm.s.pMappingsR3
367 || pVM->pgm.s.pMappingsR3->GCPtr > pMapping->GCPtr)
368 {
369 /* head */
370 pMapping->pNextR3 = pVM->pgm.s.pMappingsR3;
371 pMapping->pNextRC = pVM->pgm.s.pMappingsRC;
372 pMapping->pNextR0 = pVM->pgm.s.pMappingsR0;
373 pVM->pgm.s.pMappingsR3 = pMapping;
374 pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pMapping);
375 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pMapping);
376 }
377 else
378 {
379 /* in the list */
380 PPGMMAPPING pAfterThis = pVM->pgm.s.pMappingsR3;
381 PPGMMAPPING pBeforeThis = pAfterThis->pNextR3;
382 while (pBeforeThis && pBeforeThis->GCPtr <= pMapping->GCPtr)
383 {
384 pAfterThis = pBeforeThis;
385 pBeforeThis = pBeforeThis->pNextR3;
386 }
387
388 pMapping->pNextR3 = pAfterThis->pNextR3;
389 pMapping->pNextRC = pAfterThis->pNextRC;
390 pMapping->pNextR0 = pAfterThis->pNextR0;
391 pAfterThis->pNextR3 = pMapping;
392 pAfterThis->pNextRC = MMHyperR3ToRC(pVM, pMapping);
393 pAfterThis->pNextR0 = MMHyperR3ToR0(pVM, pMapping);
394 }
395}
396
397
398/**
399 * Finalizes the intermediate context.
400 *
401 * This is called at the end of the ring-3 init and will construct the
402 * intermediate paging structures, relocating all the mappings in the process.
403 *
404 * @returns VBox status code.
405 * @param pVM Pointer to the shared VM structure.
406 * @thread EMT(0)
407 */
408VMMR3DECL(int) PGMR3FinalizeMappings(PVM pVM)
409{
410 AssertReturn(!pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
411 pVM->pgm.s.fFinalizedMappings = true;
412
413 /*
414 * Loop until all mappings have been finalized.
415 */
416 /*unsigned iPDNext = UINT32_C(0xc0000000) >> X86_PD_SHIFT;*/ /* makes CSAM/PATM freak out booting linux. :-/ */
417#if 0
418 unsigned iPDNext = MM_HYPER_AREA_ADDRESS >> X86_PD_SHIFT;
419#else
420 unsigned iPDNext = 1 << X86_PD_SHIFT; /* no hint, map them from the top. */
421#endif
422 PPGMMAPPING pCur;
423 do
424 {
425 pCur = pVM->pgm.s.pMappingsR3;
426 while (pCur)
427 {
428 if (!pCur->fFinalized)
429 {
430 /*
431 * Find a suitable location.
432 */
433 RTGCPTR const GCPtrOld = pCur->GCPtr;
434 const unsigned cPTs = pCur->cPTs;
435 unsigned iPDNew = iPDNext;
436 if ( iPDNew + cPTs >= X86_PG_ENTRIES /* exclude the last PD */
437 || !pgmR3AreIntermediatePDEsUnused(pVM, iPDNew, cPTs)
438 || !pCur->pfnRelocate(pVM, GCPtrOld, (RTGCPTR)iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
439 {
440 /* No luck, just scan down from 4GB-4MB, giving up at 4MB. */
441 iPDNew = X86_PG_ENTRIES - cPTs - 1;
442 while ( iPDNew > 0
443 && ( !pgmR3AreIntermediatePDEsUnused(pVM, iPDNew, cPTs)
444 || !pCur->pfnRelocate(pVM, GCPtrOld, (RTGCPTR)iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
445 )
446 iPDNew--;
447 AssertLogRelReturn(iPDNew != 0, VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
448 }
449
450 /*
451 * Relocate it (something akin to pgmR3MapRelocate).
452 */
453 pgmR3MapSetPDEs(pVM, pCur, iPDNew);
454
455 /* unlink the mapping, update the entry and relink it. */
456 pgmR3MapUnlink(pVM, pCur);
457
458 RTGCPTR const GCPtrNew = (RTGCPTR)iPDNew << X86_PD_SHIFT;
459 pCur->GCPtr = GCPtrNew;
460 pCur->GCPtrLast = GCPtrNew + pCur->cb - 1;
461 pCur->fFinalized = true;
462
463 pgmR3MapLink(pVM, pCur);
464
465 /* Finally work the callback. */
466 pCur->pfnRelocate(pVM, GCPtrOld, GCPtrNew, PGMRELOCATECALL_RELOCATE, pCur->pvUser);
467
468 /*
469 * The list order might have changed, start from the beginning again.
470 */
471 iPDNext = iPDNew + cPTs;
472 break;
473 }
474
475 /* next */
476 pCur = pCur->pNextR3;
477 }
478 } while (pCur);
479
480 return VINF_SUCCESS;
481}
482
483
484/**
485 * Gets the size of the current guest mappings if they were to be
486 * put next to one another.
487 *
488 * @returns VBox status code.
489 * @param pVM The VM.
490 * @param pcb Where to store the size.
491 */
492VMMR3DECL(int) PGMR3MappingsSize(PVM pVM, uint32_t *pcb)
493{
494 RTGCPTR cb = 0;
495 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
496 cb += pCur->cb;
497
498 *pcb = cb;
499 AssertReturn(*pcb == cb, VERR_NUMBER_TOO_BIG);
500 Log(("PGMR3MappingsSize: return %d (%#x) bytes\n", cb, cb));
501 return VINF_SUCCESS;
502}
503
504
505/**
506 * Fixes the guest context mappings in a range reserved from the Guest OS.
507 *
508 * @returns VBox status code.
509 * @param pVM The VM.
510 * @param GCPtrBase The address of the reserved range of guest memory.
511 * @param cb The size of the range starting at GCPtrBase.
512 */
513VMMR3DECL(int) PGMR3MappingsFix(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb)
514{
515 Log(("PGMR3MappingsFix: GCPtrBase=%#x cb=%#x\n", GCPtrBase, cb));
516
517 /* Ignore the additions mapping fix call in VT-x/AMD-V. */
518 if ( pVM->pgm.s.fMappingsFixed
519 && HWACCMR3IsActive(pVM))
520 return VINF_SUCCESS;
521
522 /* Only applies to VCPU 0 as we don't support SMP guests with raw mode. */
523 Assert(pVM->cCPUs == 1);
524
525 PVMCPU pVCpu = &pVM->aCpus[0];
526
527 /*
528 * This is all or nothing at all. So, a tiny bit of paranoia first.
529 */
530 if (GCPtrBase & X86_PAGE_4M_OFFSET_MASK)
531 {
532 AssertMsgFailed(("GCPtrBase (%#x) has to be aligned on a 4MB address!\n", GCPtrBase));
533 return VERR_INVALID_PARAMETER;
534 }
535 if (!cb || (cb & X86_PAGE_4M_OFFSET_MASK))
536 {
537 AssertMsgFailed(("cb (%#x) is 0 or not aligned on a 4MB address!\n", cb));
538 return VERR_INVALID_PARAMETER;
539 }
540
541 /*
542 * Before we do anything we'll do a forced PD sync to try to make sure any
543 * pending relocations caused by these mappings have been resolved.
544 */
545 PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), true);
546
547 /*
548 * Check that it's not conflicting with a core code mapping in the intermediate page table.
549 */
550 unsigned iPDNew = GCPtrBase >> X86_PD_SHIFT;
551 unsigned i = cb >> X86_PD_SHIFT;
552 while (i-- > 0)
553 {
554 if (pVM->pgm.s.pInterPD->a[iPDNew + i].n.u1Present)
555 {
556 /* Check that it's not one of our mappings. */
557 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
558 while (pCur)
559 {
560 if (iPDNew + i - (pCur->GCPtr >> X86_PD_SHIFT) < (pCur->cb >> X86_PD_SHIFT))
561 break;
562 pCur = pCur->pNextR3;
563 }
564 if (!pCur)
565 {
566 LogRel(("PGMR3MappingsFix: Conflicts with intermediate PDE %#x (GCPtrBase=%RGv cb=%#zx). The guest should retry.\n",
567 iPDNew + i, GCPtrBase, cb));
568 return VERR_PGM_MAPPINGS_FIX_CONFLICT;
569 }
570 }
571 }
572
573 /*
574 * In PAE / PAE mode (PAE guest and PAE shadow), make sure we don't cross page directories.
575 */
576 if ( ( pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE
577 || pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE_NX)
578 && ( pVCpu->pgm.s.enmShadowMode == PGMMODE_PAE
579 || pVCpu->pgm.s.enmShadowMode == PGMMODE_PAE_NX))
580 {
581 unsigned iPdptBase = GCPtrBase >> X86_PDPT_SHIFT;
582 unsigned iPdptLast = (GCPtrBase + cb - 1) >> X86_PDPT_SHIFT;
583 if (iPdptBase != iPdptLast)
584 {
585 LogRel(("PGMR3MappingsFix: Crosses PD boundrary; iPdptBase=%#x iPdptLast=%#x (GCPtrBase=%RGv cb=%#zx). The guest should retry.\n",
586 iPdptBase, iPdptLast, GCPtrBase, cb));
587 return VERR_PGM_MAPPINGS_FIX_CONFLICT;
588 }
589 }
590
591 /*
592 * Loop the mappings and check that they all agree on their new locations.
593 */
594 RTGCPTR GCPtrCur = GCPtrBase;
595 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
596 while (pCur)
597 {
598 if (!pCur->pfnRelocate(pVM, pCur->GCPtr, GCPtrCur, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
599 {
600 AssertMsgFailed(("The suggested fixed address %#x was rejected by '%s'!\n", GCPtrCur, pCur->pszDesc));
601 return VERR_PGM_MAPPINGS_FIX_REJECTED;
602 }
603 /* next */
604 GCPtrCur += pCur->cb;
605 pCur = pCur->pNextR3;
606 }
607 if (GCPtrCur > GCPtrBase + cb)
608 {
609 AssertMsgFailed(("cb (%#x) is less than the required range %#x!\n", cb, GCPtrCur - GCPtrBase));
610 return VERR_PGM_MAPPINGS_FIX_TOO_SMALL;
611 }
612
613 /*
614 * Loop the table assigning the mappings to the passed in memory
615 * and call their relocator callback.
616 */
617 GCPtrCur = GCPtrBase;
618 pCur = pVM->pgm.s.pMappingsR3;
619 while (pCur)
620 {
621 unsigned iPDOld = pCur->GCPtr >> X86_PD_SHIFT;
622 iPDNew = GCPtrCur >> X86_PD_SHIFT;
623
624 /*
625 * Relocate the page table(s).
626 */
627 pgmR3MapClearPDEs(pVM, pCur, iPDOld);
628 pgmR3MapSetPDEs(pVM, pCur, iPDNew);
629
630 /*
631 * Update the entry.
632 */
633 pCur->GCPtr = GCPtrCur;
634 pCur->GCPtrLast = GCPtrCur + pCur->cb - 1;
635
636 /*
637 * Callback to execute the relocation.
638 */
639 pCur->pfnRelocate(pVM, iPDOld << X86_PD_SHIFT, iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_RELOCATE, pCur->pvUser);
640
641 /*
642 * Advance.
643 */
644 GCPtrCur += pCur->cb;
645 pCur = pCur->pNextR3;
646 }
647
648 /*
649 * Mark the mappings as fixed and return.
650 */
651 pVM->pgm.s.fMappingsFixed = true;
652 pVM->pgm.s.GCPtrMappingFixed = GCPtrBase;
653 pVM->pgm.s.cbMappingFixed = cb;
654
655 for (unsigned i=0;i<pVM->cCPUs;i++)
656 {
657 PVMCPU pVCpu = &pVM->aCpus[i];
658 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
659 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
660 }
661 return VINF_SUCCESS;
662}
663
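/*
 * Illustrative sketch (hypothetical caller, not VirtualBox code) of the usual
 * fixing sequence: query the packed size of all mappings with
 * PGMR3MappingsSize(), have the guest reserve such a range, then pin the
 * mappings there with PGMR3MappingsFix(). GCPtrReserved must be 4 MB aligned.
 */
#if 0
static int exampleFixMappings(PVM pVM, RTGCPTR GCPtrReserved)
{
    uint32_t cb;
    int rc = PGMR3MappingsSize(pVM, &cb);              /* bytes needed back to back */
    if (RT_SUCCESS(rc))
        rc = PGMR3MappingsFix(pVM, GCPtrReserved, cb);
    return rc;
}
#endif
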
664/**
665 * Disable the hypervisor mappings in the shadow page tables (doesn't touch the intermediate table!)
666 *
667 * @returns VBox status code.
668 * @param pVM The VM.
669 */
670VMMR3DECL(int) PGMR3MappingsDisable(PVM pVM)
671{
672 uint32_t cb;
673 int rc = PGMR3MappingsSize(pVM, &cb);
674 AssertRCReturn(rc, rc);
675
676 /* Only applies to VCPU 0. */
677 PVMCPU pVCpu = &pVM->aCpus[0];
678
679 rc = pgmMapDeactivateCR3(pVM, pVCpu->pgm.s.pShwPageCR3R3);
680 AssertRCReturn(rc, rc);
681
682 /*
683 * Mark the mappings as fixed (using fake values) and disabled.
684 */
685 pVM->pgm.s.fDisableMappings = true;
686 pVM->pgm.s.fMappingsFixed = true;
687 pVM->pgm.s.GCPtrMappingFixed = MM_HYPER_AREA_ADDRESS;
688 pVM->pgm.s.cbMappingFixed = cb;
689 for (unsigned i=0;i<pVM->cCPUs;i++)
690 {
691 PVMCPU pVCpu = &pVM->aCpus[i];
692
693 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
694 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
695 }
696 return VINF_SUCCESS;
697}
698
699
700/**
701 * Unfixes the mappings.
702 * After calling this function mapping conflict detection will be enabled.
703 *
704 * @returns VBox status code.
705 * @param pVM The VM.
706 */
707VMMR3DECL(int) PGMR3MappingsUnfix(PVM pVM)
708{
709 Log(("PGMR3MappingsUnfix: fMappingsFixed=%d\n", pVM->pgm.s.fMappingsFixed));
710
711 /* Ignore in VT-x/AMD-V mode. */
712 if (HWACCMR3IsActive(pVM))
713 return VINF_SUCCESS;
714
715 pVM->pgm.s.fMappingsFixed = false;
716 pVM->pgm.s.GCPtrMappingFixed = 0;
717 pVM->pgm.s.cbMappingFixed = 0;
718 for (unsigned i=0;i<pVM->cCPUs;i++)
719 {
720 PVMCPU pVCpu = &pVM->aCpus[i];
721
722 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
723 }
724 return VINF_SUCCESS;
725}
726
727
728/**
729 * Map pages into the intermediate context (switcher code).
730 * These pages are mapped at both the given virtual address and at
731 * the physical address (for identity mapping).
732 *
733 * @returns VBox status code.
734 * @param pVM The virtual machine.
735 * @param Addr Intermediate context address of the mapping.
736 * @param HCPhys Start of the range of physical pages. This must be entirely below 4GB!
737 * @param cbPages Number of bytes to map.
738 *
739 * @remark This API shall not be used for anything but mapping the switcher code.
740 */
741VMMR3DECL(int) PGMR3MapIntermediate(PVM pVM, RTUINTPTR Addr, RTHCPHYS HCPhys, unsigned cbPages)
742{
743 LogFlow(("PGMR3MapIntermediate: Addr=%RTptr HCPhys=%RHp cbPages=%#x\n", Addr, HCPhys, cbPages));
744
745 /*
746 * Adjust input.
747 */
748 cbPages += (uint32_t)HCPhys & PAGE_OFFSET_MASK;
749 cbPages = RT_ALIGN(cbPages, PAGE_SIZE);
750 HCPhys &= X86_PTE_PAE_PG_MASK;
751 Addr &= PAGE_BASE_MASK;
752 /* We only care about the first 4GB, because on AMD64 we'll be repeating them all over the address space. */
753 uint32_t uAddress = (uint32_t)Addr;
754
755 /*
756 * Assert input and state.
757 */
758 AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));
759 AssertMsg(pVM->pgm.s.pInterPD, ("Bad init order, paging.\n"));
760 AssertMsg(cbPages <= (512 << PAGE_SHIFT), ("The mapping is too big %d bytes\n", cbPages));
761 AssertMsg(HCPhys < _4G && HCPhys + cbPages < _4G, ("Addr=%RTptr HCPhys=%RHp cbPages=%d\n", Addr, HCPhys, cbPages));
762 AssertReturn(!pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
763
764 /*
765 * Check for internal conflicts between the virtual address and the physical address.
766 * A 1:1 mapping is fine, but partial overlapping is a no-no.
767 */
768 if ( uAddress != HCPhys
769 && ( uAddress < HCPhys
770 ? HCPhys - uAddress < cbPages
771 : uAddress - HCPhys < cbPages
772 )
773 )
774 AssertLogRelMsgFailedReturn(("Addr=%RTptr HCPhys=%RHp cbPages=%d\n", Addr, HCPhys, cbPages),
775 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
776
777 const unsigned cPages = cbPages >> PAGE_SHIFT;
778 int rc = pgmR3MapIntermediateCheckOne(pVM, uAddress, cPages, pVM->pgm.s.apInterPTs[0], pVM->pgm.s.apInterPaePTs[0]);
779 if (RT_FAILURE(rc))
780 return rc;
781 rc = pgmR3MapIntermediateCheckOne(pVM, (uintptr_t)HCPhys, cPages, pVM->pgm.s.apInterPTs[1], pVM->pgm.s.apInterPaePTs[1]);
782 if (RT_FAILURE(rc))
783 return rc;
784
785 /*
786 * Everything's fine, do the mapping.
787 */
788 pgmR3MapIntermediateDoOne(pVM, uAddress, HCPhys, cPages, pVM->pgm.s.apInterPTs[0], pVM->pgm.s.apInterPaePTs[0]);
789 pgmR3MapIntermediateDoOne(pVM, (uintptr_t)HCPhys, HCPhys, cPages, pVM->pgm.s.apInterPTs[1], pVM->pgm.s.apInterPaePTs[1]);
790
791 return VINF_SUCCESS;
792}
793
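/*
 * Illustrative sketch (the address and helper name are assumptions): mapping
 * one page of switcher code both at a chosen sub-4GB intermediate address and
 * 1:1 at its physical address. Must be called before PGMR3FinalizeMappings(),
 * and HCPhysSwitcher must lie below 4GB.
 */
#if 0
static int exampleMapSwitcherPage(PVM pVM, RTHCPHYS HCPhysSwitcher)
{
    return PGMR3MapIntermediate(pVM, UINT32_C(0xffc00000) /* assumed address */,
                                HCPhysSwitcher, PAGE_SIZE);
}
#endif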
794
795/**
796 * Validates that there are no conflicts for this mapping into the intermediate context.
797 *
798 * @returns VBox status code.
799 * @param pVM VM handle.
800 * @param uAddress Address of the mapping.
801 * @param cPages Number of pages.
802 * @param pPTDefault Pointer to the default page table for this mapping.
803 * @param pPTPaeDefault Pointer to the default PAE page table for this mapping.
804 */
805static int pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault)
806{
807 AssertMsg((uAddress >> X86_PD_SHIFT) + cPages <= 1024, ("64-bit fixme\n"));
808
809 /*
810 * Check that the ranges are available.
811 * (This code doesn't have to be fast.)
812 */
813 while (cPages > 0)
814 {
815 /*
816 * 32-Bit.
817 */
818 unsigned iPDE = (uAddress >> X86_PD_SHIFT) & X86_PD_MASK;
819 unsigned iPTE = (uAddress >> X86_PT_SHIFT) & X86_PT_MASK;
820 PX86PT pPT = pPTDefault;
821 if (pVM->pgm.s.pInterPD->a[iPDE].u)
822 {
823 RTHCPHYS HCPhysPT = pVM->pgm.s.pInterPD->a[iPDE].u & X86_PDE_PG_MASK;
824 if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[0]))
825 pPT = pVM->pgm.s.apInterPTs[0];
826 else if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[1]))
827 pPT = pVM->pgm.s.apInterPTs[1];
828 else
829 {
830 /** @todo this must be handled with a relocation of the conflicting mapping!
831 * Which of course cannot be done because we're in the middle of the initialization. bad design! */
832 AssertLogRelMsgFailedReturn(("Conflict between core code and PGMR3Mapping(). uAddress=%RHv\n", uAddress),
833 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
834 }
835 }
836 if (pPT->a[iPTE].u)
837 AssertLogRelMsgFailedReturn(("Conflict iPTE=%#x iPDE=%#x uAddress=%RHv pPT->a[iPTE].u=%RX32\n", iPTE, iPDE, uAddress, pPT->a[iPTE].u),
838 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
839
840 /*
841 * PAE.
842 */
843 const unsigned iPDPE= (uAddress >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
844 iPDE = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
845 iPTE = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;
846 Assert(iPDPE < 4);
847 Assert(pVM->pgm.s.apInterPaePDs[iPDPE]);
848 PX86PTPAE pPTPae = pPTPaeDefault;
849 if (pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u)
850 {
851 RTHCPHYS HCPhysPT = pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u & X86_PDE_PAE_PG_MASK;
852 if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[0]))
853 pPTPae = pVM->pgm.s.apInterPaePTs[0];
854 else if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[1]))
855 pPTPae = pVM->pgm.s.apInterPaePTs[1];
856 else
857 {
858 /** @todo this must be handled with a relocation of the conflicting mapping!
859 * Which of course cannot be done because we're in the middle of the initialization. bad design! */
860 AssertLogRelMsgFailedReturn(("Conflict between core code and PGMR3Mapping(). uAddress=%RHv\n", uAddress),
861 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
862 }
863 }
864 if (pPTPae->a[iPTE].u)
865 AssertLogRelMsgFailedReturn(("Conflict iPTE=%#x iPDE=%#x uAddress=%RHv pPTPae->a[iPTE].u=%#RX64\n", iPTE, iPDE, uAddress, pPTPae->a[iPTE].u),
866 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
867
868 /* next */
869 uAddress += PAGE_SIZE;
870 cPages--;
871 }
872
873 return VINF_SUCCESS;
874}
875
876
877
878/**
879 * Sets up the intermediate page tables for a verified mapping.
880 *
881 * @param pVM VM handle.
882 * @param uAddress Address of the mapping.
883 * @param HCPhys The physical address of the page range.
884 * @param cPages Number of pages.
885 * @param pPTDefault Pointer to the default page table for this mapping.
886 * @param pPTPaeDefault Pointer to the default PAE page table for this mapping.
887 */
888static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault)
889{
890 while (cPages > 0)
891 {
892 /*
893 * 32-Bit.
894 */
895 unsigned iPDE = (uAddress >> X86_PD_SHIFT) & X86_PD_MASK;
896 unsigned iPTE = (uAddress >> X86_PT_SHIFT) & X86_PT_MASK;
897 PX86PT pPT;
898 if (pVM->pgm.s.pInterPD->a[iPDE].u)
899 pPT = (PX86PT)MMPagePhys2Page(pVM, pVM->pgm.s.pInterPD->a[iPDE].u & X86_PDE_PG_MASK);
900 else
901 {
902 pVM->pgm.s.pInterPD->a[iPDE].u = X86_PDE_P | X86_PDE_A | X86_PDE_RW
903 | (uint32_t)MMPage2Phys(pVM, pPTDefault);
904 pPT = pPTDefault;
905 }
906 pPT->a[iPTE].u = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D | (uint32_t)HCPhys;
907
908 /*
909 * PAE
910 */
911 const unsigned iPDPE= (uAddress >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
912 iPDE = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
913 iPTE = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;
914 Assert(iPDPE < 4);
915 Assert(pVM->pgm.s.apInterPaePDs[iPDPE]);
916 PX86PTPAE pPTPae;
917 if (pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u)
918 pPTPae = (PX86PTPAE)MMPagePhys2Page(pVM, pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u & X86_PDE_PAE_PG_MASK);
919 else
920 {
921 pPTPae = pPTPaeDefault;
922 pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u = X86_PDE_P | X86_PDE_A | X86_PDE_RW
923 | MMPage2Phys(pVM, pPTPaeDefault);
924 }
925 pPTPae->a[iPTE].u = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D | HCPhys;
926
927 /* next */
928 cPages--;
929 HCPhys += PAGE_SIZE;
930 uAddress += PAGE_SIZE;
931 }
932}
933
934
935/**
936 * Clears all PDEs involved with the mapping in the shadow and intermediate page tables.
937 *
938 * @param pVM The VM handle.
939 * @param pMap Pointer to the mapping in question.
940 * @param iOldPDE The index of the 32-bit PDE corresponding to the base of the mapping.
941 */
942static void pgmR3MapClearPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE)
943{
944 unsigned i = pMap->cPTs;
945 PVMCPU pVCpu = VMMGetCpu(pVM);
946
947 pgmMapClearShadowPDEs(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), pMap, iOldPDE, false /*fDeactivateCR3*/);
948
949 iOldPDE += i;
950 while (i-- > 0)
951 {
952 iOldPDE--;
953
954 /*
955 * 32-bit.
956 */
957 pVM->pgm.s.pInterPD->a[iOldPDE].u = 0;
958 /*
959 * PAE.
960 */
961 const unsigned iPD = iOldPDE / 256; /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
962 unsigned iPDE = iOldPDE * 2 % 512;
963 pVM->pgm.s.apInterPaePDs[iPD]->a[iPDE].u = 0;
964 iPDE++;
965 AssertFatal(iPDE < 512);
966 pVM->pgm.s.apInterPaePDs[iPD]->a[iPDE].u = 0;
967 }
968}
969
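/*
 * Worked example of the 32-bit -> PAE index arithmetic used above and in
 * pgmR3MapSetPDEs below: one 32-bit PDE covers 4 MB while a PAE PDE covers
 * only 2 MB, so every 32-bit entry expands to two PAE entries. For
 * iOldPDE = 769 (GCPtr 0xC0400000): iPD = 769 / 256 = 3 selects PAE page
 * directory 3 (PDPT entry 3), and iPDE = 769 * 2 % 512 = 2, so PAE PDEs 2
 * and 3 of that directory are the ones cleared (or set).
 */
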
970/**
971 * Sets all PDEs involved with the mapping in the shadow and intermediate page tables.
972 *
973 * @param pVM The VM handle.
974 * @param pMap Pointer to the mapping in question.
975 * @param iNewPDE The index of the 32-bit PDE corresponding to the base of the mapping.
976 */
977static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
978{
979 PPGM pPGM = &pVM->pgm.s;
980 PVMCPU pVCpu = VMMGetCpu(pVM);
981
982 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s) || PGMGetGuestMode(pVCpu) <= PGMMODE_PAE_NX);
983
984 pgmMapSetShadowPDEs(pVM, pMap, iNewPDE);
985
986 /*
987 * Init the page tables and insert them into the page directories.
988 */
989 unsigned i = pMap->cPTs;
990 iNewPDE += i;
991 while (i-- > 0)
992 {
993 iNewPDE--;
994
995 /*
996 * 32-bit.
997 */
998 X86PDE Pde;
999 /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
1000 Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
1001 pPGM->pInterPD->a[iNewPDE] = Pde;
1002 /*
1003 * PAE.
1004 */
1005 const unsigned iPD = iNewPDE / 256;
1006 unsigned iPDE = iNewPDE * 2 % 512;
1007 X86PDEPAE PdePae0;
1008 PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
1009 pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae0;
1010 iPDE++;
1011 AssertFatal(iPDE < 512);
1012 X86PDEPAE PdePae1;
1013 PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
1014 pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae1;
1015 }
1016}
1017
1018/**
1019 * Relocates a mapping to a new address.
1020 *
1021 * @param pVM VM handle.
1022 * @param pMapping The mapping to relocate.
1023 * @param GCPtrOldMapping The address of the start of the old mapping.
1024 * @param GCPtrNewMapping The address of the start of the new mapping.
1025 */
1026void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping, RTGCPTR GCPtrNewMapping)
1027{
1028 unsigned iPDOld = GCPtrOldMapping >> X86_PD_SHIFT;
1029 unsigned iPDNew = GCPtrNewMapping >> X86_PD_SHIFT;
1030
1031 Log(("PGM: Relocating %s from %RGv to %RGv\n", pMapping->pszDesc, GCPtrOldMapping, GCPtrNewMapping));
1032 AssertMsg(((unsigned)iPDOld << X86_PD_SHIFT) == pMapping->GCPtr, ("%RGv vs %RGv\n", (RTGCPTR)((unsigned)iPDOld << X86_PD_SHIFT), pMapping->GCPtr));
1033
1034 /*
1035 * Relocate the page table(s).
1036 */
1037 pgmR3MapClearPDEs(pVM, pMapping, iPDOld);
1038 pgmR3MapSetPDEs(pVM, pMapping, iPDNew);
1039
1040 /*
1041 * Update and resort the mapping list.
1042 */
1043
1044 /* Find previous mapping for pMapping, put result into pPrevMap. */
1045 PPGMMAPPING pPrevMap = NULL;
1046 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
1047 while (pCur && pCur != pMapping)
1048 {
1049 /* next */
1050 pPrevMap = pCur;
1051 pCur = pCur->pNextR3;
1052 }
1053 Assert(pCur);
1054
1055 /* Find the first mapping with GCPtr >= the new address. */
1056 RTGCPTR GCPtrNew = iPDNew << X86_PD_SHIFT;
1057 PPGMMAPPING pPrev = NULL;
1058 pCur = pVM->pgm.s.pMappingsR3;
1059 while (pCur && pCur->GCPtr < GCPtrNew)
1060 {
1061 /* next */
1062 pPrev = pCur;
1063 pCur = pCur->pNextR3;
1064 }
1065
1066 if (pCur != pMapping && pPrev != pMapping)
1067 {
1068 /*
1069 * Unlink.
1070 */
1071 if (pPrevMap)
1072 {
1073 pPrevMap->pNextR3 = pMapping->pNextR3;
1074 pPrevMap->pNextRC = pMapping->pNextRC;
1075 pPrevMap->pNextR0 = pMapping->pNextR0;
1076 }
1077 else
1078 {
1079 pVM->pgm.s.pMappingsR3 = pMapping->pNextR3;
1080 pVM->pgm.s.pMappingsRC = pMapping->pNextRC;
1081 pVM->pgm.s.pMappingsR0 = pMapping->pNextR0;
1082 }
1083
1084 /*
1085 * Link
1086 */
1087 pMapping->pNextR3 = pCur;
1088 if (pPrev)
1089 {
1090 pMapping->pNextRC = pPrev->pNextRC;
1091 pMapping->pNextR0 = pPrev->pNextR0;
1092 pPrev->pNextR3 = pMapping;
1093 pPrev->pNextRC = MMHyperR3ToRC(pVM, pMapping);
1094 pPrev->pNextR0 = MMHyperR3ToR0(pVM, pMapping);
1095 }
1096 else
1097 {
1098 pMapping->pNextRC = pVM->pgm.s.pMappingsRC;
1099 pMapping->pNextR0 = pVM->pgm.s.pMappingsR0;
1100 pVM->pgm.s.pMappingsR3 = pMapping;
1101 pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pMapping);
1102 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pMapping);
1103 }
1104 }
1105
1106 /*
1107 * Update the entry.
1108 */
1109 pMapping->GCPtr = GCPtrNew;
1110 pMapping->GCPtrLast = GCPtrNew + pMapping->cb - 1;
1111
1112 /*
1113 * Callback to execute the relocation.
1114 */
1115 pMapping->pfnRelocate(pVM, iPDOld << X86_PD_SHIFT, iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_RELOCATE, pMapping->pvUser);
1116}
1117
1118
1119/**
1120 * Checks whether a proposed new mapping address has already been tried and clashed with the guest page tables.
1121 *
1122 * @returns true if the address is a known conflict address, false otherwise.
1123 * @param pMapping The mapping which conflicts.
1124 * @param GCPtr New mapping address to try
1125 */
1126bool pgmR3MapIsKnownConflictAddress(PPGMMAPPING pMapping, RTGCPTR GCPtr)
1127{
1128 for (unsigned i = 0; i < RT_ELEMENTS(pMapping->aGCPtrConflicts); i++)
1129 {
1130 if (GCPtr == pMapping->aGCPtrConflicts[i])
1131 return true;
1132 }
1133 return false;
1134}
1135
1136
1137/**
1138 * Resolves a conflict between a page table based GC mapping and
1139 * the Guest OS page tables. (32 bits version)
1140 *
1141 * @returns VBox status code.
1142 * @param pVM VM Handle.
1143 * @param pMapping The mapping which conflicts.
1144 * @param pPDSrc The page directory of the guest OS.
1145 * @param GCPtrOldMapping The address of the start of the current mapping.
1146 */
1147int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping)
1148{
1149 STAM_REL_COUNTER_INC(&pVM->pgm.s.cRelocations);
1150 STAM_PROFILE_START(&pVM->pgm.s.StatR3ResolveConflict, a);
1151
1152 /* Raw mode only which implies one VCPU. */
1153 Assert(pVM->cCPUs == 1);
1154
1155 pMapping->aGCPtrConflicts[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;
1156 pMapping->cConflicts++;
1157
1158 /*
1159 * Scan for free page directory entries.
1160 *
1161 * Note that we do not support mappings at the very end of the
1162 * address space since that will break our GCPtrEnd assumptions.
1163 */
1164 const unsigned cPTs = pMapping->cPTs;
1165 unsigned iPDNew = RT_ELEMENTS(pPDSrc->a) - cPTs; /* (+ 1 - 1) */
1166 while (iPDNew-- > 0)
1167 {
1168 if (pPDSrc->a[iPDNew].n.u1Present)
1169 continue;
1170
1171 if (pgmR3MapIsKnownConflictAddress(pMapping, iPDNew << X86_PD_SHIFT))
1172 continue;
1173
1174 if (cPTs > 1)
1175 {
1176 bool fOk = true;
1177 for (unsigned i = 1; fOk && i < cPTs; i++)
1178 if (pPDSrc->a[iPDNew + i].n.u1Present)
1179 fOk = false;
1180 if (!fOk)
1181 continue;
1182 }
1183
1184 /*
1185 * Check that it's not conflicting with an intermediate page table mapping.
1186 */
1187 bool fOk = true;
1188 unsigned i = cPTs;
1189 while (fOk && i-- > 0)
1190 fOk = !pVM->pgm.s.pInterPD->a[iPDNew + i].n.u1Present;
1191 if (!fOk)
1192 continue;
1193 /** @todo AMD64 should check the PAE directories and skip the 32bit stuff. */
1194
1195 /*
1196 * Ask for the mapping.
1197 */
1198 RTGCPTR GCPtrNewMapping = iPDNew << X86_PD_SHIFT;
1199
1200 if (pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))
1201 {
1202 pgmR3MapRelocate(pVM, pMapping, GCPtrOldMapping, GCPtrNewMapping);
1203 STAM_PROFILE_STOP(&pVM->pgm.s.StatR3ResolveConflict, a);
1204 return VINF_SUCCESS;
1205 }
1206 }
1207
1208 STAM_PROFILE_STOP(&pVM->pgm.s.StatR3ResolveConflict, a);
1209 AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, GCPtrOldMapping, cPTs));
1210 return VERR_PGM_NO_HYPERVISOR_ADDRESS;
1211}
1212
1213
1214/**
1215 * Resolves a conflict between a page table based GC mapping and
1216 * the Guest OS page tables. (PAE bits version)
1217 *
1218 * @returns VBox status code.
1219 * @param pVM VM Handle.
1220 * @param pMapping The mapping which conflicts.
1221 * @param GCPtrOldMapping The address of the start of the current mapping.
1222 */
1223int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping)
1224{
1225 STAM_REL_COUNTER_INC(&pVM->pgm.s.cRelocations);
1226 STAM_PROFILE_START(&pVM->pgm.s.StatR3ResolveConflict, a);
1227
1228 /* Raw mode only which implies one VCPU. */
1229 Assert(pVM->cCPUs == 1);
1230 PVMCPU pVCpu = VMMGetCpu(pVM);
1231
1232 pMapping->aGCPtrConflicts[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;
1233 pMapping->cConflicts++;
1234
1235 for (int iPDPTE = X86_PG_PAE_PDPE_ENTRIES - 1; iPDPTE >= 0; iPDPTE--)
1236 {
1237 unsigned iPDSrc;
1238 PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(&pVCpu->pgm.s, (RTGCPTR32)iPDPTE << X86_PDPT_SHIFT, &iPDSrc, NULL);
1239
1240 /*
1241 * Scan for free page directory entries.
1242 *
1243 * Note that we do not support mappings at the very end of the
1244 * address space since that will break our GCPtrEnd assumptions.
1245 * Nor do we support mappings crossing page directories.
1246 */
1247 const unsigned cPTs = pMapping->cb >> X86_PD_PAE_SHIFT;
1248 unsigned iPDNew = RT_ELEMENTS(pPDSrc->a) - cPTs; /* (+ 1 - 1) */
1249
1250 while (iPDNew-- > 0)
1251 {
1252 /* Ugly assumption that mappings start on a 4 MB boundary. */
1253 if (iPDNew & 1)
1254 continue;
1255
1256 if (pgmR3MapIsKnownConflictAddress(pMapping, ((RTGCPTR32)iPDPTE << X86_PDPT_SHIFT) + (iPDNew << X86_PD_PAE_SHIFT)))
1257 continue;
1258
1259 if (pPDSrc)
1260 {
1261 if (pPDSrc->a[iPDNew].n.u1Present)
1262 continue;
1263 if (cPTs > 1)
1264 {
1265 bool fOk = true;
1266 for (unsigned i = 1; fOk && i < cPTs; i++)
1267 if (pPDSrc->a[iPDNew + i].n.u1Present)
1268 fOk = false;
1269 if (!fOk)
1270 continue;
1271 }
1272 }
1273 /*
1274 * Check that it's not conflicting with an intermediate page table mapping.
1275 */
1276 bool fOk = true;
1277 unsigned i = cPTs;
1278 while (fOk && i-- > 0)
1279 fOk = !pVM->pgm.s.apInterPaePDs[iPDPTE]->a[iPDNew + i].n.u1Present;
1280 if (!fOk)
1281 continue;
1282
1283 /*
1284 * Ask for the mapping.
1285 */
1286 RTGCPTR GCPtrNewMapping = ((RTGCPTR32)iPDPTE << X86_PDPT_SHIFT) + (iPDNew << X86_PD_PAE_SHIFT);
1287
1288 if (pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))
1289 {
1290 pgmR3MapRelocate(pVM, pMapping, GCPtrOldMapping, GCPtrNewMapping);
1291 STAM_PROFILE_STOP(&pVM->pgm.s.StatR3ResolveConflict, a);
1292 return VINF_SUCCESS;
1293 }
1294 }
1295 }
1296 STAM_PROFILE_STOP(&pVM->pgm.s.StatR3ResolveConflict, a);
1297 AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, GCPtrOldMapping, pMapping->cb >> X86_PD_PAE_SHIFT));
1298 return VERR_PGM_NO_HYPERVISOR_ADDRESS;
1299}
1300
1301
1302/**
1303 * Read memory from the guest mappings.
1304 *
1305 * This will use the page tables associated with the mappings to
1306 * read the memory. This means that not all kinds of memory are readable
1307 * since we don't necessarily know how to convert that physical address
1308 * to a HC virtual one.
1309 *
1310 * @returns VBox status code.
1311 * @param pVM VM handle.
1312 * @param pvDst The destination address (HC of course).
1313 * @param GCPtrSrc The source address (GC virtual address).
1314 * @param cb Number of bytes to read.
1315 *
1316 * @remarks This is indirectly for DBGF only.
1317 * @todo Consider renaming it to indicate it's special usage, or just
1318 * reimplement it in MMR3HyperReadGCVirt.
1319 */
1320VMMR3DECL(int) PGMR3MapRead(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
1321{
1322 /*
1323 * Simplicity over speed... Chop the request up into chunks
1324 * which don't cross pages.
1325 */
1326 if (cb + (GCPtrSrc & PAGE_OFFSET_MASK) > PAGE_SIZE)
1327 {
1328 for (;;)
1329 {
1330 size_t cbRead = RT_MIN(cb, PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK));
1331 int rc = PGMR3MapRead(pVM, pvDst, GCPtrSrc, cbRead);
1332 if (RT_FAILURE(rc))
1333 return rc;
1334 cb -= cbRead;
1335 if (!cb)
1336 break;
1337 pvDst = (char *)pvDst + cbRead;
1338 GCPtrSrc += cbRead;
1339 }
1340 return VINF_SUCCESS;
1341 }
1342
1343 /*
1344 * Find the mapping.
1345 */
1346 PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
1347 while (pCur)
1348 {
1349 RTGCPTR off = GCPtrSrc - pCur->GCPtr;
1350 if (off < pCur->cb)
1351 {
1352 if (off + cb > pCur->cb)
1353 {
1354 AssertMsgFailed(("Invalid page range %RGv LB%#x. mapping '%s' %RGv to %RGv\n",
1355 GCPtrSrc, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast));
1356 return VERR_INVALID_PARAMETER;
1357 }
1358
1359 unsigned iPT = off >> X86_PD_SHIFT;
1360 unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
1361 while (cb > 0 && iPTE < RT_ELEMENTS(CTXALLSUFF(pCur->aPTs[iPT].pPT)->a))
1362 {
1363 if (!CTXALLSUFF(pCur->aPTs[iPT].paPaePTs)[iPTE / 512].a[iPTE % 512].n.u1Present)
1364 return VERR_PAGE_NOT_PRESENT;
1365 RTHCPHYS HCPhys = CTXALLSUFF(pCur->aPTs[iPT].paPaePTs)[iPTE / 512].a[iPTE % 512].u & X86_PTE_PAE_PG_MASK;
1366
1367 /*
1368 * Get the virtual page from the physical one.
1369 */
1370 void *pvPage;
1371 int rc = MMR3HCPhys2HCVirt(pVM, HCPhys, &pvPage);
1372 if (RT_FAILURE(rc))
1373 return rc;
1374
1375 memcpy(pvDst, (char *)pvPage + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
1376 return VINF_SUCCESS;
1377 }
1378 }
1379
1380 /* next */
1381 pCur = CTXALLSUFF(pCur->pNext);
1382 }
1383
1384 return VERR_INVALID_POINTER;
1385}
1386
1387
1388/**
1389 * Info callback for 'mappings'.
1390 *
1391 * @param pHlp The output helpers.
1392 * @param pszArgs The arguments (ignored).
1393 */
1394DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1395{
1396 pHlp->pfnPrintf(pHlp, pVM->pgm.s.fMappingsFixed
1397 ? "\nThe mappings are FIXED.\n"
1398 : "\nThe mappings are FLOATING.\n");
1399 PPGMMAPPING pCur;
1400 for (pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1401 pHlp->pfnPrintf(pHlp, "%RGv - %RGv %s\n", pCur->GCPtr, pCur->GCPtrLast, pCur->pszDesc);
1402}
1403