VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp@ 18620

Last change on this file since 18620 was 18620, checked in by vboxsync, 16 years ago (log message fragment: "exception.")

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 132.3 KB
1/* $Id: PGMPhys.cpp 18620 2009-04-01 22:22:45Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM_PHYS
27#include <VBox/pgm.h>
28#include <VBox/cpum.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/rem.h>
34#include <VBox/csam.h>
35#include "PGMInternal.h"
36#include <VBox/vm.h>
37#include <VBox/dbg.h>
38#include <VBox/param.h>
39#include <VBox/err.h>
40#include <iprt/assert.h>
41#include <iprt/alloc.h>
42#include <iprt/asm.h>
43#include <VBox/log.h>
44#include <iprt/thread.h>
45#include <iprt/string.h>
46
47
48/*******************************************************************************
49* Defined Constants And Macros *
50*******************************************************************************/
51/** The number of pages to free in one batch. */
52#define PGMPHYS_FREE_PAGE_BATCH_SIZE 128
53
54
55/*******************************************************************************
56* Internal Functions *
57*******************************************************************************/
58static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
59static int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys);
60
61
62/*
63 * PGMR3PhysReadU8-64
64 * PGMR3PhysWriteU8-64
65 */
66#define PGMPHYSFN_READNAME PGMR3PhysReadU8
67#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU8
68#define PGMPHYS_DATASIZE 1
69#define PGMPHYS_DATATYPE uint8_t
70#include "PGMPhysRWTmpl.h"
71
72#define PGMPHYSFN_READNAME PGMR3PhysReadU16
73#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU16
74#define PGMPHYS_DATASIZE 2
75#define PGMPHYS_DATATYPE uint16_t
76#include "PGMPhysRWTmpl.h"
77
78#define PGMPHYSFN_READNAME PGMR3PhysReadU32
79#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU32
80#define PGMPHYS_DATASIZE 4
81#define PGMPHYS_DATATYPE uint32_t
82#include "PGMPhysRWTmpl.h"
83
84#define PGMPHYSFN_READNAME PGMR3PhysReadU64
85#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU64
86#define PGMPHYS_DATASIZE 8
87#define PGMPHYS_DATATYPE uint64_t
88#include "PGMPhysRWTmpl.h"
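/*
 * Editor's note (illustrative sketch, not part of the original file): the
 * blocks above use a template-include pattern -- PGMPhysRWTmpl.h is included
 * once per data size with PGMPHYSFN_READNAME/PGMPHYSFN_WRITENAME,
 * PGMPHYS_DATASIZE and PGMPHYS_DATATYPE set, and is expected to emit the
 * corresponding read/write helpers and #undef those macros so the next block
 * can redefine them.  A hypothetical template header built the same way could
 * look like this (names and body are made up, this is not the real
 * PGMPhysRWTmpl.h):
 */
#if 0
/* rwtmpl.h -- hypothetical template header */
static TMPL_TYPE TMPL_READNAME(const uint8_t *pbSrc)
{
    TMPL_TYPE uVal;
    memcpy(&uVal, pbSrc, sizeof(uVal));   /* unaligned-safe load */
    return uVal;
}
#undef TMPL_READNAME
#undef TMPL_TYPE
#endif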
89
90
91/**
92 * EMT worker for PGMR3PhysReadExternal.
93 */
94static DECLCALLBACK(int) pgmR3PhysReadExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, void *pvBuf, size_t cbRead)
95{
96 PGMPhysRead(pVM, *pGCPhys, pvBuf, cbRead);
97 return VINF_SUCCESS;
98}
99
100
101/**
102 * Read from physical memory, external users.
103 *
104 * @returns VBox status code.
105 * @retval VINF_SUCCESS.
106 *
107 * @param pVM VM Handle.
108 * @param GCPhys Physical address to start reading at.
109 * @param pvBuf Where to put the bytes read.
110 * @param cbRead How many bytes to read.
111 *
112 * @thread Any but EMTs.
113 */
114VMMR3DECL(int) PGMR3PhysReadExternal(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
115{
116 VM_ASSERT_OTHER_THREAD(pVM);
117
118 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
119 LogFlow(("PGMR3PhysReadExternal: %RGp %d\n", GCPhys, cbRead));
120
121 pgmLock(pVM);
122
123 /*
124 * Copy loop on ram ranges.
125 */
126 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
127 for (;;)
128 {
129 /* Find range. */
130 while (pRam && GCPhys > pRam->GCPhysLast)
131 pRam = pRam->CTX_SUFF(pNext);
132 /* Inside range or not? */
133 if (pRam && GCPhys >= pRam->GCPhys)
134 {
135 /*
136 * Must work our way through this range, page by page.
137 */
138 RTGCPHYS off = GCPhys - pRam->GCPhys;
139 while (off < pRam->cb)
140 {
141 unsigned iPage = off >> PAGE_SHIFT;
142 PPGMPAGE pPage = &pRam->aPages[iPage];
143
144 /*
145 * If the page has an ALL access handler, we'll have to
146 * delegate the job to EMT.
147 */
148 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
149 {
150 pgmUnlock(pVM);
151
152 PVMREQ pReq = NULL;
153 int rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT,
154 (PFNRT)pgmR3PhysReadExternalEMT, 4, pVM, &GCPhys, pvBuf, cbRead);
155 if (RT_SUCCESS(rc))
156 {
157 rc = pReq->iStatus;
158 VMR3ReqFree(pReq);
159 }
160 return rc;
161 }
162 Assert(!PGM_PAGE_IS_MMIO(pPage));
163
164 /*
165 * Simple stuff, go ahead.
166 */
167 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
168 if (cb > cbRead)
169 cb = cbRead;
170 const void *pvSrc;
171 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
172 if (RT_SUCCESS(rc))
173 memcpy(pvBuf, pvSrc, cb);
174 else
175 {
176 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
177 pRam->GCPhys + off, pPage, rc));
178 memset(pvBuf, 0xff, cb);
179 }
180
181 /* next page */
182 if (cb >= cbRead)
183 {
184 pgmUnlock(pVM);
185 return VINF_SUCCESS;
186 }
187 cbRead -= cb;
188 off += cb;
189 GCPhys += cb;
190 pvBuf = (char *)pvBuf + cb;
191 } /* walk pages in ram range. */
192 }
193 else
194 {
195 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
196
197 /*
198 * Unassigned address space.
199 */
200 if (!pRam)
201 break;
202 size_t cb = pRam->GCPhys - GCPhys;
203 if (cb >= cbRead)
204 {
205 memset(pvBuf, 0xff, cbRead);
206 break;
207 }
208 memset(pvBuf, 0xff, cb);
209
210 cbRead -= cb;
211 pvBuf = (char *)pvBuf + cb;
212 GCPhys += cb;
213 }
214 } /* Ram range walk */
215
216 pgmUnlock(pVM);
217
218 return VINF_SUCCESS;
219}
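/*
 * Editor's note -- usage sketch, not part of the original file.  Shows how a
 * non-EMT thread (e.g. an asynchronous I/O worker) could read a small guest
 * physical buffer via the API above.  The address and buffer size are made
 * up for the example.
 */
#if 0
static int exampleReadGuestBlock(PVM pVM, RTGCPHYS GCPhysSrc)
{
    uint8_t abBuf[512];
    int rc = PGMR3PhysReadExternal(pVM, GCPhysSrc, abBuf, sizeof(abBuf));
    if (RT_SUCCESS(rc))
        LogFlow(("exampleReadGuestBlock: %RGp first byte %#x\n", GCPhysSrc, abBuf[0]));
    /* Unassigned ranges read back as 0xff filler; pages with ALL access
       handlers are transparently read on the EMT (see the code above). */
    return rc;
}
#endif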
220
221
222/**
223 * EMT worker for PGMR3PhysWriteExternal.
224 */
225static DECLCALLBACK(int) pgmR3PhysWriteExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, const void *pvBuf, size_t cbWrite)
226{
227 /** @todo VERR_EM_NO_MEMORY */
228 PGMPhysWrite(pVM, *pGCPhys, pvBuf, cbWrite);
229 return VINF_SUCCESS;
230}
231
232
233/**
234 * Write to physical memory, external users.
235 *
236 * @returns VBox status code.
237 * @retval VINF_SUCCESS.
238 * @retval VERR_EM_NO_MEMORY.
239 *
240 * @param pVM VM Handle.
241 * @param GCPhys Physical address to write to.
242 * @param pvBuf What to write.
243 * @param cbWrite How many bytes to write.
244 *
245 * @thread Any but EMTs.
246 */
247VMMDECL(int) PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
248{
249 VM_ASSERT_OTHER_THREAD(pVM);
250
251 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMR3PhysWriteExternal after pgmR3Save()!\n"));
252 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
253 LogFlow(("PGMR3PhysWriteExternal: %RGp %d\n", GCPhys, cbWrite));
254
255 pgmLock(pVM);
256
257 /*
258 * Copy loop on ram ranges, stop when we hit something difficult.
259 */
260 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
261 for (;;)
262 {
263 /* Find range. */
264 while (pRam && GCPhys > pRam->GCPhysLast)
265 pRam = pRam->CTX_SUFF(pNext);
266 /* Inside range or not? */
267 if (pRam && GCPhys >= pRam->GCPhys)
268 {
269 /*
270 * Must work our way through this range, page by page.
271 */
272 RTGCPTR off = GCPhys - pRam->GCPhys;
273 while (off < pRam->cb)
274 {
275 RTGCPTR iPage = off >> PAGE_SHIFT;
276 PPGMPAGE pPage = &pRam->aPages[iPage];
277
278 /*
279 * If the page is in any way problematic, we have to
280 * do the work on the EMT. Anything that needs to be made
281 * writable or involves access handlers is problematic.
282 */
283 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
284 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
285 {
286 pgmUnlock(pVM);
287
288 PVMREQ pReq = NULL;
289 int rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT,
290 (PFNRT)pgmR3PhysWriteExternalEMT, 4, pVM, &GCPhys, pvBuf, cbWrite);
291 if (RT_SUCCESS(rc))
292 {
293 rc = pReq->iStatus;
294 VMR3ReqFree(pReq);
295 }
296 return rc;
297 }
298 Assert(!PGM_PAGE_IS_MMIO(pPage));
299
300 /*
301 * Simple stuff, go ahead.
302 */
303 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
304 if (cb > cbWrite)
305 cb = cbWrite;
306 void *pvDst;
307 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
308 if (RT_SUCCESS(rc))
309 memcpy(pvDst, pvBuf, cb);
310 else
311 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
312 pRam->GCPhys + off, pPage, rc));
313
314 /* next page */
315 if (cb >= cbWrite)
316 {
317 pgmUnlock(pVM);
318 return VINF_SUCCESS;
319 }
320
321 cbWrite -= cb;
322 off += cb;
323 GCPhys += cb;
324 pvBuf = (const char *)pvBuf + cb;
325 } /* walk pages in ram range */
326 }
327 else
328 {
329 /*
330 * Unassigned address space, skip it.
331 */
332 if (!pRam)
333 break;
334 size_t cb = pRam->GCPhys - GCPhys;
335 if (cb >= cbWrite)
336 break;
337 cbWrite -= cb;
338 pvBuf = (const char *)pvBuf + cb;
339 GCPhys += cb;
340 }
341 } /* Ram range walk */
342
343 pgmUnlock(pVM);
344 return VINF_SUCCESS;
345}
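/*
 * Editor's note -- usage sketch, not part of the original file.  Writing a
 * guest physical buffer from a non-EMT thread; problematic pages (active
 * handlers, pages not yet allocated) are delegated to the EMT by the API
 * itself.  The address and payload are made up for the example.
 */
#if 0
static int exampleWriteGuestBlock(PVM pVM, RTGCPHYS GCPhysDst, const void *pvData, size_t cbData)
{
    AssertReturn(cbData > 0, VERR_INVALID_PARAMETER);
    return PGMR3PhysWriteExternal(pVM, GCPhysDst, pvData, cbData);
}
#endif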
346
347
348#ifdef VBOX_WITH_NEW_PHYS_CODE
349/**
350 * VMR3ReqCall worker for PGMR3PhysGCPhys2CCPtrExternal to make pages writable.
351 *
352 * @returns see PGMR3PhysGCPhys2CCPtrExternal
353 * @param pVM The VM handle.
354 * @param pGCPhys Pointer to the guest physical address.
355 * @param ppv Where to store the mapping address.
356 * @param pLock Where to store the lock.
357 */
358static DECLCALLBACK(int) pgmR3PhysGCPhys2CCPtrDelegated(PVM pVM, PRTGCPHYS pGCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
359{
360 /*
361 * Just hand it to PGMPhysGCPhys2CCPtr and check that it's not a page with
362 * an access handler after it succeeds.
363 */
364 int rc = pgmLock(pVM);
365 AssertRCReturn(rc, rc);
366
367 rc = PGMPhysGCPhys2CCPtr(pVM, *pGCPhys, ppv, pLock);
368 if (RT_SUCCESS(rc))
369 {
370 PPGMPAGEMAPTLBE pTlbe;
371 int rc2 = pgmPhysPageQueryTlbe(&pVM->pgm.s, *pGCPhys, &pTlbe);
372 AssertFatalRC(rc2);
373 PPGMPAGE pPage = pTlbe->pPage;
374#if 1
375 if (PGM_PAGE_IS_MMIO(pPage))
376#else
377 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
378#endif
379 {
380 PGMPhysReleasePageMappingLock(pVM, pLock);
381 rc = VERR_PGM_PHYS_PAGE_RESERVED;
382 }
383 }
384
385 pgmUnlock(pVM);
386 return rc;
387}
388#endif /* VBOX_WITH_NEW_PHYS_CODE */
389
390
391/**
392 * Requests the mapping of a guest page into ring-3, external threads.
393 *
394 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
395 * release it.
396 *
397 * This API will assume your intention is to write to the page, and will
398 * therefore replace shared and zero pages. If you do not intend to modify the
399 * page, use the PGMR3PhysGCPhys2CCPtrReadOnlyExternal() API.
400 *
401 * @returns VBox status code.
402 * @retval VINF_SUCCESS on success.
403 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
404 * backing or if the page has any active access handlers. The caller
405 * must fall back on using PGMR3PhysWriteExternal.
406 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
407 *
408 * @param pVM The VM handle.
409 * @param GCPhys The guest physical address of the page that should be mapped.
410 * @param ppv Where to store the address corresponding to GCPhys.
411 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
412 *
413 * @remark Avoid calling this API from within critical sections (other than the
414 * PGM one) because of the deadlock risk when we have to delegate the
415 * task to an EMT.
416 * @thread Any.
417 */
418VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrExternal(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
419{
420 AssertPtr(ppv);
421 AssertPtr(pLock);
422
423#ifdef VBOX_WITH_NEW_PHYS_CODE
424 int rc = pgmLock(pVM);
425 AssertRCReturn(rc, rc);
426
427 /*
428 * Query the Physical TLB entry for the page (may fail).
429 */
430 PPGMPAGEMAPTLBE pTlbe;
431 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
432 if (RT_SUCCESS(rc))
433 {
434 PPGMPAGE pPage = pTlbe->pPage;
435#if 1
436 if (PGM_PAGE_IS_MMIO(pPage))
437 rc = VERR_PGM_PHYS_PAGE_RESERVED;
438#else
439 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
440 rc = VERR_PGM_PHYS_PAGE_RESERVED;
441#endif
442 else
443 {
444 /*
445 * If the page is shared, the zero page, or being write monitored,
446 * it must be converted to a page that's writable if possible.
447 * This has to be done on an EMT.
448 */
449 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
450 {
451 pgmUnlock(pVM);
452
453 PVMREQ pReq = NULL;
454 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT,
455 (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4, pVM, &GCPhys, ppv, pLock);
456 if (RT_SUCCESS(rc))
457 {
458 rc = pReq->iStatus;
459 VMR3ReqFree(pReq);
460 }
461 return rc;
462 }
463
464 /*
465 * Now, just perform the locking and calculate the return address.
466 */
467 PPGMPAGEMAP pMap = pTlbe->pMap;
468 pMap->cRefs++;
469#if 0 /** @todo implement locking properly */
470 if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
471 if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
472 {
473 AssertMsgFailed(("%RGp is entering permanent locked state!\n", GCPhys));
474 pMap->cRefs++; /* Extra ref to prevent it from going away. */
475 }
476#endif
477 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
478 pLock->pvPage = pPage;
479 pLock->pvMap = pMap;
480 }
481 }
482
483 pgmUnlock(pVM);
484 return rc;
485
486#else /* !VBOX_WITH_NEW_PHYS_CODE */
487 /*
488 * Fallback code.
489 */
490 return PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1, (PRTR3PTR)ppv);
491#endif /* !VBOX_WITH_NEW_PHYS_CODE */
492}
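/*
 * Editor's note -- usage sketch, not part of the original file.  Maps one
 * guest page for writing, falls back on PGMR3PhysWriteExternal when the page
 * has no writable backing (VERR_PGM_PHYS_PAGE_RESERVED, as documented above),
 * and always releases the mapping lock.  Address and data are made up for the
 * example; it assumes the write does not cross a page boundary.
 */
#if 0
static int exampleWriteOnePage(PVM pVM, RTGCPHYS GCPhys, const void *pvData, size_t cbData)
{
    Assert(cbData <= PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK));

    void           *pvPage;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMR3PhysGCPhys2CCPtrExternal(pVM, GCPhys, &pvPage, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(pvPage, pvData, cbData);     /* pvPage already includes the page offset */
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        rc = PGMR3PhysWriteExternal(pVM, GCPhys, pvData, cbData);
    return rc;
}
#endif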
493
494
495/**
496 * Requests the mapping of a guest page into ring-3, external threads.
497 *
498 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
499 * release it.
500 *
501 * @returns VBox status code.
502 * @retval VINF_SUCCESS on success.
503 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
504 * backing or if the page has an active ALL access handler. The caller
505 * must fall back on using PGMPhysRead.
506 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
507 *
508 * @param pVM The VM handle.
509 * @param GCPhys The guest physical address of the page that should be mapped.
510 * @param ppv Where to store the address corresponding to GCPhys.
511 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
512 *
513 * @remark Avoid calling this API from within critical sections (other than
514 * the PGM one) because of the deadlock risk.
515 * @thread Any.
516 */
517VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
518{
519#ifdef VBOX_WITH_NEW_PHYS_CODE
520 int rc = pgmLock(pVM);
521 AssertRCReturn(rc, rc);
522
523 /*
524 * Query the Physical TLB entry for the page (may fail).
525 */
526 PPGMPAGEMAPTLBE pTlbe;
527 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
528 if (RT_SUCCESS(rc))
529 {
530 PPGMPAGE pPage = pTlbe->pPage;
531#if 1
532 /* MMIO pages don't have any readable backing. */
533 if (PGM_PAGE_IS_MMIO(pPage))
534 rc = VERR_PGM_PHYS_PAGE_RESERVED;
535#else
536 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
537 rc = VERR_PGM_PHYS_PAGE_RESERVED;
538#endif
539 else
540 {
541 /*
542 * Now, just perform the locking and calculate the return address.
543 */
544 PPGMPAGEMAP pMap = pTlbe->pMap;
545 pMap->cRefs++;
546#if 0 /** @todo implement locking properly */
547 if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
548 if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
549 {
550 AssertMsgFailed(("%RGp is entering permanent locked state!\n", GCPhys));
551 pMap->cRefs++; /* Extra ref to prevent it from going away. */
552 }
553#endif
554 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
555 pLock->pvPage = pPage;
556 pLock->pvMap = pMap;
557 }
558 }
559
560 pgmUnlock(pVM);
561 return rc;
562
563#else /* !VBOX_WITH_NEW_PHYS_CODE */
564 /*
565 * Fallback code.
566 */
567 return PGMPhysGCPhys2CCPtr(pVM, GCPhys, (void **)ppv, pLock);
568#endif /* !VBOX_WITH_NEW_PHYS_CODE */
569}
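/*
 * Editor's note -- usage sketch, not part of the original file.  Read-only
 * peek at a guest value, falling back to a buffered read (here via
 * PGMR3PhysReadExternal, since this is meant for a non-EMT thread) when the
 * page has no readable backing.  Assumes the value does not straddle a page
 * boundary.
 */
#if 0
static int examplePeekU32(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32)
{
    void const     *pvPage;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMR3PhysGCPhys2CCPtrReadOnlyExternal(pVM, GCPhys, &pvPage, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(pu32, pvPage, sizeof(*pu32));
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        rc = PGMR3PhysReadExternal(pVM, GCPhys, pu32, sizeof(*pu32));
    return rc;
}
#endif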
570
571
572/**
573 * Relinks the RAM ranges using the pSelfRC and pSelfR0 pointers.
574 *
575 * Called when anything was relocated.
576 *
577 * @param pVM Pointer to the shared VM structure.
578 */
579void pgmR3PhysRelinkRamRanges(PVM pVM)
580{
581 PPGMRAMRANGE pCur;
582
583#ifdef VBOX_STRICT
584 for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
585 {
586 Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfR0 == MMHyperCCToR0(pVM, pCur));
587 Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfRC == MMHyperCCToRC(pVM, pCur));
588 Assert((pCur->GCPhys & PAGE_OFFSET_MASK) == 0);
589 Assert((pCur->GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
590 Assert((pCur->cb & PAGE_OFFSET_MASK) == 0);
591 Assert(pCur->cb == pCur->GCPhysLast - pCur->GCPhys + 1);
592 for (PPGMRAMRANGE pCur2 = pVM->pgm.s.pRamRangesR3; pCur2; pCur2 = pCur2->pNextR3)
593 Assert( pCur2 == pCur
594 || strcmp(pCur2->pszDesc, pCur->pszDesc)); /** @todo fix MMIO ranges!! */
595 }
596#endif
597
598 pCur = pVM->pgm.s.pRamRangesR3;
599 if (pCur)
600 {
601 pVM->pgm.s.pRamRangesR0 = pCur->pSelfR0;
602 pVM->pgm.s.pRamRangesRC = pCur->pSelfRC;
603
604 for (; pCur->pNextR3; pCur = pCur->pNextR3)
605 {
606 pCur->pNextR0 = pCur->pNextR3->pSelfR0;
607 pCur->pNextRC = pCur->pNextR3->pSelfRC;
608 }
609
610 Assert(pCur->pNextR0 == NIL_RTR0PTR);
611 Assert(pCur->pNextRC == NIL_RTRCPTR);
612 }
613 else
614 {
615 Assert(pVM->pgm.s.pRamRangesR0 == NIL_RTR0PTR);
616 Assert(pVM->pgm.s.pRamRangesRC == NIL_RTRCPTR);
617 }
618}
619
620
621/**
622 * Links a new RAM range into the list.
623 *
624 * @param pVM Pointer to the shared VM structure.
625 * @param pNew Pointer to the new list entry.
626 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
627 */
628static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
629{
630 AssertMsg(pNew->pszDesc, ("%RGp-%RGp\n", pNew->GCPhys, pNew->GCPhysLast));
631 Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfR0 == MMHyperCCToR0(pVM, pNew));
632 Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfRC == MMHyperCCToRC(pVM, pNew));
633
634 pgmLock(pVM);
635
636 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
637 pNew->pNextR3 = pRam;
638 pNew->pNextR0 = pRam ? pRam->pSelfR0 : NIL_RTR0PTR;
639 pNew->pNextRC = pRam ? pRam->pSelfRC : NIL_RTRCPTR;
640
641 if (pPrev)
642 {
643 pPrev->pNextR3 = pNew;
644 pPrev->pNextR0 = pNew->pSelfR0;
645 pPrev->pNextRC = pNew->pSelfRC;
646 }
647 else
648 {
649 pVM->pgm.s.pRamRangesR3 = pNew;
650 pVM->pgm.s.pRamRangesR0 = pNew->pSelfR0;
651 pVM->pgm.s.pRamRangesRC = pNew->pSelfRC;
652 }
653
654 pgmUnlock(pVM);
655}
656
657
658/**
659 * Unlink an existing RAM range from the list.
660 *
661 * @param pVM Pointer to the shared VM structure.
662 * @param pRam Pointer to the RAM range to unlink.
663 * @param pPrev Pointer to the previous list entry. NULL if pRam is the head.
664 */
665static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
666{
667 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
668 Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfR0 == MMHyperCCToR0(pVM, pRam));
669 Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfRC == MMHyperCCToRC(pVM, pRam));
670
671 pgmLock(pVM);
672
673 PPGMRAMRANGE pNext = pRam->pNextR3;
674 if (pPrev)
675 {
676 pPrev->pNextR3 = pNext;
677 pPrev->pNextR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
678 pPrev->pNextRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
679 }
680 else
681 {
682 Assert(pVM->pgm.s.pRamRangesR3 == pRam);
683 pVM->pgm.s.pRamRangesR3 = pNext;
684 pVM->pgm.s.pRamRangesR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
685 pVM->pgm.s.pRamRangesRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
686 }
687
688 pgmUnlock(pVM);
689}
690
691
692/**
693 * Unlink an existing RAM range from the list.
694 *
695 * @param pVM Pointer to the shared VM structure.
696 * @param pRam Pointer to the RAM range to unlink.
697 */
698static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
699{
700 pgmLock(pVM);
701
702 /* find prev. */
703 PPGMRAMRANGE pPrev = NULL;
704 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
705 while (pCur != pRam)
706 {
707 pPrev = pCur;
708 pCur = pCur->pNextR3;
709 }
710 AssertFatal(pCur);
711
712 pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
713
714 pgmUnlock(pVM);
715}
716
717
718#ifdef VBOX_WITH_NEW_PHYS_CODE
719/**
720 * Frees a range of pages, replacing them with ZERO pages of the specified type.
721 *
722 * @returns VBox status code.
723 * @param pVM The VM handle.
724 * @param pRam The RAM range in which the pages reside.
725 * @param GCPhys The address of the first page.
726 * @param GCPhysLast The address of the last page.
727 * @param uType The page type to replace them with.
728 */
729static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, uint8_t uType)
730{
731 uint32_t cPendingPages = 0;
732 PGMMFREEPAGESREQ pReq;
733 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
734 AssertLogRelRCReturn(rc, rc);
735
736 /* Iterate the pages. */
737 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
738 uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> PAGE_SHIFT) + 1;
739 while (cPagesLeft-- > 0)
740 {
741 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
742 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
743
744 PGM_PAGE_SET_TYPE(pPageDst, uType);
745
746 GCPhys += PAGE_SIZE;
747 pPageDst++;
748 }
749
750 if (cPendingPages)
751 {
752 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
753 AssertLogRelRCReturn(rc, rc);
754 }
755 GMMR3FreePagesCleanup(pReq);
756
757 return rc;
758}
759#endif /* VBOX_WITH_NEW_PHYS_CODE */
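/*
 * Editor's note (descriptive, not part of the original file): the function
 * above shows the batched page-freeing pattern used throughout this file --
 * GMMR3FreePagesPrepare() sets up a request for up to
 * PGMPHYS_FREE_PAGE_BATCH_SIZE pages, pgmPhysFreePage() queues individual
 * pages into it, and GMMR3FreePagesPerform()/GMMR3FreePagesCleanup() flush
 * and tear the request down.  The same sequence recurs in pgmR3PhysRamReset()
 * and PGMR3PhysMMIO2Map() below.
 */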
760
761
762/**
763 * PGMR3PhysRegisterRam worker that initializes and links a RAM range.
764 *
765 * @param pVM The VM handle.
766 * @param pNew The new RAM range.
767 * @param GCPhys The address of the RAM range.
768 * @param GCPhysLast The last address of the RAM range.
769 * @param RCPtrNew The RC address if the range is floating. NIL_RTRCPTR
770 * if in HMA.
771 * @param R0PtrNew Ditto for R0.
772 * @param pszDesc The description.
773 * @param pPrev The previous RAM range (for linking).
774 */
775static void pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
776 RTRCPTR RCPtrNew, RTR0PTR R0PtrNew, const char *pszDesc, PPGMRAMRANGE pPrev)
777{
778 /*
779 * Initialize the range.
780 */
781 pNew->pSelfR0 = R0PtrNew != NIL_RTR0PTR ? R0PtrNew : MMHyperCCToR0(pVM, pNew);
782 pNew->pSelfRC = RCPtrNew != NIL_RTRCPTR ? RCPtrNew : MMHyperCCToRC(pVM, pNew);
783 pNew->GCPhys = GCPhys;
784 pNew->GCPhysLast = GCPhysLast;
785 pNew->cb = GCPhysLast - GCPhys + 1;
786 pNew->pszDesc = pszDesc;
787 pNew->fFlags = RCPtrNew != NIL_RTR0PTR ? PGM_RAM_RANGE_FLAGS_FLOATING : 0;
788 pNew->pvR3 = NULL;
789
790 uint32_t const cPages = pNew->cb >> PAGE_SHIFT;
791 RTGCPHYS iPage = cPages;
792 while (iPage-- > 0)
793 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
794
795 /* Update the page count stats. */
796 pVM->pgm.s.cZeroPages += cPages;
797 pVM->pgm.s.cAllPages += cPages;
798
799 /*
800 * Link it.
801 */
802 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
803}
804
805
806/**
807 * Relocate a floating RAM range.
808 *
809 * @copydoc FNPGMRELOCATE.
810 */
811static DECLCALLBACK(bool) pgmR3PhysRamRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
812{
813 PPGMRAMRANGE pRam = (PPGMRAMRANGE)pvUser;
814 Assert(pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
815 Assert(pRam->pSelfRC == GCPtrOld + PAGE_SIZE);
816
817 switch (enmMode)
818 {
819 case PGMRELOCATECALL_SUGGEST:
820 return true;
821 case PGMRELOCATECALL_RELOCATE:
822 {
823 /* Update myself and then relink all the ranges. */
824 pgmLock(pVM);
825 pRam->pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE);
826 pgmR3PhysRelinkRamRanges(pVM);
827 pgmUnlock(pVM);
828 return true;
829 }
830
831 default:
832 AssertFailedReturn(false);
833 }
834}
835
836
837/**
838 * PGMR3PhysRegisterRam worker that registers a high chunk.
839 *
840 * @returns VBox status code.
841 * @param pVM The VM handle.
842 * @param GCPhys The address of the RAM.
843 * @param cRamPages The number of RAM pages to register.
844 * @param cbChunk The size of the PGMRAMRANGE guest mapping.
845 * @param iChunk The chunk number.
846 * @param pszDesc The RAM range description.
847 * @param ppPrev Previous RAM range pointer. In/Out.
848 */
849static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages,
850 uint32_t cbChunk, uint32_t iChunk, const char *pszDesc,
851 PPGMRAMRANGE *ppPrev)
852{
853 const char *pszDescChunk = iChunk == 0
854 ? pszDesc
855 : MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s (#%u)", pszDesc, iChunk + 1);
856 AssertReturn(pszDescChunk, VERR_NO_MEMORY);
857
858 /*
859 * Allocate memory for the new chunk.
860 */
861 size_t const cChunkPages = RT_ALIGN_Z(RT_UOFFSETOF(PGMRAMRANGE, aPages[cRamPages]), PAGE_SIZE) >> PAGE_SHIFT;
862 PSUPPAGE paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages);
863 AssertReturn(paChunkPages, VERR_NO_TMP_MEMORY);
864 RTR0PTR R0PtrChunk = NIL_RTR0PTR;
865 void *pvChunk = NULL;
866 int rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk,
867#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
868 VMMIsHwVirtExtForced(pVM) ? &R0PtrChunk : NULL,
869#else
870 NULL,
871#endif
872 paChunkPages);
873 if (RT_SUCCESS(rc))
874 {
875#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
876 if (!VMMIsHwVirtExtForced(pVM))
877 R0PtrChunk = NIL_RTR0PTR;
878#else
879 R0PtrChunk = (uintptr_t)pvChunk;
880#endif
881 memset(pvChunk, 0, cChunkPages << PAGE_SHIFT);
882
883 PPGMRAMRANGE pNew = (PPGMRAMRANGE)pvChunk;
884
885 /*
886 * Create a mapping and map the pages into it.
887 * We push these in below the HMA.
888 */
889 RTGCPTR GCPtrChunkMap = pVM->pgm.s.GCPtrPrevRamRangeMapping - cbChunk;
890 rc = PGMR3MapPT(pVM, GCPtrChunkMap, cbChunk, 0 /*fFlags*/, pgmR3PhysRamRangeRelocate, pNew, pszDescChunk);
891 if (RT_SUCCESS(rc))
892 {
893 pVM->pgm.s.GCPtrPrevRamRangeMapping = GCPtrChunkMap;
894
895 RTGCPTR const GCPtrChunk = GCPtrChunkMap + PAGE_SIZE;
896 RTGCPTR GCPtrPage = GCPtrChunk;
897 for (uint32_t iPage = 0; iPage < cChunkPages && RT_SUCCESS(rc); iPage++, GCPtrPage += PAGE_SIZE)
898 rc = PGMMap(pVM, GCPtrPage, paChunkPages[iPage].Phys, PAGE_SIZE, 0);
899 if (RT_SUCCESS(rc))
900 {
901 /*
902 * Ok, init and link the range.
903 */
904 pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << PAGE_SHIFT) - 1,
905 (RTRCPTR)GCPtrChunk, R0PtrChunk, pszDescChunk, *ppPrev);
906 *ppPrev = pNew;
907 }
908 }
909
910 if (RT_FAILURE(rc))
911 SUPR3PageFreeEx(pvChunk, cChunkPages);
912 }
913
914 RTMemTmpFree(paChunkPages);
915 return rc;
916}
917
918
919/**
920 * Sets up a RAM range.
921 *
922 * This will check for conflicting registrations, make a resource
923 * reservation for the memory (with GMM), and setup the per-page
924 * tracking structures (PGMPAGE).
925 *
926 * @returns VBox status code.
927 * @param pVM Pointer to the shared VM structure.
928 * @param GCPhys The physical address of the RAM.
929 * @param cb The size of the RAM.
930 * @param pszDesc The description - not copied, so don't free or change it.
931 */
932VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
933{
934 /*
935 * Validate input.
936 */
937 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
938 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
939 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
940 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
941 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
942 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
943 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
944 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
945
946 /*
947 * Find range location and check for conflicts.
948 * (We don't lock here because the locking by EMT is only required on update.)
949 */
950 PPGMRAMRANGE pPrev = NULL;
951 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
952 while (pRam && GCPhysLast >= pRam->GCPhys)
953 {
954 if ( GCPhysLast >= pRam->GCPhys
955 && GCPhys <= pRam->GCPhysLast)
956 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
957 GCPhys, GCPhysLast, pszDesc,
958 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
959 VERR_PGM_RAM_CONFLICT);
960
961 /* next */
962 pPrev = pRam;
963 pRam = pRam->pNextR3;
964 }
965
966 /*
967 * Register it with GMM (the API bitches).
968 */
969 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
970 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
971 if (RT_FAILURE(rc))
972 return rc;
973
974#ifdef VBOX_WITH_NEW_PHYS_CODE
975 if ( GCPhys >= _4G
976 && cPages > 256)
977 {
978 /*
979 * The PGMRAMRANGE structures for the high memory can get very big.
980 * In order to avoid SUPR3PageAllocEx allocation failures due to the
981 * allocation size limit there and also to avoid being unable to find
982 * guest mapping space for them, we split this memory up into 4MB in
983 * (potential) raw-mode configs and 16MB chunks in forced AMD-V/VT-x
984 * mode.
985 *
986 * The first and last page of each mapping are guard pages and marked
987 * not-present. So, we've got 4186112 and 16769024 bytes available for
988 * the PGMRAMRANGE structure.
989 *
990 * Note! The sizes used here will influence the saved state.
991 */
992 uint32_t cbChunk;
993 uint32_t cPagesPerChunk;
994 if (VMMIsHwVirtExtForced(pVM))
995 {
996 cbChunk = 16U*_1M;
997 cPagesPerChunk = 1048048; /* max ~1048059 */
998 AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 1048048 < 16U*_1M - PAGE_SIZE * 2);
999 }
1000 else
1001 {
1002 cbChunk = 4U*_1M;
1003 cPagesPerChunk = 261616; /* max ~261627 */
1004 AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 261616 < 4U*_1M - PAGE_SIZE * 2);
1005 }
1006 AssertRelease(RT_UOFFSETOF(PGMRAMRANGE, aPages[cPagesPerChunk]) + PAGE_SIZE * 2 <= cbChunk);
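 /* Editor's note (illustrative arithmetic, not part of the original file):
    16U*_1M - 2*PAGE_SIZE = 16777216 - 8192 = 16769024 bytes and
    4U*_1M  - 2*PAGE_SIZE =  4194304 - 8192 =  4186112 bytes, which is where
    the figures in the comment above come from: each chunk loses its first and
    last page to guard pages, and the remainder must hold the PGMRAMRANGE
    header plus aPages[cPagesPerChunk], as the AssertCompile/AssertRelease
    statements check. */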
1007
1008 RTGCPHYS cPagesLeft = cPages;
1009 RTGCPHYS GCPhysChunk = GCPhys;
1010 uint32_t iChunk = 0;
1011 while (cPagesLeft > 0)
1012 {
1013 uint32_t cPagesInChunk = cPagesLeft;
1014 if (cPagesInChunk > cPagesPerChunk)
1015 cPagesInChunk = cPagesPerChunk;
1016
1017 rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, cbChunk, iChunk, pszDesc, &pPrev);
1018 AssertRCReturn(rc, rc);
1019
1020 /* advance */
1021 GCPhysChunk += (RTGCPHYS)cPagesInChunk << PAGE_SHIFT;
1022 cPagesLeft -= cPagesInChunk;
1023 iChunk++;
1024 }
1025 }
1026 else
1027#endif
1028 {
1029 /*
1030 * Allocate, initialize and link the new RAM range.
1031 */
1032 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
1033 PPGMRAMRANGE pNew;
1034 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
1035 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
1036
1037#ifndef VBOX_WITH_NEW_PHYS_CODE
1038 /* Allocate memory for chunk to HC ptr lookup array. */
1039 pNew->paChunkR3Ptrs = NULL;
1040 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
1041 AssertRCReturn(rc, rc);
1042 pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
1043#endif
1044
1045 pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, NIL_RTRCPTR, NIL_RTR0PTR, pszDesc, pPrev);
1046 }
1047
1048 /*
1049 * Notify REM.
1050 */
1051#ifdef VBOX_WITH_NEW_PHYS_CODE
1052 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_RAM);
1053#else
1054 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, MM_RAM_FLAGS_DYNAMIC_ALLOC);
1055#endif
1056
1057 return VINF_SUCCESS;
1058}
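/*
 * Editor's note -- usage sketch, not part of the original file.  Registering
 * guest RAM from VM construction code on the EMT; the split across the 4GB
 * boundary mirrors what the high-memory path above expects.  Sizes, the PCI
 * hole placement and the descriptions are made up for the example.
 */
#if 0
static int exampleRegisterGuestRam(PVM pVM, RTGCPHYS cbRam)
{
    /* Low RAM: everything below 4GB, capped at 3.5GB here to leave a PCI hole. */
    RTGCPHYS const cbLow = RT_MIN(cbRam, UINT64_C(0xe0000000));
    int rc = PGMR3PhysRegisterRam(pVM, 0, cbLow, "Example RAM below 4GB");
    if (RT_SUCCESS(rc) && cbRam > cbLow)
        /* The remainder goes above 4GB; PGMR3PhysRegisterRam chunks it internally. */
        rc = PGMR3PhysRegisterRam(pVM, _4G, cbRam - cbLow, "Example RAM above 4GB");
    return rc;
}
#endif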
1059
1060
1061/**
1062 * Resets (zeros) the RAM.
1063 *
1064 * ASSUMES that the caller owns the PGM lock.
1065 *
1066 * @returns VBox status code.
1067 * @param pVM Pointer to the shared VM structure.
1068 */
1069int pgmR3PhysRamReset(PVM pVM)
1070{
1071#ifdef VBOX_WITH_NEW_PHYS_CODE
1072 /*
1073 * We batch up pages before freeing them.
1074 */
1075 uint32_t cPendingPages = 0;
1076 PGMMFREEPAGESREQ pReq;
1077 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1078 AssertLogRelRCReturn(rc, rc);
1079#endif
1080
1081 /*
1082 * Walk the ram ranges.
1083 */
1084 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
1085 {
1086 uint32_t iPage = pRam->cb >> PAGE_SHIFT;
1087 AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
1088
1089#ifdef VBOX_WITH_NEW_PHYS_CODE
1090 if (!pVM->pgm.s.fRamPreAlloc)
1091 {
1092 /* Replace all RAM pages by ZERO pages. */
1093 while (iPage-- > 0)
1094 {
1095 PPGMPAGE pPage = &pRam->aPages[iPage];
1096 switch (PGM_PAGE_GET_TYPE(pPage))
1097 {
1098 case PGMPAGETYPE_RAM:
1099 if (!PGM_PAGE_IS_ZERO(pPage))
1100 {
1101 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1102 AssertLogRelRCReturn(rc, rc);
1103 }
1104 break;
1105
1106 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
1107 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1108 break;
1109
1110 case PGMPAGETYPE_MMIO2:
1111 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
1112 case PGMPAGETYPE_ROM:
1113 case PGMPAGETYPE_MMIO:
1114 break;
1115 default:
1116 AssertFailed();
1117 }
1118 } /* for each page */
1119 }
1120 else
1121#endif
1122 {
1123 /* Zero the memory. */
1124 while (iPage-- > 0)
1125 {
1126 PPGMPAGE pPage = &pRam->aPages[iPage];
1127 switch (PGM_PAGE_GET_TYPE(pPage))
1128 {
1129#ifndef VBOX_WITH_NEW_PHYS_CODE
1130 case PGMPAGETYPE_INVALID:
1131 case PGMPAGETYPE_RAM:
1132 if (pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)) /** @todo PAGE FLAGS */
1133 {
1134 /* shadow ram is reloaded elsewhere. */
1135 Log4(("PGMR3Reset: not clearing phys page %RGp due to flags %RHp\n", pRam->GCPhys + (iPage << PAGE_SHIFT), pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO))); /** @todo PAGE FLAGS */
1136 continue;
1137 }
1138 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1139 {
1140 unsigned iChunk = iPage >> (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
1141 if (pRam->paChunkR3Ptrs[iChunk])
1142 ASMMemZero32((char *)pRam->paChunkR3Ptrs[iChunk] + ((iPage << PAGE_SHIFT) & PGM_DYNAMIC_CHUNK_OFFSET_MASK), PAGE_SIZE);
1143 }
1144 else
1145 ASMMemZero32((char *)pRam->pvR3 + (iPage << PAGE_SHIFT), PAGE_SIZE);
1146 break;
1147#else /* VBOX_WITH_NEW_PHYS_CODE */
1148 case PGMPAGETYPE_RAM:
1149 switch (PGM_PAGE_GET_STATE(pPage))
1150 {
1151 case PGM_PAGE_STATE_ZERO:
1152 break;
1153 case PGM_PAGE_STATE_SHARED:
1154 case PGM_PAGE_STATE_WRITE_MONITORED:
1155 rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1156 AssertLogRelRCReturn(rc, rc);
1157 case PGM_PAGE_STATE_ALLOCATED:
1158 {
1159 void *pvPage;
1160 PPGMPAGEMAP pMapIgnored;
1161 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pMapIgnored, &pvPage);
1162 AssertLogRelRCReturn(rc, rc);
1163 ASMMemZeroPage(pvPage);
1164 break;
1165 }
1166 }
1167 break;
1168#endif /* VBOX_WITH_NEW_PHYS_CODE */
1169
1170 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
1171 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1172 break;
1173
1174 case PGMPAGETYPE_MMIO2:
1175 case PGMPAGETYPE_ROM_SHADOW:
1176 case PGMPAGETYPE_ROM:
1177 case PGMPAGETYPE_MMIO:
1178 break;
1179 default:
1180 AssertFailed();
1181
1182 }
1183 } /* for each page */
1184 }
1185
1186 }
1187
1188#ifdef VBOX_WITH_NEW_PHYS_CODE
1189 /*
1190 * Finish off any pages pending freeing.
1191 */
1192 if (cPendingPages)
1193 {
1194 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
1195 AssertLogRelRCReturn(rc, rc);
1196 }
1197 GMMR3FreePagesCleanup(pReq);
1198#endif
1199
1200
1201 return VINF_SUCCESS;
1202}
1203
1204
1205/**
1206 * This is the interface IOM is using to register an MMIO region.
1207 *
1208 * It will check for conflicts and ensure that a RAM range structure
1209 * is present before calling the PGMR3HandlerPhysicalRegister API to
1210 * register the callbacks.
1211 *
1212 * @returns VBox status code.
1213 *
1214 * @param pVM Pointer to the shared VM structure.
1215 * @param GCPhys The start of the MMIO region.
1216 * @param cb The size of the MMIO region.
1217 * @param pfnHandlerR3 The address of the ring-3 handler. (IOMR3MMIOHandler)
1218 * @param pvUserR3 The user argument for R3.
1219 * @param pfnHandlerR0 The address of the ring-0 handler. (IOMMMIOHandler)
1220 * @param pvUserR0 The user argument for R0.
1221 * @param pfnHandlerRC The address of the RC handler. (IOMMMIOHandler)
1222 * @param pvUserRC The user argument for RC.
1223 * @param pszDesc The description of the MMIO region.
1224 */
1225VMMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb,
1226 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
1227 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
1228 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
1229 R3PTRTYPE(const char *) pszDesc)
1230{
1231 /*
1232 * Assert on some assumptions.
1233 */
1234 VM_ASSERT_EMT(pVM);
1235 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1236 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1237 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1238 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
1239
1240 /*
1241 * Make sure there's a RAM range structure for the region.
1242 */
1243 int rc;
1244 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1245 bool fRamExists = false;
1246 PPGMRAMRANGE pRamPrev = NULL;
1247 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1248 while (pRam && GCPhysLast >= pRam->GCPhys)
1249 {
1250 if ( GCPhysLast >= pRam->GCPhys
1251 && GCPhys <= pRam->GCPhysLast)
1252 {
1253 /* Simplification: all within the same range. */
1254 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
1255 && GCPhysLast <= pRam->GCPhysLast,
1256 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
1257 GCPhys, GCPhysLast, pszDesc,
1258 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1259 VERR_PGM_RAM_CONFLICT);
1260
1261 /* Check that it's all RAM or MMIO pages. */
1262 PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1263 uint32_t cLeft = cb >> PAGE_SHIFT;
1264 while (cLeft-- > 0)
1265 {
1266 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
1267 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
1268 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
1269 GCPhys, GCPhysLast, pszDesc, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
1270 VERR_PGM_RAM_CONFLICT);
1271 pPage++;
1272 }
1273
1274 /* Looks good. */
1275 fRamExists = true;
1276 break;
1277 }
1278
1279 /* next */
1280 pRamPrev = pRam;
1281 pRam = pRam->pNextR3;
1282 }
1283 PPGMRAMRANGE pNew;
1284 if (fRamExists)
1285 {
1286 pNew = NULL;
1287#ifdef VBOX_WITH_NEW_PHYS_CODE
1288 /*
1289 * Make all the pages in the range MMIO/ZERO pages, freeing any
1290 * RAM pages currently mapped here. This might not be 100% correct
1291 * for PCI memory, but we're doing the same thing for MMIO2 pages.
1292 */
1293 rc = pgmLock(pVM);
1294 if (RT_SUCCESS(rc))
1295 {
1296 rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, PGMPAGETYPE_MMIO);
1297 pgmUnlock(pVM);
1298 }
1299 AssertRCReturn(rc, rc);
1300#endif
1301 }
1302 else
1303 {
1304 /*
1305 * No RAM range, insert an ad-hoc one.
1306 *
1307 * Note that we don't have to tell REM about this range because
1308 * PGMHandlerPhysicalRegisterEx will do that for us.
1309 */
1310 Log(("PGMR3PhysMMIORegister: Adding ad-hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));
1311
1312 const uint32_t cPages = cb >> PAGE_SHIFT;
1313 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
1314 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), 16, MM_TAG_PGM_PHYS, (void **)&pNew);
1315 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
1316
1317 /* Initialize the range. */
1318 pNew->pSelfR0 = MMHyperCCToR0(pVM, pNew);
1319 pNew->pSelfRC = MMHyperCCToRC(pVM, pNew);
1320 pNew->GCPhys = GCPhys;
1321 pNew->GCPhysLast = GCPhysLast;
1322 pNew->cb = cb;
1323 pNew->pszDesc = pszDesc;
1324 pNew->fFlags = 0; /** @todo add some kind of ad-hoc flag? */
1325
1326 pNew->pvR3 = NULL;
1327#ifndef VBOX_WITH_NEW_PHYS_CODE
1328 pNew->paChunkR3Ptrs = NULL;
1329#endif
1330
1331 uint32_t iPage = cPages;
1332 while (iPage-- > 0)
1333 PGM_PAGE_INIT_ZERO_REAL(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
1334 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);
1335
1336 /* update the page count stats. */
1337 pVM->pgm.s.cZeroPages += cPages;
1338 pVM->pgm.s.cAllPages += cPages;
1339
1340 /* link it */
1341 pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
1342 }
1343
1344 /*
1345 * Register the access handler.
1346 */
1347 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_MMIO, GCPhys, GCPhysLast,
1348 pfnHandlerR3, pvUserR3,
1349 pfnHandlerR0, pvUserR0,
1350 pfnHandlerRC, pvUserRC, pszDesc);
1351 if ( RT_FAILURE(rc)
1352 && !fRamExists)
1353 {
1354 pVM->pgm.s.cZeroPages -= cb >> PAGE_SHIFT;
1355 pVM->pgm.s.cAllPages -= cb >> PAGE_SHIFT;
1356
1357 /* remove the ad-hoc range. */
1358 pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
1359 pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
1360 MMHyperFree(pVM, pNew); /* the ad-hoc range allocated above */
1361 }
1362
1363 return rc;
1364}
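/*
 * Editor's note -- usage sketch, not part of the original file.  In practice
 * IOM performs this registration (passing IOMR3MMIOHandler and friends, per
 * the doc comment above); the sketch below only shows the shape of a ring-3
 * registration with a private handler and the matching deregistration.
 * Whether omitting the ring-0/RC handlers like this is accepted is an
 * assumption made purely for the example.
 */
#if 0
static DECLCALLBACK(int) exampleMmioHandlerR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf,
                                              size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
    NOREF(pVM); NOREF(GCPhys); NOREF(pvPhys); NOREF(pvBuf); NOREF(cbBuf); NOREF(enmAccessType); NOREF(pvUser);
    return VINF_SUCCESS;
}

static int exampleRegisterMmio(PVM pVM, RTGCPHYS GCPhysMmio)
{
    int rc = PGMR3PhysMMIORegister(pVM, GCPhysMmio, 4 * PAGE_SIZE,
                                   exampleMmioHandlerR3, NULL /*pvUserR3*/,
                                   NIL_RTR0PTR /*pfnHandlerR0*/, NIL_RTR0PTR /*pvUserR0*/,
                                   NIL_RTRCPTR /*pfnHandlerRC*/, NIL_RTRCPTR /*pvUserRC*/,
                                   "Example MMIO region");
    if (RT_SUCCESS(rc))
        rc = PGMR3PhysMMIODeregister(pVM, GCPhysMmio, 4 * PAGE_SIZE);
    return rc;
}
#endif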
1365
1366
1367/**
1368 * This is the interface IOM is using to deregister an MMIO region.
1369 *
1370 * It will take care of calling PGMHandlerPhysicalDeregister and clean up
1371 * any ad-hoc PGMRAMRANGE left behind.
1372 *
1373 * @returns VBox status code.
1374 * @param pVM Pointer to the shared VM structure.
1375 * @param GCPhys The start of the MMIO region.
1376 * @param cb The size of the MMIO region.
1377 */
1378VMMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
1379{
1380 VM_ASSERT_EMT(pVM);
1381
1382 /*
1383 * First deregister the handler, then check if we should remove the ram range.
1384 */
1385 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
1386 if (RT_SUCCESS(rc))
1387 {
1388 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1389 PPGMRAMRANGE pRamPrev = NULL;
1390 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1391 while (pRam && GCPhysLast >= pRam->GCPhys)
1392 {
1393 /** @todo We're being a bit too careful here. rewrite. */
1394 if ( GCPhysLast == pRam->GCPhysLast
1395 && GCPhys == pRam->GCPhys)
1396 {
1397 Assert(pRam->cb == cb);
1398
1399 /*
1400 * See if all the pages are dead MMIO pages.
1401 */
1402 uint32_t const cPages = cb >> PAGE_SHIFT;
1403 bool fAllMMIO = true;
1404 uint32_t iPage = 0;
1405 uint32_t cLeft = cPages;
1406 while (cLeft-- > 0)
1407 {
1408 PPGMPAGE pPage = &pRam->aPages[iPage];
1409 if ( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO
1410 /*|| not-out-of-action later */)
1411 {
1412 fAllMMIO = false;
1413#ifdef VBOX_WITH_NEW_PHYS_CODE
1414 Assert(PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1415 AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
1416#endif
1417 break;
1418 }
1419 Assert(PGM_PAGE_IS_ZERO(pPage));
1420 pPage++;
1421 }
1422 if (fAllMMIO)
1423 {
1424 /*
1425 * Ad-hoc range, unlink and free it.
1426 */
1427 Log(("PGMR3PhysMMIODeregister: Freeing ad-hoc MMIO range for %RGp-%RGp %s\n",
1428 GCPhys, GCPhysLast, pRam->pszDesc));
1429
1430 pVM->pgm.s.cAllPages -= cPages;
1431 pVM->pgm.s.cZeroPages -= cPages;
1432
1433 pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
1434 pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
1435 MMHyperFree(pVM, pRam);
1436 break;
1437 }
1438 }
1439
1440#ifdef VBOX_WITH_NEW_PHYS_CODE
1441 /*
1442 * Range match? It will all be within one range (see PGMAllHandler.cpp).
1443 */
1444 if ( GCPhysLast >= pRam->GCPhys
1445 && GCPhys <= pRam->GCPhysLast)
1446 {
1447 Assert(GCPhys >= pRam->GCPhys);
1448 Assert(GCPhysLast <= pRam->GCPhysLast);
1449
1450 /*
1451 * Turn the pages back into RAM pages.
1452 */
1453 uint32_t iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1454 uint32_t cLeft = cb >> PAGE_SHIFT;
1455 while (cLeft--)
1456 {
1457 PPGMPAGE pPage = &pRam->aPages[iPage];
1458 AssertMsg(PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
1459 AssertMsg(PGM_PAGE_IS_ZERO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
1460 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
1461 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_RAM);
1462 }
1463 break;
1464 }
1465#endif
1466
1467 /* next */
1468 pRamPrev = pRam;
1469 pRam = pRam->pNextR3;
1470 }
1471 }
1472
1473 return rc;
1474}
1475
1476
1477/**
1478 * Locate an MMIO2 range.
1479 *
1480 * @returns Pointer to the MMIO2 range.
1481 * @param pVM Pointer to the shared VM structure.
1482 * @param pDevIns The device instance owning the region.
1483 * @param iRegion The region.
1484 */
1485DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
1486{
1487 /*
1488 * Search the list.
1489 */
1490 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
1491 if ( pCur->pDevInsR3 == pDevIns
1492 && pCur->iRegion == iRegion)
1493 return pCur;
1494 return NULL;
1495}
1496
1497
1498/**
1499 * Allocate and register an MMIO2 region.
1500 *
1501 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's
1502 * RAM associated with a device. It is also non-shared memory with a
1503 * permanent ring-3 mapping and page backing (presently).
1504 *
1505 * An MMIO2 range may overlap with base memory if a lot of RAM
1506 * is configured for the VM, in which case we'll drop the base
1507 * memory pages. Presently we will make no attempt to preserve
1508 * anything that happens to be present in the base memory that
1509 * is replaced; this is of course incorrect, but it's too much
1510 * effort.
1511 *
1512 * @returns VBox status code.
1513 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the memory.
1514 * @retval VERR_ALREADY_EXISTS if the region already exists.
1515 *
1516 * @param pVM Pointer to the shared VM structure.
1517 * @param pDevIns The device instance owning the region.
1518 * @param iRegion The region number. If the MMIO2 memory is a PCI I/O region
1519 * this number has to be the number of that region. Otherwise
1520 * it can be any number up to UINT8_MAX.
1521 * @param cb The size of the region. Must be page aligned.
1522 * @param fFlags Reserved for future use, must be zero.
1523 * @param ppv Where to store the pointer to the ring-3 mapping of the memory.
1524 * @param pszDesc The description.
1525 */
1526VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc)
1527{
1528 /*
1529 * Validate input.
1530 */
1531 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1532 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1533 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1534 AssertPtrReturn(ppv, VERR_INVALID_POINTER);
1535 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1536 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
1537 AssertReturn(pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
1538 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1539 AssertReturn(cb, VERR_INVALID_PARAMETER);
1540 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
1541
1542 const uint32_t cPages = cb >> PAGE_SHIFT;
1543 AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
1544 AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY);
1545
1546 /*
1547 * Try to reserve and allocate the backing memory first, as this is what is
1548 * most likely to fail.
1549 */
1550 int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc);
1551 if (RT_FAILURE(rc))
1552 return rc;
1553
1554 void *pvPages;
1555 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE));
1556 if (RT_SUCCESS(rc))
1557 rc = SUPR3PageAllocEx(cPages, 0 /*fFlags*/, &pvPages, NULL /*pR0Ptr*/, paPages);
1558 if (RT_SUCCESS(rc))
1559 {
1560 memset(pvPages, 0, cPages * PAGE_SIZE);
1561
1562 /*
1563 * Create the MMIO2 range record for it.
1564 */
1565 const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]);
1566 PPGMMMIO2RANGE pNew;
1567 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
1568 AssertLogRelMsgRC(rc, ("cbRamRange=%zu\n", cbRange));
1569 if (RT_SUCCESS(rc))
1570 {
1571 pNew->pDevInsR3 = pDevIns;
1572 pNew->pvR3 = pvPages;
1573 //pNew->pNext = NULL;
1574 //pNew->fMapped = false;
1575 //pNew->fOverlapping = false;
1576 pNew->iRegion = iRegion;
1577 pNew->RamRange.pSelfR0 = MMHyperCCToR0(pVM, &pNew->RamRange);
1578 pNew->RamRange.pSelfRC = MMHyperCCToRC(pVM, &pNew->RamRange);
1579 pNew->RamRange.GCPhys = NIL_RTGCPHYS;
1580 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
1581 pNew->RamRange.pszDesc = pszDesc;
1582 pNew->RamRange.cb = cb;
1583 //pNew->RamRange.fFlags = 0; /// @todo MMIO2 flag?
1584
1585 pNew->RamRange.pvR3 = pvPages;
1586#ifndef VBOX_WITH_NEW_PHYS_CODE
1587 pNew->RamRange.paChunkR3Ptrs = NULL;
1588#endif
1589
1590 uint32_t iPage = cPages;
1591 while (iPage-- > 0)
1592 {
1593 PGM_PAGE_INIT(&pNew->RamRange.aPages[iPage],
1594 paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
1595 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
1596 }
1597
1598 /* update page count stats */
1599 pVM->pgm.s.cAllPages += cPages;
1600 pVM->pgm.s.cPrivatePages += cPages;
1601
1602 /*
1603 * Link it into the list.
1604 * Since there is no particular order, just push it.
1605 */
1606 pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3;
1607 pVM->pgm.s.pMmio2RangesR3 = pNew;
1608
1609 *ppv = pvPages;
1610 RTMemTmpFree(paPages);
1611 return VINF_SUCCESS;
1612 }
1613
1614 SUPR3PageFreeEx(pvPages, cPages);
1615 }
1616 RTMemTmpFree(paPages);
1617 MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pszDesc);
1618 return rc;
1619}
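/*
 * Editor's note -- usage sketch, not part of the original file.  Typical MMIO2
 * lifecycle for a device with a RAM-like PCI region (say a framebuffer):
 * register the backing at construction, map/unmap it as the guest programs
 * the PCI BAR, and deregister it at destruction.  Region number, size and the
 * BAR address handling are made up for the example.
 */
#if 0
static int exampleMmio2Lifecycle(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysBar)
{
    void *pvVRam;
    int rc = PGMR3PhysMMIO2Register(pVM, pDevIns, 0 /*iRegion*/, 4U*_1M, 0 /*fFlags*/,
                                    &pvVRam, "Example VRAM");
    if (RT_FAILURE(rc))
        return rc;
    memset(pvVRam, 0, 4U*_1M);                      /* permanent ring-3 mapping */

    rc = PGMR3PhysMMIO2Map(pVM, pDevIns, 0 /*iRegion*/, GCPhysBar);
    if (RT_SUCCESS(rc))
        rc = PGMR3PhysMMIO2Unmap(pVM, pDevIns, 0 /*iRegion*/, GCPhysBar);

    int rc2 = PGMR3PhysMMIO2Deregister(pVM, pDevIns, 0 /*iRegion*/);
    return RT_SUCCESS(rc) ? rc2 : rc;
}
#endif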
1620
1621
1622/**
1623 * Deregisters and frees an MMIO2 region.
1624 *
1625 * Any physical (and virtual) access handlers registered for the region must
1626 * be deregistered before calling this function.
1627 *
1628 * @returns VBox status code.
1629 * @param pVM Pointer to the shared VM structure.
1630 * @param pDevIns The device instance owning the region.
1631 * @param iRegion The region. If it's UINT32_MAX it'll be a wildcard match.
1632 */
1633VMMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
1634{
1635 /*
1636 * Validate input.
1637 */
1638 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1639 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1640 AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
1641
1642 int rc = VINF_SUCCESS;
1643 unsigned cFound = 0;
1644 PPGMMMIO2RANGE pPrev = NULL;
1645 PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3;
1646 while (pCur)
1647 {
1648 if ( pCur->pDevInsR3 == pDevIns
1649 && ( iRegion == UINT32_MAX
1650 || pCur->iRegion == iRegion))
1651 {
1652 cFound++;
1653
1654 /*
1655 * Unmap it if it's mapped.
1656 */
1657 if (pCur->fMapped)
1658 {
1659 int rc2 = PGMR3PhysMMIO2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);
1660 AssertRC(rc2);
1661 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1662 rc = rc2;
1663 }
1664
1665 /*
1666 * Unlink it
1667 */
1668 PPGMMMIO2RANGE pNext = pCur->pNextR3;
1669 if (pPrev)
1670 pPrev->pNextR3 = pNext;
1671 else
1672 pVM->pgm.s.pMmio2RangesR3 = pNext;
1673 pCur->pNextR3 = NULL;
1674
1675 /*
1676 * Free the memory.
1677 */
1678 int rc2 = SUPR3PageFreeEx(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT);
1679 AssertRC(rc2);
1680 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1681 rc = rc2;
1682
1683 uint32_t const cPages = pCur->RamRange.cb >> PAGE_SHIFT;
1684 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pCur->RamRange.pszDesc);
1685 AssertRC(rc2);
1686 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1687 rc = rc2;
1688
1689 /* we're leaking hyper memory here if done at runtime. */
1690 Assert( VMR3GetState(pVM) == VMSTATE_OFF
1691 || VMR3GetState(pVM) == VMSTATE_DESTROYING
1692 || VMR3GetState(pVM) == VMSTATE_TERMINATED
1693 || VMR3GetState(pVM) == VMSTATE_CREATING);
1694 /*rc = MMHyperFree(pVM, pCur);
1695 AssertRCReturn(rc, rc); - not safe, see the alloc call. */
1696
1697
1698 /* update page count stats */
1699 pVM->pgm.s.cAllPages -= cPages;
1700 pVM->pgm.s.cPrivatePages -= cPages;
1701
1702 /* next */
1703 pCur = pNext;
1704 }
1705 else
1706 {
1707 pPrev = pCur;
1708 pCur = pCur->pNextR3;
1709 }
1710 }
1711
1712 return !cFound && iRegion != UINT32_MAX ? VERR_NOT_FOUND : rc;
1713}
1714
1715
1716/**
1717 * Maps an MMIO2 region.
1718 *
1719 * This is done when a guest / the bios / state loading changes the
1720 * PCI config. The replacing of base memory has the same restrictions
1721 * as during registration, of course.
1722 *
1723 * @returns VBox status code.
1724 *
1725 * @param pVM Pointer to the shared VM structure.
1726 * @param pDevIns The device instance owning the region.
1727 */
1728VMMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
1729{
1730 /*
1731 * Validate input
1732 */
1733 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1734 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1735 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1736 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
1737 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
1738 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1739
1740 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1741 AssertReturn(pCur, VERR_NOT_FOUND);
1742 AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER);
1743 Assert(pCur->RamRange.GCPhys == NIL_RTGCPHYS);
1744 Assert(pCur->RamRange.GCPhysLast == NIL_RTGCPHYS);
1745
1746 const RTGCPHYS GCPhysLast = GCPhys + pCur->RamRange.cb - 1;
1747 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1748
1749 /*
1750 * Find our location in the ram range list, checking for
1751 * restriction we don't bother implementing yet (partially overlapping).
1752 */
1753 bool fRamExists = false;
1754 PPGMRAMRANGE pRamPrev = NULL;
1755 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1756 while (pRam && GCPhysLast >= pRam->GCPhys)
1757 {
1758 if ( GCPhys <= pRam->GCPhysLast
1759 && GCPhysLast >= pRam->GCPhys)
1760 {
1761 /* completely within? */
1762 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
1763 && GCPhysLast <= pRam->GCPhysLast,
1764 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
1765 GCPhys, GCPhysLast, pCur->RamRange.pszDesc,
1766 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1767 VERR_PGM_RAM_CONFLICT);
1768 fRamExists = true;
1769 break;
1770 }
1771
1772 /* next */
1773 pRamPrev = pRam;
1774 pRam = pRam->pNextR3;
1775 }
1776 if (fRamExists)
1777 {
1778 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1779 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
1780 while (cPagesLeft-- > 0)
1781 {
1782 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
1783 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
1784 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc),
1785 VERR_PGM_RAM_CONFLICT);
1786 pPage++;
1787 }
1788 }
1789 Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n",
1790 GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc));
1791
1792 /*
1793 * Make the changes.
1794 */
1795 pgmLock(pVM);
1796
1797 pCur->RamRange.GCPhys = GCPhys;
1798 pCur->RamRange.GCPhysLast = GCPhysLast;
1799 pCur->fMapped = true;
1800 pCur->fOverlapping = fRamExists;
1801
1802 if (fRamExists)
1803 {
1804/** @todo use pgmR3PhysFreePageRange here. */
1805 uint32_t cPendingPages = 0;
1806 PGMMFREEPAGESREQ pReq;
1807 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1808 AssertLogRelRCReturn(rc, rc);
1809
1810 /* replace the pages, freeing all present RAM pages. */
1811 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
1812 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1813 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
1814 while (cPagesLeft-- > 0)
1815 {
1816 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
1817 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
1818
1819 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
1820 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhys);
1821 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_MMIO2);
1822 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ALLOCATED);
1823
1824 pVM->pgm.s.cZeroPages--;
1825 GCPhys += PAGE_SIZE;
1826 pPageSrc++;
1827 pPageDst++;
1828 }
1829
1830 if (cPendingPages)
1831 {
1832 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
1833 AssertLogRelRCReturn(rc, rc);
1834 }
1835 GMMR3FreePagesCleanup(pReq);
1836 }
1837 else
1838 {
1839 /* link in the ram range */
1840 pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev);
1841 REMR3NotifyPhysRamRegister(pVM, GCPhys, pCur->RamRange.cb, REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2);
1842 }
1843
1844 pgmUnlock(pVM);
1845
1846 return VINF_SUCCESS;
1847}
1848
1849
1850/**
1851 * Unmaps an MMIO2 region.
1852 *
1853 * This is done when the guest, the BIOS or state loading changes the
1854 * PCI config. Replacing base memory is subject to the same restrictions
1855 * as during registration, of course.
 *
 * @returns VBox status code.
 *
 * @param pVM Pointer to the shared VM structure.
 * @param pDevIns The device instance owning the region.
 * @param iRegion The region index.
 * @param GCPhys The guest physical address the region is currently mapped at.
1856 */
1857VMMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
1858{
1859 /*
1860 * Validate input
1861 */
1862 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1863 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1864 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1865 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
1866 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
1867 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1868
1869 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1870 AssertReturn(pCur, VERR_NOT_FOUND);
1871 AssertReturn(pCur->fMapped, VERR_WRONG_ORDER);
1872 AssertReturn(pCur->RamRange.GCPhys == GCPhys, VERR_INVALID_PARAMETER);
1873 Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS);
1874
1875 Log(("PGMR3PhysMMIO2Unmap: %RGp-%RGp %s\n",
1876 pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc));
1877
1878 /*
1879 * Unmap it.
1880 */
1881 pgmLock(pVM);
1882
1883 if (pCur->fOverlapping)
1884 {
1885 /* Restore the RAM pages we've replaced. */
1886 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1887 while (pRam->GCPhys > pCur->RamRange.GCPhysLast)
1888 pRam = pRam->pNextR3;
1889
1890 RTHCPHYS const HCPhysZeroPg = pVM->pgm.s.HCPhysZeroPg;
1891 Assert(HCPhysZeroPg != 0 && HCPhysZeroPg != NIL_RTHCPHYS);
1892 PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1893 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
1894 while (cPagesLeft-- > 0)
1895 {
1896 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhysZeroPg);
1897 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_RAM);
1898 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ZERO);
1899 PGM_PAGE_SET_PAGEID(pPageDst, NIL_GMM_PAGEID);
1900
1901 pVM->pgm.s.cZeroPages++;
1902 pPageDst++;
1903 }
1904 }
1905 else
1906 {
1907 REMR3NotifyPhysRamDeregister(pVM, pCur->RamRange.GCPhys, pCur->RamRange.cb);
1908 pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange);
1909 }
1910
1911 pCur->RamRange.GCPhys = NIL_RTGCPHYS;
1912 pCur->RamRange.GCPhysLast = NIL_RTGCPHYS;
1913 pCur->fOverlapping = false;
1914 pCur->fMapped = false;
1915
1916 pgmUnlock(pVM);
1917
1918 return VINF_SUCCESS;
1919}
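/*
 * Illustrative sketch (not part of the original file): a device remapping its
 * MMIO2 region when the guest reprograms the corresponding PCI BAR. The helper
 * name, the region index and the old/new guest physical addresses are made up;
 * both calls must be made on the EMT, as asserted by the APIs above.
 */
#if 0 /* example only */
static int exampleMMIO2Remap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion,
                             RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew)
{
    /* Unmap the region from its current location first... */
    int rc = PGMR3PhysMMIO2Unmap(pVM, pDevIns, iRegion, GCPhysOld);
    if (RT_SUCCESS(rc))
        /* ...then map it at the new, page aligned address. */
        rc = PGMR3PhysMMIO2Map(pVM, pDevIns, iRegion, GCPhysNew);
    return rc;
}
#endif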
1920
1921
1922/**
1923 * Checks if the given address is an MMIO2 base address or not.
1924 *
1925 * @returns true/false accordingly.
1926 * @param pVM Pointer to the shared VM structure.
1927 * @param pDevIns The owner of the memory, optional.
1928 * @param GCPhys The address to check.
1929 */
1930VMMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
1931{
1932 /*
1933 * Validate input
1934 */
1935 VM_ASSERT_EMT_RETURN(pVM, false);
1936 AssertPtrReturn(pDevIns, false);
1937 AssertReturn(GCPhys != NIL_RTGCPHYS, false);
1938 AssertReturn(GCPhys != 0, false);
1939 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);
1940
1941 /*
1942 * Search the list.
1943 */
1944 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
1945 if (pCur->RamRange.GCPhys == GCPhys)
1946 {
1947 Assert(pCur->fMapped);
1948 return true;
1949 }
1950 return false;
1951}
1952
1953
1954/**
1955 * Gets the HC physical address of a page in the MMIO2 region.
1956 *
1957 * This API is intended for MMHyper and shouldn't be called
1958 * by anyone else...
1959 *
1960 * @returns VBox status code.
1961 * @param pVM Pointer to the shared VM structure.
1962 * @param pDevIns The owner of the memory, optional.
1963 * @param iRegion The region.
1964 * @param off The page expressed as an offset into the MMIO2 region.
1965 * @param pHCPhys Where to store the result.
1966 */
1967VMMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
1968{
1969 /*
1970 * Validate input
1971 */
1972 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1973 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1974 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1975
1976 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1977 AssertReturn(pCur, VERR_NOT_FOUND);
1978 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
1979
1980 PCPGMPAGE pPage = &pCur->RamRange.aPages[off >> PAGE_SHIFT];
1981 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1982 return VINF_SUCCESS;
1983}
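/*
 * Illustrative sketch (not part of the original file): looking up the host
 * physical address backing the first page of a region. The region index is
 * made up; the offset must fall within the region.
 */
#if 0 /* example only */
    RTHCPHYS HCPhysFirst;
    int rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, 0 /*iRegion*/, 0 /*off*/, &HCPhysFirst);
    AssertRC(rc);
#endif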
1984
1985
1986/**
1987 * Maps a portion of an MMIO2 region into kernel space (host).
1988 *
1989 * The kernel mapping will become invalid when the MMIO2 memory is deregistered
1990 * or the VM is terminated.
1991 *
1992 * @return VBox status code.
1993 *
1994 * @param pVM Pointer to the shared VM structure.
1995 * @param pDevIns The device owning the MMIO2 memory.
1996 * @param iRegion The region.
1997 * @param off The offset into the region. Must be page aligned.
1998 * @param cb The number of bytes to map. Must be page aligned.
1999 * @param pszDesc Mapping description.
2000 * @param pR0Ptr Where to store the R0 address.
2001 */
2002VMMR3DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
2003 const char *pszDesc, PRTR0PTR pR0Ptr)
2004{
2005 /*
2006 * Validate input.
2007 */
2008 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2009 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2010 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2011
2012 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2013 AssertReturn(pCur, VERR_NOT_FOUND);
2014 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2015 AssertReturn(cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2016 AssertReturn(off + cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2017
2018 /*
2019 * Pass the request on to the support library/driver.
2020 */
2021 int rc = SUPR3PageMapKernel(pCur->pvR3, off, cb, 0, pR0Ptr);
2022
2023 return rc;
2024}
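/*
 * Illustrative sketch (not part of the original file): mapping the first 64 KB
 * of an MMIO2 region into ring-0 so the device can touch it from its ring-0
 * code path. The region index, size and description string are made up.
 */
#if 0 /* example only */
    RTR0PTR R0PtrChunk;
    int rc = PGMR3PhysMMIO2MapKernel(pVM, pDevIns, 0 /*iRegion*/, 0 /*off*/,
                                     0x10000 /*cb = 64 KB*/, "Example/MMIO2-R0", &R0PtrChunk);
    AssertRC(rc);
#endif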
2025
2026
2027/**
2028 * Registers a ROM image.
2029 *
2030 * Shadowed ROM images require double the amount of backing memory, so
2031 * don't use that unless you have to. Shadowing of ROM images is a process
2032 * where we can select where the reads go and where the writes go. On real
2033 * hardware the chipset provides means to configure this. We provide
2034 * PGMR3PhysRomProtect() for this purpose.
2035 *
2036 * A read-only copy of the ROM image will always be kept around while we
2037 * will allocate RAM pages for the changes on demand (unless all memory
2038 * is configured to be preallocated).
2039 *
2040 * @returns VBox status.
2041 * @param pVM VM Handle.
2042 * @param pDevIns The device instance owning the ROM.
2043 * @param GCPhys First physical address in the range.
2044 * Must be page aligned!
2045 * @param cb The size of the range (in bytes).
2046 * Must be page aligned!
2047 * @param pvBinary Pointer to the binary data backing the ROM image.
2048 * This must be exactly \a cb in size.
2049 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAGS_SHADOWED
2050 * and/or PGMPHYS_ROM_FLAGS_PERMANENT_BINARY.
2051 * @param pszDesc Pointer to description string. This must not be freed.
2052 *
2053 * @remark There is no way to remove the ROM yet, neither automatically on device
2054 * cleanup nor manually from the device. This isn't difficult in any way, it's
2055 * just not something we expect to be necessary for a while.
2056 */
2057VMMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
2058 const void *pvBinary, uint32_t fFlags, const char *pszDesc)
2059{
2060 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p fFlags=%#x pszDesc=%s\n",
2061 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, fFlags, pszDesc));
2062
2063 /*
2064 * Validate input.
2065 */
2066 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2067 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
2068 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
2069 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2070 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2071 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
2072 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
2073 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAGS_SHADOWED | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
2074 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
2075
2076 const uint32_t cPages = cb >> PAGE_SHIFT;
2077
2078 /*
2079 * Find the ROM location in the ROM list first.
2080 */
2081 PPGMROMRANGE pRomPrev = NULL;
2082 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
2083 while (pRom && GCPhysLast >= pRom->GCPhys)
2084 {
2085 if ( GCPhys <= pRom->GCPhysLast
2086 && GCPhysLast >= pRom->GCPhys)
2087 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
2088 GCPhys, GCPhysLast, pszDesc,
2089 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
2090 VERR_PGM_RAM_CONFLICT);
2091 /* next */
2092 pRomPrev = pRom;
2093 pRom = pRom->pNextR3;
2094 }
2095
2096 /*
2097 * Find the RAM location and check for conflicts.
2098 *
2099 * Conflict detection is a bit different than for RAM
2100 * registration since a ROM can be located within a RAM
2101 * range. So, what we have to check for is other memory
2102 * types (other than RAM that is) and that we don't span
2103 * more than one RAM range (lazy).
2104 */
2105 bool fRamExists = false;
2106 PPGMRAMRANGE pRamPrev = NULL;
2107 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
2108 while (pRam && GCPhysLast >= pRam->GCPhys)
2109 {
2110 if ( GCPhys <= pRam->GCPhysLast
2111 && GCPhysLast >= pRam->GCPhys)
2112 {
2113 /* completely within? */
2114 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
2115 && GCPhysLast <= pRam->GCPhysLast,
2116 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
2117 GCPhys, GCPhysLast, pszDesc,
2118 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
2119 VERR_PGM_RAM_CONFLICT);
2120 fRamExists = true;
2121 break;
2122 }
2123
2124 /* next */
2125 pRamPrev = pRam;
2126 pRam = pRam->pNextR3;
2127 }
2128 if (fRamExists)
2129 {
2130 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2131 uint32_t cPagesLeft = cPages;
2132 while (cPagesLeft-- > 0)
2133 {
2134 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
2135 ("%RGp (%R[pgmpage]) isn't a RAM page - registering %RGp-%RGp (%s).\n",
2136 pRam->GCPhys + ((RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << PAGE_SHIFT),
2137 pPage, GCPhys, GCPhysLast, pszDesc), VERR_PGM_RAM_CONFLICT);
2138 Assert(PGM_PAGE_IS_ZERO(pPage));
2139 pPage++;
2140 }
2141 }
2142
2143 /*
2144 * Update the base memory reservation if necessary.
2145 */
2146 uint32_t cExtraBaseCost = fRamExists ? cPages : 0;
2147 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
2148 cExtraBaseCost += cPages;
2149 if (cExtraBaseCost)
2150 {
2151 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
2152 if (RT_FAILURE(rc))
2153 return rc;
2154 }
2155
2156 /*
2157 * Allocate memory for the virgin copy of the RAM.
2158 */
2159 PGMMALLOCATEPAGESREQ pReq;
2160 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
2161 AssertRCReturn(rc, rc);
2162
2163 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2164 {
2165 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
2166 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
2167 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
2168 }
2169
2170 pgmLock(pVM);
2171 rc = GMMR3AllocatePagesPerform(pVM, pReq);
2172 pgmUnlock(pVM);
2173 if (RT_FAILURE(rc))
2174 {
2175 GMMR3AllocatePagesCleanup(pReq);
2176 return rc;
2177 }
2178
2179 /*
2180 * Allocate the new ROM range and RAM range (if necessary).
2181 */
2182 PPGMROMRANGE pRomNew;
2183 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
2184 if (RT_SUCCESS(rc))
2185 {
2186 PPGMRAMRANGE pRamNew = NULL;
2187 if (!fRamExists)
2188 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
2189 if (RT_SUCCESS(rc))
2190 {
2191 pgmLock(pVM);
2192
2193 /*
2194 * Initialize and insert the RAM range (if required).
2195 */
2196 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
2197 if (!fRamExists)
2198 {
2199 pRamNew->pSelfR0 = MMHyperCCToR0(pVM, pRamNew);
2200 pRamNew->pSelfRC = MMHyperCCToRC(pVM, pRamNew);
2201 pRamNew->GCPhys = GCPhys;
2202 pRamNew->GCPhysLast = GCPhysLast;
2203 pRamNew->cb = cb;
2204 pRamNew->pszDesc = pszDesc;
2205 pRamNew->fFlags = 0;
2206 pRamNew->pvR3 = NULL;
2207
2208 PPGMPAGE pPage = &pRamNew->aPages[0];
2209 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
2210 {
2211 PGM_PAGE_INIT(pPage,
2212 pReq->aPages[iPage].HCPhysGCPhys,
2213 pReq->aPages[iPage].idPage,
2214 PGMPAGETYPE_ROM,
2215 PGM_PAGE_STATE_ALLOCATED);
2216
2217 pRomPage->Virgin = *pPage;
2218 }
2219
2220 pVM->pgm.s.cAllPages += cPages;
2221 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
2222 }
2223 else
2224 {
2225 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2226 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
2227 {
2228 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_ROM);
2229 PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
2230 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
2231 PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);
2232
2233 pRomPage->Virgin = *pPage;
2234 }
2235
2236 pRamNew = pRam;
2237
2238 pVM->pgm.s.cZeroPages -= cPages;
2239 }
2240 pVM->pgm.s.cPrivatePages += cPages;
2241
2242 pgmUnlock(pVM);
2243
2244
2245 /*
2246 * !HACK ALERT! REM + (Shadowed) ROM ==> mess.
2247 *
2248 * If it's shadowed we'll register the handler after the ROM notification
2249 * so we get the access handler callbacks that we should. If it isn't
2250 * shadowed we'll do it the other way around to make REM use the built-in
2251 * ROM behavior and not the handler behavior (which is to route all access
2252 * to PGM atm).
2253 */
2254 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
2255 {
2256 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, true /* fShadowed */);
2257 rc = PGMR3HandlerPhysicalRegister(pVM,
2258 fFlags & PGMPHYS_ROM_FLAGS_SHADOWED
2259 ? PGMPHYSHANDLERTYPE_PHYSICAL_ALL
2260 : PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
2261 GCPhys, GCPhysLast,
2262 pgmR3PhysRomWriteHandler, pRomNew,
2263 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
2264 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
2265 }
2266 else
2267 {
2268 rc = PGMR3HandlerPhysicalRegister(pVM,
2269 fFlags & PGMPHYS_ROM_FLAGS_SHADOWED
2270 ? PGMPHYSHANDLERTYPE_PHYSICAL_ALL
2271 : PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
2272 GCPhys, GCPhysLast,
2273 pgmR3PhysRomWriteHandler, pRomNew,
2274 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
2275 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
2276 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false /* fShadowed */);
2277 }
2278 if (RT_SUCCESS(rc))
2279 {
2280 pgmLock(pVM);
2281
2282 /*
2283 * Copy the image over to the virgin pages.
2284 * This must be done after linking in the RAM range.
2285 */
2286 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
2287 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
2288 {
2289 void *pvDstPage;
2290 PPGMPAGEMAP pMapIgnored;
2291 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pMapIgnored, &pvDstPage);
2292 if (RT_FAILURE(rc))
2293 {
2294 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
2295 break;
2296 }
2297 memcpy(pvDstPage, (const uint8_t *)pvBinary + (iPage << PAGE_SHIFT), PAGE_SIZE);
2298 }
2299 if (RT_SUCCESS(rc))
2300 {
2301 /*
2302 * Initialize the ROM range.
2303 * Note that the Virgin member of the pages has already been initialized above.
2304 */
2305 pRomNew->GCPhys = GCPhys;
2306 pRomNew->GCPhysLast = GCPhysLast;
2307 pRomNew->cb = cb;
2308 pRomNew->fFlags = fFlags;
2309 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY ? pvBinary : NULL;
2310 pRomNew->pszDesc = pszDesc;
2311
2312 for (unsigned iPage = 0; iPage < cPages; iPage++)
2313 {
2314 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
2315 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
2316 PGM_PAGE_INIT_ZERO_REAL(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
2317 }
2318
2319 /* update the page count stats */
2320 pVM->pgm.s.cZeroPages += cPages;
2321 pVM->pgm.s.cAllPages += cPages;
2322
2323 /*
2324 * Insert the ROM range, tell REM and return successfully.
2325 */
2326 pRomNew->pNextR3 = pRom;
2327 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
2328 pRomNew->pNextRC = pRom ? MMHyperCCToRC(pVM, pRom) : NIL_RTRCPTR;
2329
2330 if (pRomPrev)
2331 {
2332 pRomPrev->pNextR3 = pRomNew;
2333 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
2334 pRomPrev->pNextRC = MMHyperCCToRC(pVM, pRomNew);
2335 }
2336 else
2337 {
2338 pVM->pgm.s.pRomRangesR3 = pRomNew;
2339 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
2340 pVM->pgm.s.pRomRangesRC = MMHyperCCToRC(pVM, pRomNew);
2341 }
2342
2343 GMMR3AllocatePagesCleanup(pReq);
2344 pgmUnlock(pVM);
2345 return VINF_SUCCESS;
2346 }
2347
2348 /* bail out */
2349
2350 pgmUnlock(pVM);
2351 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
2352 AssertRC(rc2);
2353 pgmLock(pVM);
2354 }
2355
2356 if (!fRamExists)
2357 {
2358 pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
2359 MMHyperFree(pVM, pRamNew);
2360 }
2361 }
2362 MMHyperFree(pVM, pRomNew);
2363 }
2364
2365 /** @todo Purge the mapping cache or something... */
2366 GMMR3FreeAllocatedPages(pVM, pReq);
2367 GMMR3AllocatePagesCleanup(pReq);
2368 pgmUnlock(pVM);
2369 return rc;
2370}
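/*
 * Illustrative sketch (not part of the original file): registering a shadowed
 * 128 KB system BIOS image at the traditional 0xe0000-0xfffff window during
 * device construction. The image pointer and description are made up; the
 * binary must stay valid when PGMPHYS_ROM_FLAGS_PERMANENT_BINARY is used.
 */
#if 0 /* example only */
    int rc = PGMR3PhysRomRegister(pVM, pDevIns,
                                  0xe0000 /*GCPhys*/, 0x20000 /*cb = 128 KB*/,
                                  pvBiosImage,
                                  PGMPHYS_ROM_FLAGS_SHADOWED | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY,
                                  "Example BIOS");
    AssertRC(rc);
#endif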
2371
2372
2373/**
2374 * \#PF Handler callback for ROM write accesses.
2375 *
2376 * @returns VINF_SUCCESS if the handler has carried out the operation.
2377 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
2378 * @param pVM VM Handle.
2379 * @param GCPhys The physical address the guest is writing to.
2380 * @param pvPhys The HC mapping of that address.
2381 * @param pvBuf What the guest is reading/writing.
2382 * @param cbBuf How much it's reading/writing.
2383 * @param enmAccessType The access type.
2384 * @param pvUser User argument.
2385 */
2386static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
2387{
2388 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
2389 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
2390 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
2391 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2392 Log5(("pgmR3PhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
2393
2394 if (enmAccessType == PGMACCESSTYPE_READ)
2395 {
2396 switch (pRomPage->enmProt)
2397 {
2398 /*
2399 * Take the default action.
2400 */
2401 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
2402 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
2403 case PGMROMPROT_READ_ROM_WRITE_RAM:
2404 case PGMROMPROT_READ_RAM_WRITE_RAM:
2405 return VINF_PGM_HANDLER_DO_DEFAULT;
2406
2407 default:
2408 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
2409 pRom->aPages[iPage].enmProt, iPage, GCPhys),
2410 VERR_INTERNAL_ERROR);
2411 }
2412 }
2413 else
2414 {
2415 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
2416 switch (pRomPage->enmProt)
2417 {
2418 /*
2419 * Ignore writes.
2420 */
2421 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
2422 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
2423 return VINF_SUCCESS;
2424
2425 /*
2426 * Write to the ram page.
2427 */
2428 case PGMROMPROT_READ_ROM_WRITE_RAM:
2429 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
2430 {
2431 /* This should be impossible now, pvPhys doesn't work cross page any longer. */
2432 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
2433
2434 /*
2435 * Take the lock, do lazy allocation, map the page and copy the data.
2436 *
2437 * Note that we have to bypass the mapping TLB since it works on
2438 * guest physical addresses and entering the shadow page would
2439 * kind of screw things up...
2440 */
2441 int rc = pgmLock(pVM);
2442 AssertRC(rc);
2443 PPGMPAGE pShadowPage = &pRomPage->Shadow;
2444 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
2445 {
2446 pShadowPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
2447 AssertLogRelReturn(pShadowPage, VERR_INTERNAL_ERROR);
2448 }
2449
2450 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pShadowPage) != PGM_PAGE_STATE_ALLOCATED))
2451 {
2452 rc = pgmPhysPageMakeWritable(pVM, pShadowPage, GCPhys);
2453 if (RT_FAILURE(rc))
2454 {
2455 pgmUnlock(pVM);
2456 return rc;
2457 }
2458 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
2459 }
2460
2461 void *pvDstPage;
2462 PPGMPAGEMAP pMapIgnored;
2463 int rc2 = pgmPhysPageMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pMapIgnored, &pvDstPage);
2464 if (RT_SUCCESS(rc2))
2465 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
2466 else
2467 rc = rc2;
2468
2469 pgmUnlock(pVM);
2470 return rc;
2471 }
2472
2473 default:
2474 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
2475 pRom->aPages[iPage].enmProt, iPage, GCPhys),
2476 VERR_INTERNAL_ERROR);
2477 }
2478 }
2479}
2480
2481
2482/**
2483 * Called by PGMR3Reset to reset the shadow, switch to the virgin,
2484 * and verify that the virgin part is untouched.
2485 *
2486 * This is done after the normal memory has been cleared.
2487 *
2488 * ASSUMES that the caller owns the PGM lock.
2489 *
2490 * @param pVM The VM handle.
2491 */
2492int pgmR3PhysRomReset(PVM pVM)
2493{
2494 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2495 {
2496 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
2497
2498 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
2499 {
2500 /*
2501 * Reset the physical handler.
2502 */
2503 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
2504 AssertRCReturn(rc, rc);
2505
2506 /*
2507 * What we do with the shadow pages depends on the memory
2508 * preallocation option. If not enabled, we'll just throw
2509 * out all the dirty pages and replace them by the zero page.
2510 */
2511 if (!pVM->pgm.s.fRamPreAlloc)
2512 {
2513 /* Free the dirty pages. */
2514 uint32_t cPendingPages = 0;
2515 PGMMFREEPAGESREQ pReq;
2516 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2517 AssertRCReturn(rc, rc);
2518
2519 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2520 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
2521 {
2522 Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) == PGM_PAGE_STATE_ALLOCATED);
2523 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRom->aPages[iPage].Shadow, pRom->GCPhys + (iPage << PAGE_SHIFT));
2524 AssertLogRelRCReturn(rc, rc);
2525 }
2526
2527 if (cPendingPages)
2528 {
2529 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2530 AssertLogRelRCReturn(rc, rc);
2531 }
2532 GMMR3FreePagesCleanup(pReq);
2533 }
2534 else
2535 {
2536 /* clear all the shadow pages. */
2537 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2538 {
2539 Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO);
2540
2541 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
2542 rc = pgmPhysPageMakeWritable(pVM, &pRom->aPages[iPage].Shadow, GCPhys);
2543 if (RT_FAILURE(rc))
2544 break;
2545
2546 void *pvDstPage;
2547 PPGMPAGEMAP pMapIgnored;
2548 rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pMapIgnored, &pvDstPage);
2549 if (RT_FAILURE(rc))
2550 break;
2551 ASMMemZeroPage(pvDstPage);
2552 }
2553 AssertRCReturn(rc, rc);
2554 }
2555 }
2556
2557#ifdef VBOX_STRICT
2558 /*
2559 * Verify that the virgin page is unchanged if possible.
2560 */
2561 if (pRom->pvOriginal)
2562 {
2563 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
2564 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbSrcPage += PAGE_SIZE)
2565 {
2566 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
2567 PPGMPAGEMAP pMapIgnored;
2568 void *pvDstPage;
2569 int rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pMapIgnored, &pvDstPage);
2570 if (RT_FAILURE(rc))
2571 break;
2572 if (memcmp(pvDstPage, pbSrcPage, PAGE_SIZE))
2573 LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
2574 GCPhys, pRom->pszDesc));
2575 }
2576 }
2577#endif
2578 }
2579
2580 return VINF_SUCCESS;
2581}
2582
2583
2584/**
2585 * Change the shadowing of a range of ROM pages.
2586 *
2587 * This is intended for implementing chipset specific memory registers
2588 * and will not be very strict about the input. It will silently ignore
2589 * any pages that are not part of a shadowed ROM.
2590 *
2591 * @returns VBox status code.
2592 * @retval VINF_PGM_SYNC_CR3
2593 *
2594 * @param pVM Pointer to the shared VM structure.
2595 * @param GCPhys Where to start. Page aligned.
2596 * @param cb How much to change. Page aligned.
2597 * @param enmProt The new ROM protection.
2598 */
2599VMMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
2600{
2601 /*
2602 * Check input
2603 */
2604 if (!cb)
2605 return VINF_SUCCESS;
2606 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2607 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2608 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2609 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2610 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
2611
2612 /*
2613 * Process the request.
2614 */
2615 int rc = VINF_SUCCESS;
2616 bool fFlushTLB = false;
2617 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2618 if ( GCPhys <= pRom->GCPhysLast
2619 && GCPhysLast >= pRom->GCPhys
2620 && (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
2621 {
2622 /*
2623 * Iterate the relevant pages and make the necessary changes.
2624 */
2625 bool fChanges = false;
2626 uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
2627 ? pRom->cb >> PAGE_SHIFT
2628 : (GCPhysLast - pRom->GCPhys + 1) >> PAGE_SHIFT;
2629 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
2630 iPage < cPages;
2631 iPage++)
2632 {
2633 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2634 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
2635 {
2636 fChanges = true;
2637
2638 /* flush references to the page. */
2639 PPGMPAGE pRamPage = pgmPhysGetPage(&pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));
2640 int rc2 = pgmPoolTrackFlushGCPhys(pVM, pRamPage, &fFlushTLB);
2641 if (rc2 != VINF_SUCCESS && (rc == VINF_SUCCESS || RT_FAILURE(rc2)))
2642 rc = rc2;
2643
2644 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
2645 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
2646
2647 *pOld = *pRamPage;
2648 *pRamPage = *pNew;
2649 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
2650 }
2651 pRomPage->enmProt = enmProt;
2652 }
2653
2654 /*
2655 * Reset the access handler if we made changes, no need
2656 * to optimize this.
2657 */
2658 if (fChanges)
2659 {
2660 int rc = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
2661 AssertRCReturn(rc, rc);
2662 }
2663
2664 /* Advance - cb isn't updated. */
2665 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
2666 }
2667
2668 if (fFlushTLB)
2669 PGM_INVL_GUEST_TLBS();
2670 return rc;
2671}
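/*
 * Illustrative sketch (not part of the original file): a chipset PAM-style
 * register emulation enabling "read ROM, write to shadow RAM" for one 64 KB
 * segment. The address and size are made up; non-shadowed pages in the range
 * are silently ignored as described above.
 */
#if 0 /* example only */
    int rc = PGMR3PhysRomProtect(pVM, 0xf0000 /*GCPhys*/, 0x10000 /*cb = 64 KB*/,
                                 PGMROMPROT_READ_ROM_WRITE_RAM);
    /* rc may be VINF_PGM_SYNC_CR3, in which case the caller has to make sure
       the (shadow) page tables are resynced before resuming guest execution. */
    AssertRC(rc);
#endif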
2672
2673#ifndef VBOX_WITH_NEW_PHYS_CODE
2674
2675/**
2676 * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
2677 * registration APIs call to inform PGM about memory registrations.
2678 *
2679 * It registers the physical memory range with PGM. MM is responsible
2680 * for the toplevel things - allocation and locking - while PGM is taking
2681 * care of all the details and implements the physical address space virtualization.
2682 *
2683 * @returns VBox status.
2684 * @param pVM The VM handle.
2685 * @param pvRam HC virtual address of the RAM range. (page aligned)
2686 * @param GCPhys GC physical address of the RAM range. (page aligned)
2687 * @param cb Size of the RAM range. (page aligned)
2688 * @param fFlags Flags, MM_RAM_*.
2689 * @param paPages Pointer to an array of physical page descriptors.
2690 * @param pszDesc Description string.
2691 */
2692VMMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
2693{
2694 /*
2695 * Validate input.
2696 * (Not so important because callers are only MMR3PhysRegister()
2697 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
2698 */
2699 Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
2700
2701 Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
2702 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
2703 Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
2704 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
2705 Assert(!(fFlags & ~0xfff));
2706 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
2707 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
2708 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
2709 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2710 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2711 if (GCPhysLast < GCPhys)
2712 {
2713 AssertMsgFailed(("The range wraps! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2714 return VERR_INVALID_PARAMETER;
2715 }
2716
2717 /*
2718 * Find range location and check for conflicts.
2719 */
2720 PPGMRAMRANGE pPrev = NULL;
2721 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
2722 while (pCur)
2723 {
2724 if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
2725 {
2726 AssertMsgFailed(("Conflict! This cannot happen!\n"));
2727 return VERR_PGM_RAM_CONFLICT;
2728 }
2729 if (GCPhysLast < pCur->GCPhys)
2730 break;
2731
2732 /* next */
2733 pPrev = pCur;
2734 pCur = pCur->pNextR3;
2735 }
2736
2737 /*
2738 * Allocate RAM range.
2739 * Small ranges are allocated from the heap, big ones have separate mappings.
2740 */
2741 size_t cbRam = RT_OFFSETOF(PGMRAMRANGE, aPages[cb >> PAGE_SHIFT]);
2742 PPGMRAMRANGE pNew;
2743 int rc = VERR_NO_MEMORY;
2744 if (cbRam > PAGE_SIZE / 2)
2745 { /* large */
2746 cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
2747 rc = MMR3HyperAllocOnceNoRel(pVM, cbRam, PAGE_SIZE, MM_TAG_PGM_PHYS, (void **)&pNew);
2748 AssertMsgRC(rc, ("MMR3HyperAllocOnceNoRel(,%#x,,) -> %Rrc\n", cbRam, rc));
2749 }
2750 else
2751 { /* small */
2752 rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
2753 AssertMsgRC(rc, ("MMHyperAlloc(,%#x,,,) -> %Rrc\n", cbRam, rc));
2754 }
2755 if (RT_SUCCESS(rc))
2756 {
2757 /*
2758 * Initialize the range.
2759 */
2760 pNew->pvR3 = pvRam;
2761 pNew->GCPhys = GCPhys;
2762 pNew->GCPhysLast = GCPhysLast;
2763 pNew->cb = cb;
2764 pNew->fFlags = fFlags;
2765 pNew->paChunkR3Ptrs = NULL;
2766
2767 unsigned iPage = (unsigned)(cb >> PAGE_SHIFT);
2768 if (paPages)
2769 {
2770 while (iPage-- > 0)
2771 {
2772 PGM_PAGE_INIT(&pNew->aPages[iPage], paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
2773 fFlags & MM_RAM_FLAGS_MMIO2 ? PGMPAGETYPE_MMIO2 : PGMPAGETYPE_RAM,
2774 PGM_PAGE_STATE_ALLOCATED);
2775 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
2776 }
2777 }
2778 else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
2779 {
2780 /* Allocate memory for chunk to HC ptr lookup array. */
2781 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
2782 AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Rrc\n", cbRam, cb), rc);
2783
2784 /* Physical memory will be allocated on demand. */
2785 while (iPage-- > 0)
2786 {
2787 PGM_PAGE_INIT(&pNew->aPages[iPage], 0, NIL_GMM_PAGEID, PGMPAGETYPE_RAM, PGM_PAGE_STATE_ZERO);
2788 pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
2789 }
2790 }
2791 else
2792 {
2793 Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
2794 RTHCPHYS HCPhysDummyPage = MMR3PageDummyHCPhys(pVM);
2795 while (iPage-- > 0)
2796 {
2797 PGM_PAGE_INIT(&pNew->aPages[iPage], HCPhysDummyPage, NIL_GMM_PAGEID, PGMPAGETYPE_MMIO, PGM_PAGE_STATE_ZERO);
2798 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
2799 }
2800 }
2801
2802 /*
2803 * Insert the new RAM range.
2804 */
2805 pgmLock(pVM);
2806 pNew->pNextR3 = pCur;
2807 pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : NIL_RTR0PTR;
2808 pNew->pNextRC = pCur ? MMHyperCCToRC(pVM, pCur) : NIL_RTRCPTR;
2809 if (pPrev)
2810 {
2811 pPrev->pNextR3 = pNew;
2812 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
2813 pPrev->pNextRC = MMHyperCCToRC(pVM, pNew);
2814 }
2815 else
2816 {
2817 pVM->pgm.s.pRamRangesR3 = pNew;
2818 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
2819 pVM->pgm.s.pRamRangesRC = MMHyperCCToRC(pVM, pNew);
2820 }
2821 pgmUnlock(pVM);
2822 }
2823 return rc;
2824}
2825
2826
2827/**
2828 * Registers a chunk of a physical memory range with PGM. MM is responsible
2829 * for the toplevel things - allocation and locking - while PGM is taking
2830 * care of all the details and implements the physical address space virtualization.
2831 *
2832 *
2833 * @returns VBox status.
2834 * @param pVM The VM handle.
2835 * @param pvRam HC virtual address of the RAM range. (page aligned)
2836 * @param GCPhys GC physical address of the RAM range. (page aligned)
2837 * @param cb Size of the RAM range. (page aligned)
2838 * @param fFlags Flags, MM_RAM_*.
2839 * @param paPages Pointer to an array of physical page descriptors.
2840 * @param pszDesc Description string.
2841 */
2842VMMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
2843{
2844 NOREF(pszDesc);
2845
2846 /*
2847 * Validate input.
2848 * (Not so important because callers are only MMR3PhysRegister()
2849 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
2850 */
2851 Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
2852
2853 Assert(paPages);
2854 Assert(pvRam);
2855 Assert(!(fFlags & ~0xfff));
2856 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
2857 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
2858 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
2859 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2860 Assert(VM_IS_EMT(pVM));
2861 Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
2862 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
2863
2864 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2865 if (GCPhysLast < GCPhys)
2866 {
2867 AssertMsgFailed(("The range wraps! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2868 return VERR_INVALID_PARAMETER;
2869 }
2870
2871 /*
2872 * Find existing range location.
2873 */
2874 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2875 while (pRam)
2876 {
2877 RTGCPHYS off = GCPhys - pRam->GCPhys;
2878 if ( off < pRam->cb
2879 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
2880 break;
2881
2882 pRam = pRam->CTX_SUFF(pNext);
2883 }
2884 AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
2885
2886 unsigned off = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2887 unsigned iPage = (unsigned)(cb >> PAGE_SHIFT);
2888 if (paPages)
2889 {
2890 while (iPage-- > 0)
2891 pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
2892 }
2893 off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
2894 pRam->paChunkR3Ptrs[off] = (uintptr_t)pvRam;
2895
2896 /* Notify the recompiler. */
2897 REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);
2898
2899 return VINF_SUCCESS;
2900}
2901
2902
2903/**
2904 * Allocate missing physical pages for an existing guest RAM range.
2905 *
2906 * @returns VBox status.
2907 * @param pVM The VM handle.
2908 * @param pGCPhys Pointer to the GC physical address of the RAM range. (page aligned)
2909 */
2910VMMR3DECL(int) PGM3PhysGrowRange(PVM pVM, PCRTGCPHYS pGCPhys)
2911{
2912 RTGCPHYS GCPhys = *pGCPhys;
2913
2914 /*
2915 * Walk range list.
2916 */
2917 pgmLock(pVM);
2918
2919 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2920 while (pRam)
2921 {
2922 RTGCPHYS off = GCPhys - pRam->GCPhys;
2923 if ( off < pRam->cb
2924 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
2925 {
2926 bool fRangeExists = false;
2927 unsigned off = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
2928
2929 /* Note: A request made from another thread may end up in EMT after somebody else has already allocated the range. */
2930 if (pRam->paChunkR3Ptrs[off])
2931 fRangeExists = true;
2932
2933 pgmUnlock(pVM);
2934 if (fRangeExists)
2935 return VINF_SUCCESS;
2936 return pgmr3PhysGrowRange(pVM, GCPhys);
2937 }
2938
2939 pRam = pRam->CTX_SUFF(pNext);
2940 }
2941 pgmUnlock(pVM);
2942 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2943}
2944
2945
2946/**
2947 * Allocate missing physical pages for an existing guest RAM range.
2948 *
2949 * @returns VBox status.
2950 * @param pVM The VM handle.
2952 * @param GCPhys GC physical address of the RAM range. (page aligned)
2953 */
2954int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
2955{
2956 void *pvRam;
2957 int rc;
2958
2959 /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
2960 if (!VM_IS_EMT(pVM))
2961 {
2962 PVMREQ pReq;
2963 const RTGCPHYS GCPhysParam = GCPhys;
2964
2965 AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));
2966
2967 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, &GCPhysParam);
2968 if (RT_SUCCESS(rc))
2969 {
2970 rc = pReq->iStatus;
2971 VMR3ReqFree(pReq);
2972 }
2973 return rc;
2974 }
2975
2976 /* Round down to chunk boundary */
2977 GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;
2978
2979 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DynRamGrow);
2980 STAM_COUNTER_ADD(&pVM->pgm.s.StatR3DynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));
2981
2982 Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %RGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));
2983
2984 unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;
2985
2986 for (;;)
2987 {
2988 rc = SUPPageAlloc(cPages, &pvRam);
2989 if (RT_SUCCESS(rc))
2990 {
2991 rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
2992 if (RT_SUCCESS(rc))
2993 return rc;
2994
2995 SUPPageFree(pvRam, cPages);
2996 }
2997
2998 VMSTATE enmVMState = VMR3GetState(pVM);
2999 if (enmVMState != VMSTATE_RUNNING)
3000 {
3001 AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %RGp!\n", GCPhys));
3002 LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %RGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
3003 return rc;
3004 }
3005
3006 LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));
3007
3008 /* Pause first, then inform Main. */
3009 rc = VMR3SuspendNoSave(pVM);
3010 AssertRC(rc);
3011
3012 VMSetRuntimeError(pVM, false, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM");
3013
3014 /* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
3015 rc = VMR3WaitForResume(pVM);
3016
3017 /* Retry */
3018 LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
3019 }
3020}
3021
3022
3023/**
3024 * Interface that MMR3RomRegister() and MMR3PhysReserve() call to update the
3025 * flags of existing RAM ranges.
3026 *
3027 * @returns VBox status.
3028 * @param pVM The VM handle.
3029 * @param GCPhys GC physical address of the RAM range. (page aligned)
3030 * @param cb Size of the RAM range. (page aligned)
3031 * @param fFlags The OR flags, MM_RAM_* \#defines.
3032 * @param fMask The and mask for the flags.
3033 */
3034VMMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
3035{
3036 Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));
3037
3038 /*
3039 * Validate input.
3040 * (Not so important because caller is always MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
3041 */
3042 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
3043 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
3044 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3045 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
3046 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
3047
3048 /*
3049 * Lookup the range.
3050 */
3051 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
3052 while (pRam && GCPhys > pRam->GCPhysLast)
3053 pRam = pRam->CTX_SUFF(pNext);
3054 if ( !pRam
3055 || GCPhys > pRam->GCPhysLast
3056 || GCPhysLast < pRam->GCPhys)
3057 {
3058 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
3059 return VERR_INVALID_PARAMETER;
3060 }
3061
3062 /*
3063 * Update the requested flags.
3064 */
3065 RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
3066 | fMask;
3067 unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
3068 unsigned iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
3069 for ( ; iPage < iPageEnd; iPage++)
3070 pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */
3071
3072 return VINF_SUCCESS;
3073}
3074
3075#endif /* !VBOX_WITH_NEW_PHYS_CODE */
3076
3077/**
3078 * Sets the Address Gate 20 state.
3079 *
3080 * @param pVM VM handle.
3081 * @param fEnable True if the gate should be enabled.
3082 * False if the gate should be disabled.
3083 */
3084VMMDECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable)
3085{
3086 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVM->pgm.s.fA20Enabled));
3087 if (pVM->pgm.s.fA20Enabled != fEnable)
3088 {
3089 pVM->pgm.s.fA20Enabled = fEnable;
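        /* When the gate is disabled the mask clears bit 20 of guest physical addresses
           (1 MB wrap-around as on the original PC); when enabled all bits pass through. */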
3090 pVM->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
3091 REMR3A20Set(pVM, fEnable);
3092 /** @todo we're not handling this correctly for VT-x / AMD-V. See #2911 */
3093 }
3094}
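/*
 * Illustrative sketch (not part of the original file): how a keyboard
 * controller or "fast A20" (port 92h) emulation would typically forward the
 * gate state. The fNewA20State flag is made up.
 */
#if 0 /* example only */
    PGMR3PhysSetA20(pVM, fNewA20State);
#endif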
3095
3096
3097/**
3098 * Tree enumeration callback for dealing with age rollover.
3099 * It will perform a simple compression of the current age.
3100 */
3101static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
3102{
3103 /* Age compression - ASSUMES iNow == 4. */
3104 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
3105 if (pChunk->iAge >= UINT32_C(0xffffff00))
3106 pChunk->iAge = 3;
3107 else if (pChunk->iAge >= UINT32_C(0xfffff000))
3108 pChunk->iAge = 2;
3109 else if (pChunk->iAge)
3110 pChunk->iAge = 1;
3111 else /* iAge = 0 */
3112 pChunk->iAge = 4;
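    /* After compression every mapped chunk ends up with an age in the 1..4 range,
       matching the restarted clock (iNow == 4) set by PGMR3PhysChunkAgeing. */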
3113
3114 /* reinsert */
3115 PVM pVM = (PVM)pvUser;
3116 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
3117 pChunk->AgeCore.Key = pChunk->iAge;
3118 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
3119 return 0;
3120}
3121
3122
3123/**
3124 * Tree enumeration callback that updates the chunks that have
3125 * been used since the last ageing pass.
3126 */
3127static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
3128{
3129 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
3130 if (!pChunk->iAge)
3131 {
3132 PVM pVM = (PVM)pvUser;
3133 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
3134 pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
3135 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
3136 }
3137
3138 return 0;
3139}
3140
3141
3142/**
3143 * Performs ageing of the ring-3 chunk mappings.
3144 *
3145 * @param pVM The VM handle.
3146 */
3147VMMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
3148{
3149 pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
3150 pVM->pgm.s.ChunkR3Map.iNow++;
3151 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
3152 {
3153 pVM->pgm.s.ChunkR3Map.iNow = 4;
3154 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
3155 }
3156 else
3157 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
3158}
3159
3160
3161/**
3162 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
3163 */
3164typedef struct PGMR3PHYSCHUNKUNMAPCB
3165{
3166 PVM pVM; /**< The VM handle. */
3167 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
3168} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
3169
3170
3171/**
3172 * Callback used to find the mapping that's been unused for
3173 * the longest time.
3174 */
3175static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
3176{
3177 do
3178 {
3179 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
3180 if ( pChunk->iAge
3181 && !pChunk->cRefs)
3182 {
3183 /*
3184 * Check that it's not in any of the TLBs.
3185 */
3186 PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
3187 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
3188 if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
3189 {
3190 pChunk = NULL;
3191 break;
3192 }
3193 if (pChunk)
3194 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
3195 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
3196 {
3197 pChunk = NULL;
3198 break;
3199 }
3200 if (pChunk)
3201 {
3202 ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
3203 return 1; /* done */
3204 }
3205 }
3206
3207 /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
3208 pNode = pNode->pList;
3209 } while (pNode);
3210 return 0;
3211}
3212
3213
3214/**
3215 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
3216 *
3217 * The candidate will not be part of any TLBs, so no need to flush
3218 * anything afterwards.
3219 *
3220 * @returns Chunk id.
3221 * @param pVM The VM handle.
3222 */
3223static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
3224{
3225 /*
3226 * Do tree ageing first?
3227 */
3228 if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
3229 PGMR3PhysChunkAgeing(pVM);
3230
3231 /*
3232 * Enumerate the age tree starting with the left most node.
3233 */
3234 PGMR3PHYSCHUNKUNMAPCB Args;
3235 Args.pVM = pVM;
3236 Args.pChunk = NULL;
3237 if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
3238 return Args.pChunk->Core.Key;
3239 return INT32_MAX;
3240}
3241
3242
3243/**
3244 * Maps the given chunk into the ring-3 mapping cache.
3245 *
3246 * This will call ring-0.
3247 *
3248 * @returns VBox status code.
3249 * @param pVM The VM handle.
3250 * @param idChunk The chunk in question.
3251 * @param ppChunk Where to store the chunk tracking structure.
3252 *
3253 * @remarks Called from within the PGM critical section.
3254 */
3255int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
3256{
3257 int rc;
3258 /*
3259 * Allocate a new tracking structure first.
3260 */
3261#if 0 /* for later when we've got a separate mapping method for ring-0. */
3262 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
3263 AssertReturn(pChunk, VERR_NO_MEMORY);
3264#else
3265 PPGMCHUNKR3MAP pChunk;
3266 rc = MMHyperAlloc(pVM, sizeof(*pChunk), 0, MM_TAG_PGM_CHUNK_MAPPING, (void **)&pChunk);
3267 AssertRCReturn(rc, rc);
3268#endif
3269 pChunk->Core.Key = idChunk;
3270 pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
3271 pChunk->iAge = 0;
3272 pChunk->cRefs = 0;
3273 pChunk->cPermRefs = 0;
3274 pChunk->pv = NULL;
3275
3276 /*
3277 * Request the ring-0 part to map the chunk in question and if
3278 * necessary unmap another one to make space in the mapping cache.
3279 */
3280 GMMMAPUNMAPCHUNKREQ Req;
3281 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
3282 Req.Hdr.cbReq = sizeof(Req);
3283 Req.pvR3 = NULL;
3284 Req.idChunkMap = idChunk;
3285 Req.idChunkUnmap = NIL_GMM_CHUNKID;
3286 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
3287 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
3288 rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
3289 if (RT_SUCCESS(rc))
3290 {
3291 /*
3292 * Update the tree.
3293 */
3294 /* insert the new one. */
3295 AssertPtr(Req.pvR3);
3296 pChunk->pv = Req.pvR3;
3297 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
3298 AssertRelease(fRc);
3299 pVM->pgm.s.ChunkR3Map.c++;
3300
3301 fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
3302 AssertRelease(fRc);
3303
3304 /* remove the unmapped one. */
3305 if (Req.idChunkUnmap != NIL_GMM_CHUNKID)
3306 {
3307 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
3308 AssertRelease(pUnmappedChunk);
3309 pUnmappedChunk->pv = NULL;
3310 pUnmappedChunk->Core.Key = UINT32_MAX;
3311#if 0 /* for later when we've got a separate mapping method for ring-0. */
3312 MMR3HeapFree(pUnmappedChunk);
3313#else
3314 MMHyperFree(pVM, pUnmappedChunk);
3315#endif
3316 pVM->pgm.s.ChunkR3Map.c--;
3317 }
3318 }
3319 else
3320 {
3321 AssertRC(rc);
3322#if 0 /* for later when we've got a separate mapping method for ring-0. */
3323 MMR3HeapFree(pChunk);
3324#else
3325 MMHyperFree(pVM, pChunk);
3326#endif
3327 pChunk = NULL;
3328 }
3329
3330 *ppChunk = pChunk;
3331 return rc;
3332}
3333
3334
3335/**
3336 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
3337 *
3338 * @returns see pgmR3PhysChunkMap.
3339 * @param pVM The VM handle.
3340 * @param idChunk The chunk to map.
3341 */
3342VMMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
3343{
3344 PPGMCHUNKR3MAP pChunk;
3345 return pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
3346}
3347
3348
3349/**
3350 * Invalidates the TLB for the ring-3 mapping cache.
3351 *
3352 * @param pVM The VM handle.
3353 */
3354VMMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
3355{
3356 pgmLock(pVM);
3357 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
3358 {
3359 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
3360 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
3361 }
3362 pgmUnlock(pVM);
3363}
3364
3365
3366/**
3367 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
3368 *
3369 * This function will also manage the VM_FF_PGM_NO_MEMORY force action flag, to
3370 * signal and clear the out of memory condition. Once that condition has been
3371 * contracted, this API is used to try to clear it when the user wants to resume.
3372 *
3373 * @returns The following VBox status codes.
3374 * @retval VINF_SUCCESS on success. FFs cleared.
3375 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in
3376 * this case and it gets accompanied by VM_FF_PGM_NO_MEMORY.
3377 *
3378 * @param pVM The VM handle.
3379 *
3380 * @remarks The VINF_EM_NO_MEMORY status is for the benefit of the FF processing
3381 * in EM.cpp and shouldn't be propagated outside TRPM, HWACCM, EM and
3382 * pgmPhysEnsureHandyPage. There is one exception to this in the \#PF
3383 * handler.
3384 */
3385VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
3386{
3387 pgmLock(pVM);
3388
3389 /*
3390 * Allocate more pages, noting down the index of the first new page.
3391 */
3392 uint32_t iClear = pVM->pgm.s.cHandyPages;
3393 AssertMsgReturn(iClear <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d", iClear), VERR_INTERNAL_ERROR);
3394 Log(("PGMR3PhysAllocateHandyPages: %d -> %d\n", iClear, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
3395 int rcAlloc = VINF_SUCCESS;
3396 int rcSeed = VINF_SUCCESS;
3397 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
3398 while (rc == VERR_GMM_SEED_ME)
3399 {
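        /*
         * VERR_GMM_SEED_ME means ring-0 (GMM) is out of backing memory for this
         * VM: allocate a chunk of host pages here in ring-3, hand it down as
         * seed material, and then retry the handy page allocation.
         */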
3400 void *pvChunk;
3401 rcAlloc = rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
3402 if (RT_SUCCESS(rc))
3403 {
3404 rcSeed = rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
3405 if (RT_FAILURE(rc))
3406 SUPPageFree(pvChunk, GMM_CHUNK_SIZE >> PAGE_SHIFT);
3407 }
3408 if (RT_SUCCESS(rc))
3409 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
3410 }
3411
3412 if (RT_SUCCESS(rc))
3413 {
3414 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
3415 Assert(pVM->pgm.s.cHandyPages > 0);
3416 VM_FF_CLEAR(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
3417 VM_FF_CLEAR(pVM, VM_FF_PGM_NO_MEMORY);
3418
3419 /*
3420 * Clear the pages.
3421 */
3422 while (iClear < pVM->pgm.s.cHandyPages)
3423 {
3424 PGMMPAGEDESC pPage = &pVM->pgm.s.aHandyPages[iClear];
3425 void *pv;
3426 rc = pgmPhysPageMapByPageID(pVM, pPage->idPage, pPage->HCPhysGCPhys, &pv);
3427 AssertLogRelMsgBreak(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc", pPage->idPage, pPage->HCPhysGCPhys, rc));
3428 ASMMemZeroPage(pv);
3429 iClear++;
3430 Log3(("PGMR3PhysAllocateHandyPages: idPage=%#x HCPhys=%RGp\n", pPage->idPage, pPage->HCPhysGCPhys));
3431 }
3432 }
3433 else
3434 {
3435 /*
3436 * We should never get here unless there is a genuine shortage of
3437 * memory (or some internal error). Flag the error so the VM can be
3438 * suspended ASAP and the user informed. If we're totally out of
3439 * handy pages we will return failure.
3440 */
3441 /* Report the failure. */
3442 LogRel(("PGM: Failed to procure handy pages; rc=%Rrc rcAlloc=%Rrc rcSeed=%Rrc cHandyPages=%#x\n"
3443 " cAllPages=%#x cPrivatePages=%#x cSharedPages=%#x cZeroPages=%#x\n",
3444 rc, rcAlloc, rcSeed,
3445 pVM->pgm.s.cHandyPages,
3446 pVM->pgm.s.cAllPages,
3447 pVM->pgm.s.cPrivatePages,
3448 pVM->pgm.s.cSharedPages,
3449 pVM->pgm.s.cZeroPages));
3450 if ( rc != VERR_NO_MEMORY
3451 && rc != VERR_LOCK_FAILED)
3452 {
3453 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
3454 {
3455 LogRel(("PGM: aHandyPages[#%#04x] = {.HCPhysGCPhys=%RHp, .idPage=%#08x, .idSharedPage=%#08x}\n",
3456 i, pVM->pgm.s.aHandyPages[i].HCPhysGCPhys, pVM->pgm.s.aHandyPages[i].idPage,
3457 pVM->pgm.s.aHandyPages[i].idSharedPage));
3458 uint32_t const idPage = pVM->pgm.s.aHandyPages[i].idPage;
3459 if (idPage != NIL_GMM_PAGEID)
3460 {
3461 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
3462 pRam;
3463 pRam = pRam->pNextR3)
3464 {
3465 uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
3466 for (uint32_t iPage = 0; iPage < cPages; iPage++)
3467 if (PGM_PAGE_GET_PAGEID(&pRam->aPages[iPage]) == idPage)
3468 LogRel(("PGM: Used by %RGp %R[pgmpage] (%s)\n",
3469 pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pRam->aPages[iPage], pRam->pszDesc));
3470 }
3471 }
3472 }
3473 }
3474
3475 /* Set the FFs and adjust rc. */
3476 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
3477 VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
3478 if ( rc == VERR_NO_MEMORY
3479 || rc == VERR_LOCK_FAILED)
3480 rc = VINF_EM_NO_MEMORY;
3481 }
3482
3483 pgmUnlock(pVM);
3484 return rc;
3485}
3486
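/*
 * Illustrative sketch (not compiled): how a ring-3 force-flag loop might react
 * to VM_FF_PGM_NEED_HANDY_PAGES by calling PGMR3PhysAllocateHandyPages. The
 * helper name, the control flow and the exact FF-test macro spelling are
 * illustrative, not taken from the real EM/VMM code.
 */
#if 0 /* example only */
static int pgmR3ExampleHandyPageFF(PVM pVM)
{
    int rc = VINF_SUCCESS;
    if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
    {
        rc = PGMR3PhysAllocateHandyPages(pVM);
        /* VINF_EM_NO_MEMORY is passed up so EM can suspend the VM (see the
           @remarks above); any other failure is a genuine error. */
    }
    return rc;
}
#endif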
3487
3488/**
3489 * Frees the specified RAM page and replaces it with the ZERO page.
3490 *
3491 * This is used by ballooning, remapping MMIO2 and RAM reset.
3492 *
 * @returns VBox status code.
3493 * @param pVM Pointer to the shared VM structure.
3494 * @param pReq Pointer to the GMM free pages request to batch the page into.
 * @param pcPendingPages Where the number of pages pending in the request is kept.
3495 * @param pPage Pointer to the page structure.
3496 * @param GCPhys The guest physical address of the page, if applicable.
3497 *
3498 * @remarks The caller must own the PGM lock.
3499 */
3500static int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys)
3501{
3502 /*
3503 * Assert sanity.
3504 */
3505 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
3506 if (RT_UNLIKELY( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
3507 && PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW))
3508 {
3509 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
3510 return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
3511 }
3512
3513 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO)
3514 return VINF_SUCCESS;
3515
3516 const uint32_t idPage = PGM_PAGE_GET_PAGEID(pPage);
3517 Log3(("pgmPhysFreePage: idPage=%#x HCPhys=%RHp pPage=%R[pgmpage]\n", idPage, PGM_PAGE_GET_HCPHYS(pPage), pPage));
3518 if (RT_UNLIKELY( idPage == NIL_GMM_PAGEID
3519 || idPage > GMM_PAGEID_LAST
3520 || PGM_PAGE_GET_CHUNKID(pPage) == NIL_GMM_CHUNKID))
3521 {
3522 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
3523 return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, idPage);
3524 }
3525
3526 /* update page count stats. */
3527 if (PGM_PAGE_IS_SHARED(pPage))
3528 pVM->pgm.s.cSharedPages--;
3529 else
3530 pVM->pgm.s.cPrivatePages--;
3531 pVM->pgm.s.cZeroPages++;
3532
3533 /*
3534 * Turn the page into a ZERO page.
3535 */
3536 PGM_PAGE_SET_HCPHYS(pPage, pVM->pgm.s.HCPhysZeroPg);
3537 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
3538 PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
3539
3540 /*
3541 * Make sure it's not in the handy page array.
3542 */
3543 for (uint32_t i = pVM->pgm.s.cHandyPages; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
3544 {
3545 if (pVM->pgm.s.aHandyPages[i].idPage == idPage)
3546 {
3547 pVM->pgm.s.aHandyPages[i].idPage = NIL_GMM_PAGEID;
3548 break;
3549 }
3550 if (pVM->pgm.s.aHandyPages[i].idSharedPage == idPage)
3551 {
3552 pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
3553 break;
3554 }
3555 }
3556
3557 /*
3558 * Push it onto the page array.
3559 */
3560 uint32_t iPage = *pcPendingPages;
3561 Assert(iPage < PGMPHYS_FREE_PAGE_BATCH_SIZE);
3562 *pcPendingPages += 1;
3563
3564 pReq->aPages[iPage].idPage = idPage;
3565
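    /* Not a full batch yet? Leave the flushing to a later call or to the caller. */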
3566 if (iPage + 1 < PGMPHYS_FREE_PAGE_BATCH_SIZE)
3567 return VINF_SUCCESS;
3568
3569 /*
3570 * Flush the pages.
3571 */
3572 int rc = GMMR3FreePagesPerform(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE);
3573 if (RT_SUCCESS(rc))
3574 {
3575 GMMR3FreePagesRePrep(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
3576 *pcPendingPages = 0;
3577 }
3578 return rc;
3579}
3580
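/*
 * Illustrative sketch (not compiled): the batching pattern a caller of
 * pgmPhysFreePage is expected to follow, modelled on how the RAM reset and
 * MMIO2 code drives it. The helper name and its parameters are hypothetical;
 * the GMMR3FreePages* names follow the GMM ring-3 API (cf. the
 * GMMR3FreePagesPerform call above), but double-check the exact signatures.
 */
#if 0 /* example only */
static int pgmR3PhysFreeOnePageExample(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
{
    /* The caller must own the PGM lock, see the @remarks above. */
    PGMMFREEPAGESREQ pReq;
    uint32_t         cPendingPages = 0;
    int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    AssertLogRelRCReturn(rc, rc);

    /* Queue the page; pgmPhysFreePage flushes automatically when a batch fills up. */
    rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys);

    /* Flush whatever is still pending and release the request. */
    if (RT_SUCCESS(rc) && cPendingPages)
        rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
    GMMR3FreePagesCleanup(pReq);
    return rc;
}
#endif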
3581
3582/**
3583 * Converts a GC physical address to a HC ring-3 pointer, with some
3584 * additional checks.
3585 *
3586 * @returns VBox status code.
3587 * @retval VINF_SUCCESS on success.
3588 * @retval VINF_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3589 * access handler of some kind.
3590 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3591 * accesses or is odd in any way.
3592 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3593 *
3594 * @param pVM The VM handle.
3595 * @param GCPhys The GC physical address to convert.
3596 * @param fWritable Whether write access is required.
3597 * @param ppv Where to store the pointer corresponding to GCPhys on
3598 * success.
3599 */
3600VMMR3DECL(int) PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
3601{
3602 pgmLock(pVM);
3603
3604 PPGMRAMRANGE pRam;
3605 PPGMPAGE pPage;
3606 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
3607 if (RT_SUCCESS(rc))
3608 {
3609#ifdef VBOX_WITH_NEW_PHYS_CODE
3610 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
3611 rc = VINF_SUCCESS;
3612 else
3613 {
3614 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3615 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3616 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
3617 {
3618 /** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
3619 * in -norawr0 mode. */
3620 if (fWritable)
3621 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
3622 }
3623 else
3624 {
3625 /* Temporarily disabled physical handler(s); since the recompiler
3626 doesn't get notified when the handler is reset, we have to pretend
3627 it's operating normally. */
3628 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
3629 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3630 else
3631 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
3632 }
3633 }
3634 if (RT_SUCCESS(rc))
3635 {
3636 int rc2;
3637
3638 /* Make sure what we return is writable. */
3639 if (fWritable && rc != VINF_PGM_PHYS_TLB_CATCH_WRITE)
3640 switch (PGM_PAGE_GET_STATE(pPage))
3641 {
3642 case PGM_PAGE_STATE_ALLOCATED:
3643 break;
3644 case PGM_PAGE_STATE_ZERO:
3645 case PGM_PAGE_STATE_SHARED:
3646 case PGM_PAGE_STATE_WRITE_MONITORED:
3647 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
3648 AssertLogRelRCReturn(rc2, rc2);
3649 break;
3650 }
3651
3652 /* Get a ring-3 mapping of the address. */
3653 PPGMPAGER3MAPTLBE pTlbe;
3654 rc2 = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
3655 AssertLogRelRCReturn(rc2, rc2);
3656 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
3657 /** @todo mapping/locking hell; this isn't horribly efficient since
3658 * pgmPhysPageLoadIntoTlb will repeat the lookup we've done here. */
3659
3660 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
3661 }
3662 else
3663 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
3664
3665 /* else: handler catching all access, no pointer returned. */
3666
3667#else
3668 if (0)
3669 /* nothing */;
3670 else if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
3671 {
3672 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3673 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3674 else if (fWritable && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
3675 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
3676 else
3677 {
3678 /* Temporarily disabled physical handler(s); since the recompiler
3679 doesn't get notified when the handler is reset, we have to pretend
3680 it's operating normally. */
3681 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
3682 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3683 else
3684 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
3685 }
3686 }
3687 else
3688 rc = VINF_SUCCESS;
3689 if (RT_SUCCESS(rc))
3690 {
3691 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
3692 {
3693 AssertMsg(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM, ("GCPhys=%RGp type=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage)));
3694 RTGCPHYS off = GCPhys - pRam->GCPhys;
3695 unsigned iChunk = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
3696 *ppv = (void *)(pRam->paChunkR3Ptrs[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
3697 }
3698 else if (RT_LIKELY(pRam->pvR3))
3699 {
3700 AssertMsg(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, ("GCPhys=%RGp type=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage)));
3701 RTGCPHYS off = GCPhys - pRam->GCPhys;
3702 *ppv = (uint8_t *)pRam->pvR3 + off;
3703 }
3704 else
3705 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
3706 }
3707#endif /* !VBOX_WITH_NEW_PHYS_CODE */
3708 }
3709 else
3710 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
3711
3712 pgmUnlock(pVM);
3713 return rc;
3714}
3715
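/*
 * Illustrative sketch (not compiled): one way a ring-3 caller could use
 * PGMR3PhysTlbGCPhys2Ptr for a read-only peek at guest memory. The helper
 * name is hypothetical and the status handling is simplified; see the @todo
 * above regarding mapping/locking.
 */
#if 0 /* example only */
static int pgmR3ExamplePeekByte(PVM pVM, RTGCPHYS GCPhys, uint8_t *pbValue)
{
    void *pv;
    int rc = PGMR3PhysTlbGCPhys2Ptr(pVM, GCPhys, false /*fWritable*/, &pv);
    if (RT_SUCCESS(rc)) /* VINF_PGM_PHYS_TLB_CATCH_WRITE is fine for a read. */
        *pbValue = *(uint8_t const *)pv;
    /* On VERR_PGM_PHYS_TLB_CATCH_ALL or VERR_PGM_PHYS_TLB_UNASSIGNED the caller
       has to fall back on PGMPhysRead instead. */
    return rc;
}
#endif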
3716
3717