VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp @ 18266

Last change on this file since 18266 was 18266, checked in by vboxsync, 16 years ago

PGM: Made PGMR3PhysRomProtect use pgmPoolTrackFlushGCPhys instead of doing a full pool flush for each call (expensive when using reset); this fixes a reset assertion in pgmPoolMonitorFlush. Changed pgmPoolTrackFlushGCPhys to return VINF_PGM_SYNC_CR3 and set VM_FF_PGM_SYNCR3 and PGM_SYNC_CLEAR_PGM_POOL instead of leaving this to the caller.

1/* $Id: PGMPhys.cpp 18266 2009-03-25 17:25:53Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM_PHYS
27#include <VBox/pgm.h>
28#include <VBox/cpum.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/rem.h>
34#include <VBox/csam.h>
35#include "PGMInternal.h"
36#include <VBox/vm.h>
37#include <VBox/dbg.h>
38#include <VBox/param.h>
39#include <VBox/err.h>
40#include <iprt/assert.h>
41#include <iprt/alloc.h>
42#include <iprt/asm.h>
43#include <VBox/log.h>
44#include <iprt/thread.h>
45#include <iprt/string.h>
46
47
48/*******************************************************************************
49* Defined Constants And Macros *
50*******************************************************************************/
51/** The number of pages to free in one batch. */
52#define PGMPHYS_FREE_PAGE_BATCH_SIZE 128
53
54
55/*******************************************************************************
56* Internal Functions *
57*******************************************************************************/
58static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
59static int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys);
60
61
62/*
63 * PGMR3PhysReadU8-64
64 * PGMR3PhysWriteU8-64
65 */
66#define PGMPHYSFN_READNAME PGMR3PhysReadU8
67#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU8
68#define PGMPHYS_DATASIZE 1
69#define PGMPHYS_DATATYPE uint8_t
70#include "PGMPhysRWTmpl.h"
71
72#define PGMPHYSFN_READNAME PGMR3PhysReadU16
73#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU16
74#define PGMPHYS_DATASIZE 2
75#define PGMPHYS_DATATYPE uint16_t
76#include "PGMPhysRWTmpl.h"
77
78#define PGMPHYSFN_READNAME PGMR3PhysReadU32
79#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU32
80#define PGMPHYS_DATASIZE 4
81#define PGMPHYS_DATATYPE uint32_t
82#include "PGMPhysRWTmpl.h"
83
84#define PGMPHYSFN_READNAME PGMR3PhysReadU64
85#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU64
86#define PGMPHYS_DATASIZE 8
87#define PGMPHYS_DATATYPE uint64_t
88#include "PGMPhysRWTmpl.h"
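
/*
 * Editor's note (not part of the original file): each #define/#include group
 * above is an "X-macro" style template instantiation. PGMPhysRWTmpl.h is
 * included once per integer width and emits the corresponding typed helpers
 * (PGMR3PhysReadU8/U16/U32/U64 and PGMR3PhysWriteU8/U16/U32/U64); presumably
 * the template also #undefs PGMPHYSFN_READNAME & friends so the next group
 * can redefine them.
 */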
89
90
91/**
92 * EMT worker for PGMR3PhysReadExternal.
93 */
94static DECLCALLBACK(int) pgmR3PhysReadExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, void *pvBuf, size_t cbRead)
95{
96 PGMPhysRead(pVM, *pGCPhys, pvBuf, cbRead);
97 return VINF_SUCCESS;
98}
99
100
101/**
102 * Read from physical memory, external users.
103 *
104 * @returns VBox status code.
105 * @retval VINF_SUCCESS.
106 *
107 * @param pVM VM Handle.
108 * @param GCPhys Physical address to start reading from.
109 * @param pvBuf Where to put the bits that are read.
110 * @param cbRead How many bytes to read.
111 *
112 * @thread Any but EMTs.
113 */
114VMMR3DECL(int) PGMR3PhysReadExternal(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
115{
116 VM_ASSERT_OTHER_THREAD(pVM);
117
118 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
119 LogFlow(("PGMR3PhysReadExternal: %RGp %d\n", GCPhys, cbRead));
120
121 pgmLock(pVM);
122
123 /*
124 * Copy loop on ram ranges.
125 */
126 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
127 for (;;)
128 {
129 /* Find range. */
130 while (pRam && GCPhys > pRam->GCPhysLast)
131 pRam = pRam->CTX_SUFF(pNext);
132 /* Inside range or not? */
133 if (pRam && GCPhys >= pRam->GCPhys)
134 {
135 /*
136 * Must work our way thru this page by page.
137 */
138 RTGCPHYS off = GCPhys - pRam->GCPhys;
139 while (off < pRam->cb)
140 {
141 unsigned iPage = off >> PAGE_SHIFT;
142 PPGMPAGE pPage = &pRam->aPages[iPage];
143
144 /*
145 * If the page has an ALL access handler, we'll have to
146 * delegate the job to EMT.
147 */
148 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
149 {
150 pgmUnlock(pVM);
151
152 PVMREQ pReq = NULL;
153 int rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT,
154 (PFNRT)pgmR3PhysReadExternalEMT, 4, pVM, &GCPhys, pvBuf, cbRead);
155 if (RT_SUCCESS(rc))
156 {
157 rc = pReq->iStatus;
158 VMR3ReqFree(pReq);
159 }
160 return rc;
161 }
162 Assert(!PGM_PAGE_IS_MMIO(pPage));
163
164 /*
165 * Simple stuff, go ahead.
166 */
167 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
168 if (cb > cbRead)
169 cb = cbRead;
170 const void *pvSrc;
171 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
172 if (RT_SUCCESS(rc))
173 memcpy(pvBuf, pvSrc, cb);
174 else
175 {
176 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
177 pRam->GCPhys + off, pPage, rc));
178 memset(pvBuf, 0xff, cb);
179 }
180
181 /* next page */
182 if (cb >= cbRead)
183 {
184 pgmUnlock(pVM);
185 return VINF_SUCCESS;
186 }
187 cbRead -= cb;
188 off += cb;
189 GCPhys += cb;
190 pvBuf = (char *)pvBuf + cb;
191 } /* walk pages in ram range. */
192 }
193 else
194 {
195 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
196
197 /*
198 * Unassigned address space.
199 */
200 if (!pRam)
201 break;
202 size_t cb = pRam->GCPhys - GCPhys;
203 if (cb >= cbRead)
204 {
205 memset(pvBuf, 0xff, cbRead);
206 break;
207 }
208 memset(pvBuf, 0xff, cb);
209
210 cbRead -= cb;
211 pvBuf = (char *)pvBuf + cb;
212 GCPhys += cb;
213 }
214 } /* Ram range walk */
215
216 pgmUnlock(pVM);
217
218 return VINF_SUCCESS;
219}
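
/*
 * Editor's illustration (not in the original source): a minimal sketch of how
 * a non-EMT thread, e.g. a device's asynchronous I/O thread, might use
 * PGMR3PhysReadExternal. Only the PGMR3PhysReadExternal call is real API here;
 * exampleReadGuestPage and the fixed PAGE_SIZE buffer are invented for the
 * example.
 */
#if 0 /* example only */
static int exampleReadGuestPage(PVM pVM, RTGCPHYS GCPhysSrc)
{
    uint8_t abPage[PAGE_SIZE];
    /* Pages with ALL access handlers are read via an EMT request internally;
       unassigned address space reads back as 0xff. */
    int rc = PGMR3PhysReadExternal(pVM, GCPhysSrc, abPage, sizeof(abPage));
    if (RT_SUCCESS(rc))
    {
        /* ... consume abPage ... */
    }
    return rc;
}
#endif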
220
221
222/**
223 * EMT worker for PGMR3PhysWriteExternal.
224 */
225static DECLCALLBACK(int) pgmR3PhysWriteExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, const void *pvBuf, size_t cbWrite)
226{
227 /** @todo VERR_EM_NO_MEMORY */
228 PGMPhysWrite(pVM, *pGCPhys, pvBuf, cbWrite);
229 return VINF_SUCCESS;
230}
231
232
233/**
234 * Write to physical memory, external users.
235 *
236 * @returns VBox status code.
237 * @retval VINF_SUCCESS.
238 * @retval VERR_EM_NO_MEMORY.
239 *
240 * @param pVM VM Handle.
241 * @param GCPhys Physical address to write to.
242 * @param pvBuf What to write.
243 * @param cbWrite How many bytes to write.
244 *
245 * @thread Any but EMTs.
246 */
247VMMDECL(int) PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
248{
249 VM_ASSERT_OTHER_THREAD(pVM);
250
251 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMR3PhysWriteExternal after pgmR3Save()!\n"));
252 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
253 LogFlow(("PGMR3PhysWriteExternal: %RGp %d\n", GCPhys, cbWrite));
254
255 pgmLock(pVM);
256
257 /*
258 * Copy loop on ram ranges, stop when we hit something difficult.
259 */
260 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
261 for (;;)
262 {
263 /* Find range. */
264 while (pRam && GCPhys > pRam->GCPhysLast)
265 pRam = pRam->CTX_SUFF(pNext);
266 /* Inside range or not? */
267 if (pRam && GCPhys >= pRam->GCPhys)
268 {
269 /*
270 * Must work our way thru this page by page.
271 */
272 RTGCPTR off = GCPhys - pRam->GCPhys;
273 while (off < pRam->cb)
274 {
275 RTGCPTR iPage = off >> PAGE_SHIFT;
276 PPGMPAGE pPage = &pRam->aPages[iPage];
277
278 /*
279 * If the page is in any way problematic, we have to
280 * do the work on the EMT. Anything that needs to be made
281 * writable or involves access handlers is problematic.
282 */
283 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
284 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
285 {
286 pgmUnlock(pVM);
287
288 PVMREQ pReq = NULL;
289 int rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT,
290 (PFNRT)pgmR3PhysWriteExternalEMT, 4, pVM, &GCPhys, pvBuf, cbWrite);
291 if (RT_SUCCESS(rc))
292 {
293 rc = pReq->iStatus;
294 VMR3ReqFree(pReq);
295 }
296 return rc;
297 }
298 Assert(!PGM_PAGE_IS_MMIO(pPage));
299
300 /*
301 * Simple stuff, go ahead.
302 */
303 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
304 if (cb > cbWrite)
305 cb = cbWrite;
306 void *pvDst;
307 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
308 if (RT_SUCCESS(rc))
309 memcpy(pvDst, pvBuf, cb);
310 else
311 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
312 pRam->GCPhys + off, pPage, rc));
313
314 /* next page */
315 if (cb >= cbWrite)
316 {
317 pgmUnlock(pVM);
318 return VINF_SUCCESS;
319 }
320
321 cbWrite -= cb;
322 off += cb;
323 GCPhys += cb;
324 pvBuf = (const char *)pvBuf + cb;
325 } /* walk pages in ram range */
326 }
327 else
328 {
329 /*
330 * Unassigned address space, skip it.
331 */
332 if (!pRam)
333 break;
334 size_t cb = pRam->GCPhys - GCPhys;
335 if (cb >= cbWrite)
336 break;
337 cbWrite -= cb;
338 pvBuf = (const char *)pvBuf + cb;
339 GCPhys += cb;
340 }
341 } /* Ram range walk */
342
343 pgmUnlock(pVM);
344 return VINF_SUCCESS;
345}
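
/*
 * Editor's illustration (not in the original source): the matching write-side
 * usage from a non-EMT thread. exampleWriteGuestBuffer is an invented name;
 * the API call itself is the one defined above. Writes touching access
 * handlers or pages that must first be made writable are bounced to an EMT by
 * the function itself, so the caller only inspects the status code.
 */
#if 0 /* example only */
static int exampleWriteGuestBuffer(PVM pVM, RTGCPHYS GCPhysDst, const void *pvData, size_t cbData)
{
    return PGMR3PhysWriteExternal(pVM, GCPhysDst, pvData, cbData);
}
#endif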
346
347
348#ifdef VBOX_WITH_NEW_PHYS_CODE
349/**
350 * VMR3ReqCall worker for PGMR3PhysGCPhys2CCPtrExternal to make pages writable.
351 *
352 * @returns see PGMR3PhysGCPhys2CCPtrExternal
353 * @param pVM The VM handle.
354 * @param pGCPhys Pointer to the guest physical address.
355 * @param ppv Where to store the mapping address.
356 * @param pLock Where to store the lock.
357 */
358static DECLCALLBACK(int) pgmR3PhysGCPhys2CCPtrDelegated(PVM pVM, PRTGCPHYS pGCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
359{
360 /*
361 * Just hand it to PGMPhysGCPhys2CCPtr and check that it's not a page with
362 * an access handler after it succeeds.
363 */
364 int rc = pgmLock(pVM);
365 AssertRCReturn(rc, rc);
366
367 rc = PGMPhysGCPhys2CCPtr(pVM, *pGCPhys, ppv, pLock);
368 if (RT_SUCCESS(rc))
369 {
370 PPGMPAGEMAPTLBE pTlbe;
371 int rc2 = pgmPhysPageQueryTlbe(&pVM->pgm.s, *pGCPhys, &pTlbe);
372 AssertFatalRC(rc2);
373 PPGMPAGE pPage = pTlbe->pPage;
374#if 1
375 if (PGM_PAGE_IS_MMIO(pPage))
376#else
377 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
378#endif
379 {
380 PGMPhysReleasePageMappingLock(pVM, pLock);
381 rc = VERR_PGM_PHYS_PAGE_RESERVED;
382 }
383 }
384
385 pgmUnlock(pVM);
386 return rc;
387}
388#endif /* VBOX_WITH_NEW_PHYS_CODE */
389
390
391/**
392 * Requests the mapping of a guest page into ring-3, external threads.
393 *
394 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
395 * release it.
396 *
397 * This API will assume your intention is to write to the page, and will
398 * therefore replace shared and zero pages. If you do not intend to modify the
399 * page, use the PGMR3PhysGCPhys2CCPtrReadOnlyExternal() API.
400 *
401 * @returns VBox status code.
402 * @retval VINF_SUCCESS on success.
403 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
404 * backing or if the page has any active access handlers. The caller
405 * must fall back on using PGMR3PhysWriteExternal.
406 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
407 *
408 * @param pVM The VM handle.
409 * @param GCPhys The guest physical address of the page that should be mapped.
410 * @param ppv Where to store the address corresponding to GCPhys.
411 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
412 *
413 * @remark Avoid calling this API from within critical sections (other than the
414 * PGM one) because of the deadlock risk when we have to delegate the
415 * task to an EMT.
416 * @thread Any.
417 */
418VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrExternal(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
419{
420 VM_ASSERT_OTHER_THREAD(pVM);
421 AssertPtr(ppv);
422 AssertPtr(pLock);
423
424#ifdef VBOX_WITH_NEW_PHYS_CODE
425 int rc = pgmLock(pVM);
426 AssertRCReturn(rc, rc);
427
428 /*
429 * Query the Physical TLB entry for the page (may fail).
430 */
431 PPGMPAGEMAPTLBE pTlbe;
432 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
433 if (RT_SUCCESS(rc))
434 {
435 PPGMPAGE pPage = pTlbe->pPage;
436#if 1
437 if (PGM_PAGE_IS_MMIO(pPage))
438 rc = VERR_PGM_PHYS_PAGE_RESERVED;
439#else
440 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
441 rc = VERR_PGM_PHYS_PAGE_RESERVED;
442#endif
443 else
444 {
445 /*
446 * If the page is shared, the zero page, or being write monitored,
447 * it must be converted to a page that's writable if possible.
448 * This has to be done on an EMT.
449 */
450 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
451 {
452 pgmUnlock(pVM);
453
454 PVMREQ pReq = NULL;
455 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT,
456 (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4, pVM, &GCPhys, ppv, pLock);
457 if (RT_SUCCESS(rc))
458 {
459 rc = pReq->iStatus;
460 VMR3ReqFree(pReq);
461 }
462 return rc;
463 }
464
465 /*
466 * Now, just perform the locking and calculate the return address.
467 */
468 PPGMPAGEMAP pMap = pTlbe->pMap;
469 pMap->cRefs++;
470#if 0 /** @todo implement locking properly */
471 if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
472 if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
473 {
474 AssertMsgFailed(("%RGp is entering permanent locked state!\n", GCPhys));
475 pMap->cRefs++; /* Extra ref to prevent it from going away. */
476 }
477#endif
478 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
479 pLock->pvPage = pPage;
480 pLock->pvMap = pMap;
481 }
482 }
483
484 pgmUnlock(pVM);
485 return rc;
486
487#else /* !VBOX_WITH_NEW_PHYS_CODE */
488 /*
489 * Fallback code.
490 */
491 return PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1, (PRTR3PTR)ppv);
492#endif /* !VBOX_WITH_NEW_PHYS_CODE */
493}
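
/*
 * Editor's illustration (not in the original source): direct write access via
 * the mapping API, with the fall back on PGMR3PhysWriteExternal that the doc
 * comment above prescribes for VERR_PGM_PHYS_PAGE_RESERVED.
 * exampleDirectWriteU32 is an invented name.
 */
#if 0 /* example only */
static int exampleDirectWriteU32(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMR3PhysGCPhys2CCPtrExternal(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint32_t *)pv = u32Value;                 /* direct access while holding the lock */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release ASAP, as documented */
    }
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        rc = PGMR3PhysWriteExternal(pVM, GCPhys, &u32Value, sizeof(u32Value));
    return rc;
}
#endif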
494
495
496/**
497 * Requests the mapping of a guest page into ring-3, external threads.
498 *
499 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
500 * release it.
501 *
502 * @returns VBox status code.
503 * @retval VINF_SUCCESS on success.
504 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
505 * backing or if the page has an active ALL access handler. The caller
506 * must fall back on using PGMPhysRead.
507 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
508 *
509 * @param pVM The VM handle.
510 * @param GCPhys The guest physical address of the page that should be mapped.
511 * @param ppv Where to store the address corresponding to GCPhys.
512 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
513 *
514 * @remark Avoid calling this API from within critical sections (other than
515 * the PGM one) because of the deadlock risk.
516 * @thread Any.
517 */
518VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
519{
520#ifdef VBOX_WITH_NEW_PHYS_CODE
521 int rc = pgmLock(pVM);
522 AssertRCReturn(rc, rc);
523
524 /*
525 * Query the Physical TLB entry for the page (may fail).
526 */
527 PPGMPAGEMAPTLBE pTlbe;
528 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
529 if (RT_SUCCESS(rc))
530 {
531 PPGMPAGE pPage = pTlbe->pPage;
532#if 1
533 /* MMIO pages don't have any readable backing. */
534 if (PGM_PAGE_IS_MMIO(pPage))
535 rc = VERR_PGM_PHYS_PAGE_RESERVED;
536#else
537 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
538 rc = VERR_PGM_PHYS_PAGE_RESERVED;
539#endif
540 else
541 {
542 /*
543 * Now, just perform the locking and calculate the return address.
544 */
545 PPGMPAGEMAP pMap = pTlbe->pMap;
546 pMap->cRefs++;
547#if 0 /** @todo implement locking properly */
548 if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
549 if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
550 {
551 AssertMsgFailed(("%RGp is entering permanent locked state!\n", GCPhys));
552 pMap->cRefs++; /* Extra ref to prevent it from going away. */
553 }
554#endif
555 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
556 pLock->pvPage = pPage;
557 pLock->pvMap = pMap;
558 }
559 }
560
561 pgmUnlock(pVM);
562 return rc;
563
564#else /* !VBOX_WITH_NEW_PHYS_CODE */
565 /*
566 * Fallback code.
567 */
568 return PGMPhysGCPhys2CCPtr(pVM, GCPhys, (void **)ppv, pLock);
569#endif /* !VBOX_WITH_NEW_PHYS_CODE */
570}
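
/*
 * Editor's illustration (not in the original source): read-only mapping with
 * the fall back on buffered reading when the page is MMIO-backed. The doc
 * comment says to fall back on PGMPhysRead; the external-thread variant
 * PGMR3PhysReadExternal defined earlier in this file is used here instead.
 * examplePeekU8 is an invented name.
 */
#if 0 /* example only */
static int examplePeekU8(PVM pVM, RTGCPHYS GCPhys, uint8_t *pb)
{
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMR3PhysGCPhys2CCPtrReadOnlyExternal(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pb = *(uint8_t const *)pv;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        rc = PGMR3PhysReadExternal(pVM, GCPhys, pb, sizeof(*pb));
    return rc;
}
#endif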
571
572
573/**
574 * Links a new RAM range into the list.
575 *
576 * @param pVM Pointer to the shared VM structure.
577 * @param pNew Pointer to the new list entry.
578 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
579 */
580static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
581{
582 AssertMsg(pNew->pszDesc, ("%RGp-%RGp\n", pNew->GCPhys, pNew->GCPhysLast));
583
584 pgmLock(pVM);
585
586 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
587 pNew->pNextR3 = pRam;
588 pNew->pNextR0 = pRam ? MMHyperCCToR0(pVM, pRam) : NIL_RTR0PTR;
589 pNew->pNextRC = pRam ? MMHyperCCToRC(pVM, pRam) : NIL_RTRCPTR;
590
591 if (pPrev)
592 {
593 pPrev->pNextR3 = pNew;
594 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
595 pPrev->pNextRC = MMHyperCCToRC(pVM, pNew);
596 }
597 else
598 {
599 pVM->pgm.s.pRamRangesR3 = pNew;
600 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
601 pVM->pgm.s.pRamRangesRC = MMHyperCCToRC(pVM, pNew);
602 }
603
604 pgmUnlock(pVM);
605}
606
607
608/**
609 * Unlink an existing RAM range from the list.
610 *
611 * @param pVM Pointer to the shared VM structure.
612 * @param pRam Pointer to the RAM range to unlink.
613 * @param pPrev Pointer to the previous list entry. NULL if pRam is the head.
614 */
615static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
616{
617 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
618
619 pgmLock(pVM);
620
621 PPGMRAMRANGE pNext = pRam->pNextR3;
622 if (pPrev)
623 {
624 pPrev->pNextR3 = pNext;
625 pPrev->pNextR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
626 pPrev->pNextRC = pNext ? MMHyperCCToRC(pVM, pNext) : NIL_RTRCPTR;
627 }
628 else
629 {
630 Assert(pVM->pgm.s.pRamRangesR3 == pRam);
631 pVM->pgm.s.pRamRangesR3 = pNext;
632 pVM->pgm.s.pRamRangesR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
633 pVM->pgm.s.pRamRangesRC = pNext ? MMHyperCCToRC(pVM, pNext) : NIL_RTRCPTR;
634 }
635
636 pgmUnlock(pVM);
637}
638
639
640/**
641 * Unlink an existing RAM range from the list.
642 *
643 * @param pVM Pointer to the shared VM structure.
644 * @param pRam Pointer to the RAM range to unlink.
645 */
646static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
647{
648 /* find prev. */
649 PPGMRAMRANGE pPrev = NULL;
650 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
651 while (pCur != pRam)
652 {
653 pPrev = pCur;
654 pCur = pCur->pNextR3;
655 }
656 AssertFatal(pCur);
657
658 pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
659}
660
661
662#ifdef VBOX_WITH_NEW_PHYS_CODE
663/**
664 * Frees a range of pages, replacing them with ZERO pages of the specified type.
665 *
666 * @returns VBox status code.
667 * @param pVM The VM handle.
668 * @param pRam The RAM range in which the pages resides.
669 * @param GCPhys The address of the first page.
670 * @param GCPhysLast The address of the last page.
671 * @param uType The page type to replace them with.
672 */
673static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, uint8_t uType)
674{
675 uint32_t cPendingPages = 0;
676 PGMMFREEPAGESREQ pReq;
677 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
678 AssertLogRelRCReturn(rc, rc);
679
680 /* Iterate the pages. */
681 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
682 uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> PAGE_SHIFT) + 1;
683 while (cPagesLeft-- > 0)
684 {
685 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
686 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
687
688 PGM_PAGE_SET_TYPE(pPageDst, uType);
689
690 GCPhys += PAGE_SIZE;
691 pPageDst++;
692 }
693
694 if (cPendingPages)
695 {
696 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
697 AssertLogRelRCReturn(rc, rc);
698 }
699 GMMR3FreePagesCleanup(pReq);
700
701 return rc;
702}
703#endif /* VBOX_WITH_NEW_PHYS_CODE */
704
705
706/**
707 * Sets up a RAM range.
708 *
709 * This will check for conflicting registrations, make a resource
710 * reservation for the memory (with GMM), and setup the per-page
711 * tracking structures (PGMPAGE).
712 *
713 * @returns VBox status code.
714 * @param pVM Pointer to the shared VM structure.
715 * @param GCPhys The physical address of the RAM.
716 * @param cb The size of the RAM.
717 * @param pszDesc The description - not copied, so, don't free or change it.
718 */
719VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
720{
721 /*
722 * Validate input.
723 */
724 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
725 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
726 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
727 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
728 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
729 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
730 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
731 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
732
733 /*
734 * Find range location and check for conflicts.
735 * (We don't lock here because the locking by EMT is only required on update.)
736 */
737 PPGMRAMRANGE pPrev = NULL;
738 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
739 while (pRam && GCPhysLast >= pRam->GCPhys)
740 {
741 if ( GCPhysLast >= pRam->GCPhys
742 && GCPhys <= pRam->GCPhysLast)
743 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
744 GCPhys, GCPhysLast, pszDesc,
745 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
746 VERR_PGM_RAM_CONFLICT);
747
748 /* next */
749 pPrev = pRam;
750 pRam = pRam->pNextR3;
751 }
752
753 /*
754 * Register it with GMM (the API bitches).
755 */
756 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
757 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
758 if (RT_FAILURE(rc))
759 return rc;
760
761 /*
762 * Allocate RAM range.
763 */
764 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
765 PPGMRAMRANGE pNew;
766 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
767 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
768
769 /*
770 * Initialize the range.
771 */
772 pNew->GCPhys = GCPhys;
773 pNew->GCPhysLast = GCPhysLast;
774 pNew->pszDesc = pszDesc;
775 pNew->cb = cb;
776 pNew->fFlags = 0;
777
778 pNew->pvR3 = NULL;
779#ifndef VBOX_WITH_NEW_PHYS_CODE
780 pNew->paChunkR3Ptrs = NULL;
781
782 /* Allocate memory for chunk to HC ptr lookup array. */
783 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
784 AssertRCReturn(rc, rc);
785 pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
786
787#endif
788 RTGCPHYS iPage = cPages;
789 while (iPage-- > 0)
790 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
791
792 /* Update the page count stats. */
793 pVM->pgm.s.cZeroPages += cPages;
794 pVM->pgm.s.cAllPages += cPages;
795
796 /*
797 * Insert the new RAM range.
798 */
799 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
800
801 /*
802 * Notify REM.
803 */
804#ifdef VBOX_WITH_NEW_PHYS_CODE
805 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_RAM);
806#else
807 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, MM_RAM_FLAGS_DYNAMIC_ALLOC);
808#endif
809
810 return VINF_SUCCESS;
811}
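
/*
 * Editor's illustration (not in the original source): registering a RAM range
 * during VM construction on the EMT. The base address, the 128 MB size and
 * the description string are arbitrary example values.
 */
#if 0 /* example only */
static int exampleRegisterBaseRam(PVM pVM)
{
    return PGMR3PhysRegisterRam(pVM, 0 /*GCPhys*/, 128 * _1M, "Example Base RAM");
}
#endif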
812
813
814/**
815 * Resets (zeros) the RAM.
816 *
817 * ASSUMES that the caller owns the PGM lock.
818 *
819 * @returns VBox status code.
820 * @param pVM Pointer to the shared VM structure.
821 */
822int pgmR3PhysRamReset(PVM pVM)
823{
824#ifdef VBOX_WITH_NEW_PHYS_CODE
825 /*
826 * We batch up pages before freeing them.
827 */
828 uint32_t cPendingPages = 0;
829 PGMMFREEPAGESREQ pReq;
830 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
831 AssertLogRelRCReturn(rc, rc);
832#endif
833
834 /*
835 * Walk the ram ranges.
836 */
837 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
838 {
839 uint32_t iPage = pRam->cb >> PAGE_SHIFT; Assert((RTGCPHYS)iPage << PAGE_SHIFT == pRam->cb);
840#ifdef VBOX_WITH_NEW_PHYS_CODE
841 if (!pVM->pgm.s.fRamPreAlloc)
842 {
843 /* Replace all RAM pages by ZERO pages. */
844 while (iPage-- > 0)
845 {
846 PPGMPAGE pPage = &pRam->aPages[iPage];
847 switch (PGM_PAGE_GET_TYPE(pPage))
848 {
849 case PGMPAGETYPE_RAM:
850 if (!PGM_PAGE_IS_ZERO(pPage))
851 {
852 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
853 AssertLogRelRCReturn(rc, rc);
854 }
855 break;
856
857 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
858 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
859 break;
860
861 case PGMPAGETYPE_MMIO2:
862 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
863 case PGMPAGETYPE_ROM:
864 case PGMPAGETYPE_MMIO:
865 break;
866 default:
867 AssertFailed();
868 }
869 } /* for each page */
870 }
871 else
872#endif
873 {
874 /* Zero the memory. */
875 while (iPage-- > 0)
876 {
877 PPGMPAGE pPage = &pRam->aPages[iPage];
878 switch (PGM_PAGE_GET_TYPE(pPage))
879 {
880#ifndef VBOX_WITH_NEW_PHYS_CODE
881 case PGMPAGETYPE_INVALID:
882 case PGMPAGETYPE_RAM:
883 if (pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)) /** @todo PAGE FLAGS */
884 {
885 /* shadow ram is reloaded elsewhere. */
886 Log4(("PGMR3Reset: not clearing phys page %RGp due to flags %RHp\n", pRam->GCPhys + (iPage << PAGE_SHIFT), pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO))); /** @todo PAGE FLAGS */
887 continue;
888 }
889 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
890 {
891 unsigned iChunk = iPage >> (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
892 if (pRam->paChunkR3Ptrs[iChunk])
893 ASMMemZero32((char *)pRam->paChunkR3Ptrs[iChunk] + ((iPage << PAGE_SHIFT) & PGM_DYNAMIC_CHUNK_OFFSET_MASK), PAGE_SIZE);
894 }
895 else
896 ASMMemZero32((char *)pRam->pvR3 + (iPage << PAGE_SHIFT), PAGE_SIZE);
897 break;
898#else /* VBOX_WITH_NEW_PHYS_CODE */
899 case PGMPAGETYPE_RAM:
900 switch (PGM_PAGE_GET_STATE(pPage))
901 {
902 case PGM_PAGE_STATE_ZERO:
903 break;
904 case PGM_PAGE_STATE_SHARED:
905 case PGM_PAGE_STATE_WRITE_MONITORED:
906 rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
907 AssertLogRelRCReturn(rc, rc);
908 case PGM_PAGE_STATE_ALLOCATED:
909 {
910 void *pvPage;
911 PPGMPAGEMAP pMapIgnored;
912 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pMapIgnored, &pvPage);
913 AssertLogRelRCReturn(rc, rc);
914 ASMMemZeroPage(pvPage);
915 break;
916 }
917 }
918 break;
919#endif /* VBOX_WITH_NEW_PHYS_CODE */
920
921 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
922 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
923 break;
924
925 case PGMPAGETYPE_MMIO2:
926 case PGMPAGETYPE_ROM_SHADOW:
927 case PGMPAGETYPE_ROM:
928 case PGMPAGETYPE_MMIO:
929 break;
930 default:
931 AssertFailed();
932
933 }
934 } /* for each page */
935 }
936
937 }
938
939#ifdef VBOX_WITH_NEW_PHYS_CODE
940 /*
941 * Finish off any pages pending freeing.
942 */
943 if (cPendingPages)
944 {
945 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
946 AssertLogRelRCReturn(rc, rc);
947 }
948 GMMR3FreePagesCleanup(pReq);
949#endif
950
951
952 return VINF_SUCCESS;
953}
954
955
956/**
957 * This is the interface IOM is using to register an MMIO region.
958 *
959 * It will check for conflicts and ensure that a RAM range structure
960 * is present before calling the PGMR3HandlerPhysicalRegister API to
961 * register the callbacks.
962 *
963 * @returns VBox status code.
964 *
965 * @param pVM Pointer to the shared VM structure.
966 * @param GCPhys The start of the MMIO region.
967 * @param cb The size of the MMIO region.
968 * @param pfnHandlerR3 The address of the ring-3 handler. (IOMR3MMIOHandler)
969 * @param pvUserR3 The user argument for R3.
970 * @param pfnHandlerR0 The address of the ring-0 handler. (IOMMMIOHandler)
971 * @param pvUserR0 The user argument for R0.
972 * @param pfnHandlerRC The address of the RC handler. (IOMMMIOHandler)
973 * @param pvUserRC The user argument for RC.
974 * @param pszDesc The description of the MMIO region.
975 */
976VMMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb,
977 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
978 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
979 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
980 R3PTRTYPE(const char *) pszDesc)
981{
982 /*
983 * Assert on some assumption.
984 */
985 VM_ASSERT_EMT(pVM);
986 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
987 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
988 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
989 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
990
991 /*
992 * Make sure there's a RAM range structure for the region.
993 */
994 int rc;
995 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
996 bool fRamExists = false;
997 PPGMRAMRANGE pRamPrev = NULL;
998 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
999 while (pRam && GCPhysLast >= pRam->GCPhys)
1000 {
1001 if ( GCPhysLast >= pRam->GCPhys
1002 && GCPhys <= pRam->GCPhysLast)
1003 {
1004 /* Simplification: all within the same range. */
1005 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
1006 && GCPhysLast <= pRam->GCPhysLast,
1007 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
1008 GCPhys, GCPhysLast, pszDesc,
1009 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1010 VERR_PGM_RAM_CONFLICT);
1011
1012 /* Check that it's all RAM or MMIO pages. */
1013 PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1014 uint32_t cLeft = cb >> PAGE_SHIFT;
1015 while (cLeft-- > 0)
1016 {
1017 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
1018 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
1019 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
1020 GCPhys, GCPhysLast, pszDesc, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
1021 VERR_PGM_RAM_CONFLICT);
1022 pPage++;
1023 }
1024
1025 /* Looks good. */
1026 fRamExists = true;
1027 break;
1028 }
1029
1030 /* next */
1031 pRamPrev = pRam;
1032 pRam = pRam->pNextR3;
1033 }
1034 PPGMRAMRANGE pNew;
1035 if (fRamExists)
1036 {
1037 pNew = NULL;
1038#ifdef VBOX_WITH_NEW_PHYS_CODE
1039 /*
1040 * Make all the pages in the range MMIO/ZERO pages, freeing any
1041 * RAM pages currently mapped here. This might not be 100% correct
1042 * for PCI memory, but we're doing the same thing for MMIO2 pages.
1043 */
1044 rc = pgmLock(pVM);
1045 if (RT_SUCCESS(rc))
1046 {
1047 rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, PGMPAGETYPE_MMIO);
1048 pgmUnlock(pVM);
1049 }
1050 AssertRCReturn(rc, rc);
1051#endif
1052 }
1053 else
1054 {
1055 /*
1056 * No RAM range, insert an ad-hoc one.
1057 *
1058 * Note that we don't have to tell REM about this range because
1059 * PGMHandlerPhysicalRegisterEx will do that for us.
1060 */
1061 Log(("PGMR3PhysMMIORegister: Adding ad-hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));
1062
1063 const uint32_t cPages = cb >> PAGE_SHIFT;
1064 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
1065 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), 16, MM_TAG_PGM_PHYS, (void **)&pNew);
1066 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
1067
1068 /* Initialize the range. */
1069 pNew->GCPhys = GCPhys;
1070 pNew->GCPhysLast = GCPhysLast;
1071 pNew->pszDesc = pszDesc;
1072 pNew->cb = cb;
1073 pNew->fFlags = 0; /* Some MMIO flag here? */
1074
1075 pNew->pvR3 = NULL;
1076#ifndef VBOX_WITH_NEW_PHYS_CODE
1077 pNew->paChunkR3Ptrs = NULL;
1078#endif
1079
1080 uint32_t iPage = cPages;
1081 while (iPage-- > 0)
1082 PGM_PAGE_INIT_ZERO_REAL(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
1083 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);
1084
1085 /* update the page count stats. */
1086 pVM->pgm.s.cZeroPages += cPages;
1087 pVM->pgm.s.cAllPages += cPages;
1088
1089 /* link it */
1090 pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
1091 }
1092
1093 /*
1094 * Register the access handler.
1095 */
1096 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_MMIO, GCPhys, GCPhysLast,
1097 pfnHandlerR3, pvUserR3,
1098 pfnHandlerR0, pvUserR0,
1099 pfnHandlerRC, pvUserRC, pszDesc);
1100 if ( RT_FAILURE(rc)
1101 && !fRamExists)
1102 {
1103 pVM->pgm.s.cZeroPages -= cb >> PAGE_SHIFT;
1104 pVM->pgm.s.cAllPages -= cb >> PAGE_SHIFT;
1105
1106 /* remove the ad-hoc range. */
1107 pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
1108 pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
1109 MMHyperFree(pVM, pNew); /* not pRam: pNew is the ad-hoc range allocated above */
1110 }
1111
1112 return rc;
1113}
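
/*
 * Editor's illustration (not in the original source): a ring-3 only MMIO
 * registration. The handler signature mirrors the PFNPGMR3PHYSHANDLER callback
 * declared at the top of this file; in practice IOM passes IOMR3MMIOHandler
 * and its ring-0/RC siblings here. exampleMmioHandler, exampleRegisterMmio and
 * the 0xe0000000 base address are invented for the illustration.
 */
#if 0 /* example only */
static DECLCALLBACK(int) exampleMmioHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf,
                                            size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
    /* decode the access and forward it to the device emulation... */
    return VINF_SUCCESS;
}

static int exampleRegisterMmio(PVM pVM, void *pvDevice)
{
    /* PGMR3PhysMMIODeregister(pVM, UINT32_C(0xe0000000), PAGE_SIZE) undoes this. */
    return PGMR3PhysMMIORegister(pVM, UINT32_C(0xe0000000), PAGE_SIZE,
                                 exampleMmioHandler, pvDevice,
                                 NIL_RTR0PTR, NIL_RTR0PTR,
                                 NIL_RTRCPTR, NIL_RTRCPTR,
                                 "Example MMIO");
}
#endif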
1114
1115
1116/**
1117 * This is the interface IOM is using to deregister an MMIO region.
1118 *
1119 * It will take care of calling PGMHandlerPhysicalDeregister and cleaning up
1120 * any ad-hoc PGMRAMRANGE left behind.
1121 *
1122 * @returns VBox status code.
1123 * @param pVM Pointer to the shared VM structure.
1124 * @param GCPhys The start of the MMIO region.
1125 * @param cb The size of the MMIO region.
1126 */
1127VMMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
1128{
1129 VM_ASSERT_EMT(pVM);
1130
1131 /*
1132 * First deregister the handler, then check if we should remove the ram range.
1133 */
1134 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
1135 if (RT_SUCCESS(rc))
1136 {
1137 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1138 PPGMRAMRANGE pRamPrev = NULL;
1139 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1140 while (pRam && GCPhysLast >= pRam->GCPhys)
1141 {
1142 /** @todo We're being a bit too careful here. rewrite. */
1143 if ( GCPhysLast == pRam->GCPhysLast
1144 && GCPhys == pRam->GCPhys)
1145 {
1146 Assert(pRam->cb == cb);
1147
1148 /*
1149 * See if all the pages are dead MMIO pages.
1150 */
1151 uint32_t const cPages = cb >> PAGE_SHIFT;
1152 bool fAllMMIO = true;
1153 uint32_t iPage = 0;
1154 uint32_t cLeft = cPages;
1155 while (cLeft-- > 0)
1156 {
1157 PPGMPAGE pPage = &pRam->aPages[iPage];
1158 if ( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO
1159 /*|| not-out-of-action later */)
1160 {
1161 fAllMMIO = false;
1162#ifdef VBOX_WITH_NEW_PHYS_CODE
1163 Assert(PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1164 AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
1165#endif
1166 break;
1167 }
1168 Assert(PGM_PAGE_IS_ZERO(pPage));
1169 pPage++;
1170 }
1171 if (fAllMMIO)
1172 {
1173 /*
1174 * Ad-hoc range, unlink and free it.
1175 */
1176 Log(("PGMR3PhysMMIODeregister: Freeing ad-hoc MMIO range for %RGp-%RGp %s\n",
1177 GCPhys, GCPhysLast, pRam->pszDesc));
1178
1179 pVM->pgm.s.cAllPages -= cPages;
1180 pVM->pgm.s.cZeroPages -= cPages;
1181
1182 pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
1183 pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
1184 MMHyperFree(pVM, pRam);
1185 break;
1186 }
1187 }
1188
1189#ifdef VBOX_WITH_NEW_PHYS_CODE
1190 /*
1191 * Range match? It will all be within one range (see PGMAllHandler.cpp).
1192 */
1193 if ( GCPhysLast >= pRam->GCPhys
1194 && GCPhys <= pRam->GCPhysLast)
1195 {
1196 Assert(GCPhys >= pRam->GCPhys);
1197 Assert(GCPhysLast <= pRam->GCPhysLast);
1198
1199 /*
1200 * Turn the pages back into RAM pages.
1201 */
1202 uint32_t iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1203 uint32_t cLeft = cb >> PAGE_SHIFT;
1204 while (cLeft--)
1205 {
1206 PPGMPAGE pPage = &pRam->aPages[iPage];
1207 AssertMsg(PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
1208 AssertMsg(PGM_PAGE_IS_ZERO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
1209 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
1210 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_RAM);
1211 }
1212 break;
1213 }
1214#endif
1215
1216 /* next */
1217 pRamPrev = pRam;
1218 pRam = pRam->pNextR3;
1219 }
1220 }
1221
1222 return rc;
1223}
1224
1225
1226/**
1227 * Locate a MMIO2 range.
1228 *
1229 * @returns Pointer to the MMIO2 range.
1230 * @param pVM Pointer to the shared VM structure.
1231 * @param pDevIns The device instance owning the region.
1232 * @param iRegion The region.
1233 */
1234DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
1235{
1236 /*
1237 * Search the list.
1238 */
1239 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
1240 if ( pCur->pDevInsR3 == pDevIns
1241 && pCur->iRegion == iRegion)
1242 return pCur;
1243 return NULL;
1244}
1245
1246
1247/**
1248 * Allocate and register an MMIO2 region.
1249 *
1250 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's
1251 * RAM associated with a device. It is also non-shared memory with a
1252 * permanent ring-3 mapping and page backing (presently).
1253 *
1254 * A MMIO2 range may overlap with base memory if a lot of RAM
1255 * is configured for the VM, in which case we'll drop the base
1256 * memory pages. Presently we will make no attempt to preserve
1257 * anything that happens to be present in the base memory that
1258 * is replaced; this is of course incorrect, but it's too much
1259 * effort.
1260 *
1261 * @returns VBox status code.
1262 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the memory.
1263 * @retval VERR_ALREADY_EXISTS if the region already exists.
1264 *
1265 * @param pVM Pointer to the shared VM structure.
1266 * @param pDevIns The device instance owning the region.
1267 * @param iRegion The region number. If the MMIO2 memory is a PCI I/O region
1268 * this number has to be the number of that region. Otherwise
1269 * it can be any number save UINT8_MAX.
1270 * @param cb The size of the region. Must be page aligned.
1271 * @param fFlags Reserved for future use, must be zero.
1272 * @param ppv Where to store the pointer to the ring-3 mapping of the memory.
1273 * @param pszDesc The description.
1274 */
1275VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc)
1276{
1277 /*
1278 * Validate input.
1279 */
1280 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1281 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1282 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1283 AssertPtrReturn(ppv, VERR_INVALID_POINTER);
1284 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1285 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
1286 AssertReturn(pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
1287 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1288 AssertReturn(cb, VERR_INVALID_PARAMETER);
1289 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
1290
1291 const uint32_t cPages = cb >> PAGE_SHIFT;
1292 AssertLogRelReturn((RTGCPHYS)cPages << PAGE_SHIFT == cb, VERR_INVALID_PARAMETER);
1293 AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY);
1294
1295 /*
1296 * Try reserve and allocate the backing memory first as this is what is
1297 * most likely to fail.
1298 */
1299 int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc);
1300 if (RT_FAILURE(rc))
1301 return rc;
1302
1303 void *pvPages;
1304 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE));
1305 if (RT_SUCCESS(rc))
1306 rc = SUPR3PageAllocEx(cPages, 0 /*fFlags*/, &pvPages, NULL /*pR0Ptr*/, paPages);
1307 if (RT_SUCCESS(rc))
1308 {
1309 memset(pvPages, 0, cPages * PAGE_SIZE);
1310
1311 /*
1312 * Create the MMIO2 range record for it.
1313 */
1314 const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]);
1315 PPGMMMIO2RANGE pNew;
1316 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
1317 AssertLogRelMsgRC(rc, ("cbRamRange=%zu\n", cbRange));
1318 if (RT_SUCCESS(rc))
1319 {
1320 pNew->pDevInsR3 = pDevIns;
1321 pNew->pvR3 = pvPages;
1322 //pNew->pNext = NULL;
1323 //pNew->fMapped = false;
1324 //pNew->fOverlapping = false;
1325 pNew->iRegion = iRegion;
1326 pNew->RamRange.GCPhys = NIL_RTGCPHYS;
1327 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
1328 pNew->RamRange.pszDesc = pszDesc;
1329 pNew->RamRange.cb = cb;
1330 //pNew->RamRange.fFlags = 0;
1331
1332 pNew->RamRange.pvR3 = pvPages; ///@todo remove this [new phys code]
1333#ifndef VBOX_WITH_NEW_PHYS_CODE
1334 pNew->RamRange.paChunkR3Ptrs = NULL; ///@todo remove this [new phys code]
1335#endif
1336
1337 uint32_t iPage = cPages;
1338 while (iPage-- > 0)
1339 {
1340 PGM_PAGE_INIT(&pNew->RamRange.aPages[iPage],
1341 paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
1342 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
1343 }
1344
1345 /* update page count stats */
1346 pVM->pgm.s.cAllPages += cPages;
1347 pVM->pgm.s.cPrivatePages += cPages;
1348
1349 /*
1350 * Link it into the list.
1351 * Since there is no particular order, just push it.
1352 */
1353 pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3;
1354 pVM->pgm.s.pMmio2RangesR3 = pNew;
1355
1356 *ppv = pvPages;
1357 RTMemTmpFree(paPages);
1358 return VINF_SUCCESS;
1359 }
1360
1361 SUPR3PageFreeEx(pvPages, cPages);
1362 }
1363 RTMemTmpFree(paPages);
1364 MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pszDesc);
1365 return rc;
1366}
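
/*
 * Editor's illustration (not in the original source): a device allocating a
 * 4 MB MMIO2 region (e.g. framebuffer-like backing) from its construction
 * callback. pDevIns comes from PDM; region number 0, the size and the
 * description are example values.
 */
#if 0 /* example only */
static int exampleCreateMmio2(PVM pVM, PPDMDEVINS pDevIns, void **ppvBacking)
{
    return PGMR3PhysMMIO2Register(pVM, pDevIns, 0 /*iRegion*/, 4 * _1M,
                                  0 /*fFlags*/, ppvBacking, "Example MMIO2");
}
#endif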
1367
1368
1369/**
1370 * Deregisters and frees an MMIO2 region.
1371 *
1372 * Any physical (and virtual) access handlers registered for the region must
1373 * be deregistered before calling this function.
1374 *
1375 * @returns VBox status code.
1376 * @param pVM Pointer to the shared VM structure.
1377 * @param pDevIns The device instance owning the region.
1378 * @param iRegion The region. If it's UINT32_MAX it'll be a wildcard match.
1379 */
1380VMMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
1381{
1382 /*
1383 * Validate input.
1384 */
1385 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1386 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1387 AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
1388
1389 int rc = VINF_SUCCESS;
1390 unsigned cFound = 0;
1391 PPGMMMIO2RANGE pPrev = NULL;
1392 PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3;
1393 while (pCur)
1394 {
1395 if ( pCur->pDevInsR3 == pDevIns
1396 && ( iRegion == UINT32_MAX
1397 || pCur->iRegion == iRegion))
1398 {
1399 cFound++;
1400
1401 /*
1402 * Unmap it if it's mapped.
1403 */
1404 if (pCur->fMapped)
1405 {
1406 int rc2 = PGMR3PhysMMIO2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);
1407 AssertRC(rc2);
1408 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1409 rc = rc2;
1410 }
1411
1412 /*
1413 * Unlink it
1414 */
1415 PPGMMMIO2RANGE pNext = pCur->pNextR3;
1416 if (pPrev)
1417 pPrev->pNextR3 = pNext;
1418 else
1419 pVM->pgm.s.pMmio2RangesR3 = pNext;
1420 pCur->pNextR3 = NULL;
1421
1422 /*
1423 * Free the memory.
1424 */
1425 int rc2 = SUPR3PageFreeEx(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT);
1426 AssertRC(rc2);
1427 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1428 rc = rc2;
1429
1430 uint32_t const cPages = pCur->RamRange.cb >> PAGE_SHIFT;
1431 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pCur->RamRange.pszDesc);
1432 AssertRC(rc2);
1433 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1434 rc = rc2;
1435
1436 /* we're leaking hyper memory here if done at runtime. */
1437 Assert( VMR3GetState(pVM) == VMSTATE_OFF
1438 || VMR3GetState(pVM) == VMSTATE_DESTROYING
1439 || VMR3GetState(pVM) == VMSTATE_TERMINATED
1440 || VMR3GetState(pVM) == VMSTATE_CREATING);
1441 /*rc = MMHyperFree(pVM, pCur);
1442 AssertRCReturn(rc, rc); - not safe, see the alloc call. */
1443
1444
1445 /* update page count stats */
1446 pVM->pgm.s.cAllPages -= cPages;
1447 pVM->pgm.s.cPrivatePages -= cPages;
1448
1449 /* next */
1450 pCur = pNext;
1451 }
1452 else
1453 {
1454 pPrev = pCur;
1455 pCur = pCur->pNextR3;
1456 }
1457 }
1458
1459 return !cFound && iRegion != UINT32_MAX ? VERR_NOT_FOUND : rc;
1460}
1461
1462
1463/**
1464 * Maps a MMIO2 region.
1465 *
1466 * This is done when a guest / the bios / state loading changes the
1467 * PCI config. The replacing of base memory has the same restrictions
1468 * as during registration, of course.
1469 *
1470 * @returns VBox status code.
1471 *
1472 * @param pVM Pointer to the shared VM structure.
1473 * @param pDevIns The device instance owning the region.
1474 */
1475VMMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
1476{
1477 /*
1478 * Validate input
1479 */
1480 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1481 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1482 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1483 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
1484 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
1485 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1486
1487 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1488 AssertReturn(pCur, VERR_NOT_FOUND);
1489 AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER);
1490 Assert(pCur->RamRange.GCPhys == NIL_RTGCPHYS);
1491 Assert(pCur->RamRange.GCPhysLast == NIL_RTGCPHYS);
1492
1493 const RTGCPHYS GCPhysLast = GCPhys + pCur->RamRange.cb - 1;
1494 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1495
1496 /*
1497 * Find our location in the ram range list, checking for
1498 * a restriction we don't bother implementing yet (partial overlap).
1499 */
1500 bool fRamExists = false;
1501 PPGMRAMRANGE pRamPrev = NULL;
1502 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1503 while (pRam && GCPhysLast >= pRam->GCPhys)
1504 {
1505 if ( GCPhys <= pRam->GCPhysLast
1506 && GCPhysLast >= pRam->GCPhys)
1507 {
1508 /* completely within? */
1509 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
1510 && GCPhysLast <= pRam->GCPhysLast,
1511 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
1512 GCPhys, GCPhysLast, pCur->RamRange.pszDesc,
1513 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1514 VERR_PGM_RAM_CONFLICT);
1515 fRamExists = true;
1516 break;
1517 }
1518
1519 /* next */
1520 pRamPrev = pRam;
1521 pRam = pRam->pNextR3;
1522 }
1523 if (fRamExists)
1524 {
1525 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1526 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
1527 while (cPagesLeft-- > 0)
1528 {
1529 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
1530 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
1531 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc),
1532 VERR_PGM_RAM_CONFLICT);
1533 pPage++;
1534 }
1535 }
1536 Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n",
1537 GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc));
1538
1539 /*
1540 * Make the changes.
1541 */
1542 pgmLock(pVM);
1543
1544 pCur->RamRange.GCPhys = GCPhys;
1545 pCur->RamRange.GCPhysLast = GCPhysLast;
1546 pCur->fMapped = true;
1547 pCur->fOverlapping = fRamExists;
1548
1549 if (fRamExists)
1550 {
1551/** @todo use pgmR3PhysFreePageRange here. */
1552 uint32_t cPendingPages = 0;
1553 PGMMFREEPAGESREQ pReq;
1554 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1555 AssertLogRelRCReturn(rc, rc);
1556
1557 /* replace the pages, freeing all present RAM pages. */
1558 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
1559 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1560 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
1561 while (cPagesLeft-- > 0)
1562 {
1563 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
1564 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
1565
1566 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
1567 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhys);
1568 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_MMIO2);
1569 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ALLOCATED);
1570
1571 pVM->pgm.s.cZeroPages--;
1572 GCPhys += PAGE_SIZE;
1573 pPageSrc++;
1574 pPageDst++;
1575 }
1576
1577 if (cPendingPages)
1578 {
1579 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
1580 AssertLogRelRCReturn(rc, rc);
1581 }
1582 GMMR3FreePagesCleanup(pReq);
1583 }
1584 else
1585 {
1586 /* link in the ram range */
1587 pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev);
1588 REMR3NotifyPhysRamRegister(pVM, GCPhys, pCur->RamRange.cb, REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2);
1589 }
1590
1591 pgmUnlock(pVM);
1592
1593 return VINF_SUCCESS;
1594}
1595
1596
1597/**
1598 * Unmaps a MMIO2 region.
1599 *
1600 * This is done when a guest / the bios / state loading changes the
1601 * PCI config. The replacing of base memory has the same restrictions
1602 * as during registration, of course.
1603 */
1604VMMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
1605{
1606 /*
1607 * Validate input
1608 */
1609 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1610 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1611 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1612 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
1613 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
1614 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1615
1616 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1617 AssertReturn(pCur, VERR_NOT_FOUND);
1618 AssertReturn(pCur->fMapped, VERR_WRONG_ORDER);
1619 AssertReturn(pCur->RamRange.GCPhys == GCPhys, VERR_INVALID_PARAMETER);
1620 Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS);
1621
1622 Log(("PGMR3PhysMMIO2Unmap: %RGp-%RGp %s\n",
1623 pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc));
1624
1625 /*
1626 * Unmap it.
1627 */
1628 pgmLock(pVM);
1629
1630 if (pCur->fOverlapping)
1631 {
1632 /* Restore the RAM pages we've replaced. */
1633 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1634 while (pRam->GCPhys > pCur->RamRange.GCPhysLast)
1635 pRam = pRam->pNextR3;
1636
1637 RTHCPHYS const HCPhysZeroPg = pVM->pgm.s.HCPhysZeroPg;
1638 Assert(HCPhysZeroPg != 0 && HCPhysZeroPg != NIL_RTHCPHYS);
1639 PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1640 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
1641 while (cPagesLeft-- > 0)
1642 {
1643 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhysZeroPg);
1644 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_RAM);
1645 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ZERO);
1646 PGM_PAGE_SET_PAGEID(pPageDst, NIL_GMM_PAGEID);
1647
1648 pVM->pgm.s.cZeroPages++;
1649 pPageDst++;
1650 }
1651 }
1652 else
1653 {
1654 REMR3NotifyPhysRamDeregister(pVM, pCur->RamRange.GCPhys, pCur->RamRange.cb);
1655 pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange);
1656 }
1657
1658 pCur->RamRange.GCPhys = NIL_RTGCPHYS;
1659 pCur->RamRange.GCPhysLast = NIL_RTGCPHYS;
1660 pCur->fOverlapping = false;
1661 pCur->fMapped = false;
1662
1663 pgmUnlock(pVM);
1664
1665 return VINF_SUCCESS;
1666}
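
/*
 * Editor's illustration (not in the original source): the map/unmap pairing a
 * device would perform when the guest reprograms the corresponding PCI BAR.
 * exampleMoveMmio2 is an invented name; NIL_RTGCPHYS is used here to mean
 * "not currently mapped".
 */
#if 0 /* example only */
static int exampleMoveMmio2(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew)
{
    int rc = VINF_SUCCESS;
    if (GCPhysOld != NIL_RTGCPHYS)
        rc = PGMR3PhysMMIO2Unmap(pVM, pDevIns, 0 /*iRegion*/, GCPhysOld);
    if (RT_SUCCESS(rc) && GCPhysNew != NIL_RTGCPHYS)
        rc = PGMR3PhysMMIO2Map(pVM, pDevIns, 0 /*iRegion*/, GCPhysNew);
    return rc;
}
#endif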
1667
1668
1669/**
1670 * Checks if the given address is an MMIO2 base address or not.
1671 *
1672 * @returns true/false accordingly.
1673 * @param pVM Pointer to the shared VM structure.
1674 * @param pDevIns The owner of the memory, optional.
1675 * @param GCPhys The address to check.
1676 */
1677VMMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
1678{
1679 /*
1680 * Validate input
1681 */
1682 VM_ASSERT_EMT_RETURN(pVM, false);
1683 AssertPtrReturn(pDevIns, false);
1684 AssertReturn(GCPhys != NIL_RTGCPHYS, false);
1685 AssertReturn(GCPhys != 0, false);
1686 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);
1687
1688 /*
1689 * Search the list.
1690 */
1691 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
1692 if (pCur->RamRange.GCPhys == GCPhys)
1693 {
1694 Assert(pCur->fMapped);
1695 return true;
1696 }
1697 return false;
1698}
1699
1700
1701/**
1702 * Gets the HC physical address of a page in the MMIO2 region.
1703 *
1704 * This API is intended for MMHyper and shouldn't be called
1705 * by anyone else...
1706 *
1707 * @returns VBox status code.
1708 * @param pVM Pointer to the shared VM structure.
1709 * @param pDevIns The owner of the memory, optional.
1710 * @param iRegion The region.
1711 * @param off The page expressed as an offset into the MMIO2 region.
1712 * @param pHCPhys Where to store the result.
1713 */
1714VMMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
1715{
1716 /*
1717 * Validate input
1718 */
1719 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1720 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1721 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1722
1723 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1724 AssertReturn(pCur, VERR_NOT_FOUND);
1725 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
1726
1727 PCPGMPAGE pPage = &pCur->RamRange.aPages[off >> PAGE_SHIFT];
1728 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1729 return VINF_SUCCESS;
1730}
1731
1732
1733/**
1734 * Maps a portion of an MMIO2 region into kernel space (host).
1735 *
1736 * The kernel mapping will become invalid when the MMIO2 memory is deregistered
1737 * or the VM is terminated.
1738 *
1739 * @return VBox status code.
1740 *
1741 * @param pVM Pointer to the shared VM structure.
1742 * @param pDevIns The device owning the MMIO2 memory.
1743 * @param iRegion The region.
1744 * @param off The offset into the region. Must be page aligned.
1745 * @param cb The number of bytes to map. Must be page aligned.
1746 * @param pszDesc Mapping description.
1747 * @param pR0Ptr Where to store the R0 address.
1748 */
1749VMMR3DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
1750 const char *pszDesc, PRTR0PTR pR0Ptr)
1751{
1752 /*
1753 * Validate input.
1754 */
1755 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1756 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1757 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1758
1759 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1760 AssertReturn(pCur, VERR_NOT_FOUND);
1761 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
1762 AssertReturn(cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
1763 AssertReturn(off + cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
1764
1765 /*
1766 * Pass the request on to the support library/driver.
1767 */
1768 int rc = SUPR3PageMapKernel(pCur->pvR3, off, cb, 0, pR0Ptr);
1769
1770 return rc;
1771}
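
/*
 * Editor's illustration (not in the original source): exposing the first page
 * of an MMIO2 region to ring-0 so ring-0 device code can access it directly.
 * The offset, size and description are example values.
 */
#if 0 /* example only */
static int exampleMapFirstPageToR0(PVM pVM, PPDMDEVINS pDevIns, PRTR0PTR pR0Ptr)
{
    return PGMR3PhysMMIO2MapKernel(pVM, pDevIns, 0 /*iRegion*/, 0 /*off*/, PAGE_SIZE,
                                   "Example R0 view", pR0Ptr);
}
#endif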
1772
1773
1774/**
1775 * Registers a ROM image.
1776 *
1777 * Shadowed ROM images require double the amount of backing memory, so
1778 * don't use that unless you have to. Shadowing of ROM images is a process
1779 * where we can select where the reads go and where the writes go. On real
1780 * hardware the chipset provides means to configure this. We provide
1781 * PGMR3PhysProtectROM() for this purpose.
1782 *
1783 * A read-only copy of the ROM image will always be kept around while we
1784 * will allocate RAM pages for the changes on demand (unless all memory
1785 * is configured to be preallocated).
1786 *
1787 * @returns VBox status.
1788 * @param pVM VM Handle.
1789 * @param pDevIns The device instance owning the ROM.
1790 * @param GCPhys First physical address in the range.
1791 * Must be page aligned!
1792 * @param cb The size of the range (in bytes).
1793 * Must be page aligned!
1794 * @param pvBinary Pointer to the binary data backing the ROM image.
1795 * This must be exactly \a cbRange in size.
1796 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAGS_SHADOWED
1797 * and/or PGMPHYS_ROM_FLAGS_PERMANENT_BINARY.
1798 * @param pszDesc Pointer to description string. This must not be freed.
1799 *
1800 * @remark There is no way to remove the ROM yet, either automatically on device
1801 * cleanup or manually from the device. This isn't difficult in any way, it's
1802 * just not something we expect to be necessary for a while.
1803 */
1804VMMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
1805 const void *pvBinary, uint32_t fFlags, const char *pszDesc)
1806{
1807 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p fFlags=%#x pszDesc=%s\n",
1808 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, fFlags, pszDesc));
1809
1810 /*
1811 * Validate input.
1812 */
1813 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1814 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
1815 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
1816 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1817 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1818 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
1819 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1820 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAGS_SHADOWED | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
1821 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
1822
1823 const uint32_t cPages = cb >> PAGE_SHIFT;
1824
1825 /*
1826 * Find the ROM location in the ROM list first.
1827 */
1828 PPGMROMRANGE pRomPrev = NULL;
1829 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
1830 while (pRom && GCPhysLast >= pRom->GCPhys)
1831 {
1832 if ( GCPhys <= pRom->GCPhysLast
1833 && GCPhysLast >= pRom->GCPhys)
1834 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
1835 GCPhys, GCPhysLast, pszDesc,
1836 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
1837 VERR_PGM_RAM_CONFLICT);
1838 /* next */
1839 pRomPrev = pRom;
1840 pRom = pRom->pNextR3;
1841 }
1842
1843 /*
1844 * Find the RAM location and check for conflicts.
1845 *
1846 * Conflict detection is a bit different than for RAM
1847 * registration since a ROM can be located within a RAM
1848 * range. So, what we have to check for is other memory
1849 * types (other than RAM that is) and that we don't span
1850 * more than one RAM range (lazy).
1851 */
1852 bool fRamExists = false;
1853 PPGMRAMRANGE pRamPrev = NULL;
1854 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1855 while (pRam && GCPhysLast >= pRam->GCPhys)
1856 {
1857 if ( GCPhys <= pRam->GCPhysLast
1858 && GCPhysLast >= pRam->GCPhys)
1859 {
1860 /* completely within? */
1861 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
1862 && GCPhysLast <= pRam->GCPhysLast,
1863 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
1864 GCPhys, GCPhysLast, pszDesc,
1865 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1866 VERR_PGM_RAM_CONFLICT);
1867 fRamExists = true;
1868 break;
1869 }
1870
1871 /* next */
1872 pRamPrev = pRam;
1873 pRam = pRam->pNextR3;
1874 }
1875 if (fRamExists)
1876 {
1877 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1878 uint32_t cPagesLeft = cPages;
1879 while (cPagesLeft-- > 0)
1880 {
1881 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
1882 ("%RGp isn't a RAM page (%d) - registering %RGp-%RGp (%s).\n",
1883 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pszDesc),
1884 VERR_PGM_RAM_CONFLICT);
1885 Assert(PGM_PAGE_IS_ZERO(pPage));
1886 pPage++;
1887 }
1888 }
1889
1890 /*
1891 * Update the base memory reservation if necessary.
1892 */
1893 uint32_t cExtraBaseCost = fRamExists ? cPages : 0;
1894 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
1895 cExtraBaseCost += cPages;
1896 if (cExtraBaseCost)
1897 {
1898 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
1899 if (RT_FAILURE(rc))
1900 return rc;
1901 }
1902
1903 /*
1904 * Allocate memory for the virgin copy of the RAM.
1905 */
1906 PGMMALLOCATEPAGESREQ pReq;
1907 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
1908 AssertRCReturn(rc, rc);
1909
1910 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1911 {
1912 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
1913 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
1914 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
1915 }
1916
1917 pgmLock(pVM);
1918 rc = GMMR3AllocatePagesPerform(pVM, pReq);
1919 pgmUnlock(pVM);
1920 if (RT_FAILURE(rc))
1921 {
1922 GMMR3AllocatePagesCleanup(pReq);
1923 return rc;
1924 }
1925
1926 /*
1927 * Allocate the new ROM range and RAM range (if necessary).
1928 */
1929 PPGMROMRANGE pRomNew;
1930 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
1931 if (RT_SUCCESS(rc))
1932 {
1933 PPGMRAMRANGE pRamNew = NULL;
1934 if (!fRamExists)
1935 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
1936 if (RT_SUCCESS(rc))
1937 {
1938 pgmLock(pVM);
1939
1940 /*
1941 * Initialize and insert the RAM range (if required).
1942 */
1943 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
1944 if (!fRamExists)
1945 {
1946 pRamNew->GCPhys = GCPhys;
1947 pRamNew->GCPhysLast = GCPhysLast;
1948 pRamNew->pszDesc = pszDesc;
1949 pRamNew->cb = cb;
1950 pRamNew->fFlags = 0;
1951 pRamNew->pvR3 = NULL;
1952
1953 PPGMPAGE pPage = &pRamNew->aPages[0];
1954 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
1955 {
1956 PGM_PAGE_INIT(pPage,
1957 pReq->aPages[iPage].HCPhysGCPhys,
1958 pReq->aPages[iPage].idPage,
1959 PGMPAGETYPE_ROM,
1960 PGM_PAGE_STATE_ALLOCATED);
1961
1962 pRomPage->Virgin = *pPage;
1963 }
1964
1965 pVM->pgm.s.cAllPages += cPages;
1966 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
1967 }
1968 else
1969 {
1970 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1971 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
1972 {
1973 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_ROM);
1974 PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
1975 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1976 PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);
1977
1978 pRomPage->Virgin = *pPage;
1979 }
1980
1981 pRamNew = pRam;
1982
1983 pVM->pgm.s.cZeroPages -= cPages;
1984 }
1985 pVM->pgm.s.cPrivatePages += cPages;
1986
1987 pgmUnlock(pVM);
1988
1989
1990 /*
1991 * !HACK ALERT! REM + (Shadowed) ROM ==> mess.
1992 *
1993 * If it's shadowed we'll register the handler after the ROM notification
1994 * so we get the access handler callbacks that we should. If it isn't
1995 * shadowed we'll do it the other way around to make REM use the built-in
1996 * ROM behavior and not the handler behavior (which is to route all access
1997 * to PGM atm).
1998 */
1999 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
2000 {
2001 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, true /* fShadowed */);
2002 rc = PGMR3HandlerPhysicalRegister(pVM,
2003 fFlags & PGMPHYS_ROM_FLAGS_SHADOWED
2004 ? PGMPHYSHANDLERTYPE_PHYSICAL_ALL
2005 : PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
2006 GCPhys, GCPhysLast,
2007 pgmR3PhysRomWriteHandler, pRomNew,
2008 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
2009 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
2010 }
2011 else
2012 {
2013 rc = PGMR3HandlerPhysicalRegister(pVM,
2014 fFlags & PGMPHYS_ROM_FLAGS_SHADOWED
2015 ? PGMPHYSHANDLERTYPE_PHYSICAL_ALL
2016 : PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
2017 GCPhys, GCPhysLast,
2018 pgmR3PhysRomWriteHandler, pRomNew,
2019 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
2020 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
2021 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false /* fShadowed */);
2022 }
2023 if (RT_SUCCESS(rc))
2024 {
2025 pgmLock(pVM);
2026
2027 /*
2028 * Copy the image over to the virgin pages.
2029 * This must be done after linking in the RAM range.
2030 */
2031 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
2032 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
2033 {
2034 void *pvDstPage;
2035 PPGMPAGEMAP pMapIgnored;
2036 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pMapIgnored, &pvDstPage);
2037 if (RT_FAILURE(rc))
2038 {
2039 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
2040 break;
2041 }
2042 memcpy(pvDstPage, (const uint8_t *)pvBinary + (iPage << PAGE_SHIFT), PAGE_SIZE);
2043 }
2044 if (RT_SUCCESS(rc))
2045 {
2046 /*
2047 * Initialize the ROM range.
2048 * Note that the Virgin member of the pages has already been initialized above.
2049 */
2050 pRomNew->GCPhys = GCPhys;
2051 pRomNew->GCPhysLast = GCPhysLast;
2052 pRomNew->cb = cb;
2053 pRomNew->fFlags = fFlags;
2054 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY ? pvBinary : NULL;
2055 pRomNew->pszDesc = pszDesc;
2056
2057 for (unsigned iPage = 0; iPage < cPages; iPage++)
2058 {
2059 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
2060 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
2061 PGM_PAGE_INIT_ZERO_REAL(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
2062 }
2063
2064 /* update the page count stats */
2065 pVM->pgm.s.cZeroPages += cPages;
2066 pVM->pgm.s.cAllPages += cPages;
2067
2068 /*
2069 * Insert the ROM range, tell REM and return successfully.
2070 */
2071 pRomNew->pNextR3 = pRom;
2072 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
2073 pRomNew->pNextRC = pRom ? MMHyperCCToRC(pVM, pRom) : NIL_RTRCPTR;
2074
2075 if (pRomPrev)
2076 {
2077 pRomPrev->pNextR3 = pRomNew;
2078 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
2079 pRomPrev->pNextRC = MMHyperCCToRC(pVM, pRomNew);
2080 }
2081 else
2082 {
2083 pVM->pgm.s.pRomRangesR3 = pRomNew;
2084 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
2085 pVM->pgm.s.pRomRangesRC = MMHyperCCToRC(pVM, pRomNew);
2086 }
2087
2088 GMMR3AllocatePagesCleanup(pReq);
2089 pgmUnlock(pVM);
2090 return VINF_SUCCESS;
2091 }
2092
2093 /* bail out */
2094
2095 pgmUnlock(pVM);
2096 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
2097 AssertRC(rc2);
2098 pgmLock(pVM);
2099 }
2100
2101 if (!fRamExists)
2102 {
2103 pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
2104 MMHyperFree(pVM, pRamNew);
2105 }
2106 }
2107 MMHyperFree(pVM, pRomNew);
2108 }
2109
2110 /** @todo Purge the mapping cache or something... */
2111 GMMR3FreeAllocatedPages(pVM, pReq);
2112 GMMR3AllocatePagesCleanup(pReq);
2113 pgmUnlock(pVM);
2114 return rc;
2115}
2116
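/*
 * Illustrative sketch: registering a shadowed 128 KB BIOS image with
 * PGMR3PhysRomRegister() above during VM construction. The device instance,
 * the guest physical address, the size and the pvBios buffer are assumptions
 * made up for the example; the call must be made while the VM state is still
 * VMSTATE_CREATING.
 */
#if 0 /* example only */
static int devExampleRegisterBios(PVM pVM, PPDMDEVINS pDevIns, const void *pvBios)
{
    /* Shadowed so a chipset device can later redirect reads/writes between ROM
       and RAM; permanent binary so the virgin copy can be verified at reset. */
    return PGMR3PhysRomRegister(pVM, pDevIns, UINT32_C(0x000e0000), _128K, pvBios,
                                PGMPHYS_ROM_FLAGS_SHADOWED | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY,
                                "Example System BIOS");
}
#endif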
2117
2118/**
2119 * \#PF Handler callback for ROM write accesses.
2120 *
2121 * @returns VINF_SUCCESS if the handler has carried out the operation.
2122 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
2123 * @param pVM VM Handle.
2124 * @param GCPhys The physical address the guest is writing to.
2125 * @param pvPhys The HC mapping of that address.
2126 * @param pvBuf What the guest is reading/writing.
2127 * @param cbBuf How much it's reading/writing.
2128 * @param enmAccessType The access type.
2129 * @param pvUser User argument.
2130 */
2131static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
2132{
2133 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
2134 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
2135 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
2136 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2137 Log5(("pgmR3PhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
2138
2139 if (enmAccessType == PGMACCESSTYPE_READ)
2140 {
2141 switch (pRomPage->enmProt)
2142 {
2143 /*
2144 * Take the default action.
2145 */
2146 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
2147 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
2148 case PGMROMPROT_READ_ROM_WRITE_RAM:
2149 case PGMROMPROT_READ_RAM_WRITE_RAM:
2150 return VINF_PGM_HANDLER_DO_DEFAULT;
2151
2152 default:
2153 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
2154 pRom->aPages[iPage].enmProt, iPage, GCPhys),
2155 VERR_INTERNAL_ERROR);
2156 }
2157 }
2158 else
2159 {
2160 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
2161 switch (pRomPage->enmProt)
2162 {
2163 /*
2164 * Ignore writes.
2165 */
2166 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
2167 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
2168 return VINF_SUCCESS;
2169
2170 /*
2171 * Write to the ram page.
2172 */
2173 case PGMROMPROT_READ_ROM_WRITE_RAM:
2174 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
2175 {
2176 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
2177 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
2178
2179 /*
2180 * Take the lock, do lazy allocation, map the page and copy the data.
2181 *
2182 * Note that we have to bypass the mapping TLB since it works on
2183 * guest physical addresses and entering the shadow page would
2184 * kind of screw things up...
2185 */
2186 int rc = pgmLock(pVM);
2187 AssertRC(rc);
2188 PPGMPAGE pShadowPage = &pRomPage->Shadow;
2189 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
2190 {
2191 pShadowPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
2192 AssertLogRelReturn(pShadowPage, VERR_INTERNAL_ERROR);
2193 }
2194
2195 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pShadowPage) != PGM_PAGE_STATE_ALLOCATED))
2196 {
2197 rc = pgmPhysPageMakeWritable(pVM, pShadowPage, GCPhys);
2198 if (RT_FAILURE(rc))
2199 {
2200 pgmUnlock(pVM);
2201 return rc;
2202 }
2203 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
2204 }
2205
2206 void *pvDstPage;
2207 PPGMPAGEMAP pMapIgnored;
2208 int rc2 = pgmPhysPageMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pMapIgnored, &pvDstPage);
2209 if (RT_SUCCESS(rc2))
2210 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
2211 else
2212 rc = rc2;
2213
2214 pgmUnlock(pVM);
2215 return rc;
2216 }
2217
2218 default:
2219 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
2220 pRom->aPages[iPage].enmProt, iPage, GCPhys),
2221 VERR_INTERNAL_ERROR);
2222 }
2223 }
2224}
2225
2226
2227/**
2228 * Called by PGMR3Reset to reset the shadow, switch to the virgin,
2229 * and verify that the virgin part is untouched.
2230 *
2231 * This is done after the normal memory has been cleared.
2232 *
2233 * ASSUMES that the caller owns the PGM lock.
2234 *
2235 * @param pVM The VM handle.
2236 */
2237int pgmR3PhysRomReset(PVM pVM)
2238{
2239 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2240 {
2241 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
2242
2243 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
2244 {
2245 /*
2246 * Reset the physical handler.
2247 */
2248 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
2249 AssertRCReturn(rc, rc);
2250
2251 /*
2252 * What we do with the shadow pages depends on the memory
2253 * preallocation option. If not enabled, we'll just throw
2254 * out all the dirty pages and replace them by the zero page.
2255 */
2256 if (!pVM->pgm.s.fRamPreAlloc)
2257 {
2258 /* Free the dirty pages. */
2259 uint32_t cPendingPages = 0;
2260 PGMMFREEPAGESREQ pReq;
2261 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2262 AssertRCReturn(rc, rc);
2263
2264 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2265 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
2266 {
2267 Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) == PGM_PAGE_STATE_ALLOCATED);
2268 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRom->aPages[iPage].Shadow, pRom->GCPhys + (iPage << PAGE_SHIFT));
2269 AssertLogRelRCReturn(rc, rc);
2270 }
2271
2272 if (cPendingPages)
2273 {
2274 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2275 AssertLogRelRCReturn(rc, rc);
2276 }
2277 GMMR3FreePagesCleanup(pReq);
2278 }
2279 else
2280 {
2281 /* clear all the shadow pages. */
2282 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2283 {
2284 Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO);
2285
2286 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
2287 rc = pgmPhysPageMakeWritable(pVM, &pRom->aPages[iPage].Shadow, GCPhys);
2288 if (RT_FAILURE(rc))
2289 break;
2290
2291 void *pvDstPage;
2292 PPGMPAGEMAP pMapIgnored;
2293 rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pMapIgnored, &pvDstPage);
2294 if (RT_FAILURE(rc))
2295 break;
2296 ASMMemZeroPage(pvDstPage);
2297 }
2298 AssertRCReturn(rc, rc);
2299 }
2300 }
2301
2302#ifdef VBOX_STRICT
2303 /*
2304 * Verify that the virgin page is unchanged if possible.
2305 */
2306 if (pRom->pvOriginal)
2307 {
2308 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
2309 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbSrcPage += PAGE_SIZE)
2310 {
2311 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
2312 PPGMPAGEMAP pMapIgnored;
2313 void *pvDstPage;
2314 int rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pMapIgnored, &pvDstPage);
2315 if (RT_FAILURE(rc))
2316 break;
2317 if (memcmp(pvDstPage, pbSrcPage, PAGE_SIZE))
2318 LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
2319 GCPhys, pRom->pszDesc));
2320 }
2321 }
2322#endif
2323 }
2324
2325 return VINF_SUCCESS;
2326}
2327
2328
2329/**
2330 * Change the shadowing of a range of ROM pages.
2331 *
2332 * This is intended for implementing chipset specific memory registers
2333 * and will not be very strict about the input. It will silently ignore
2334 * any pages that are not part of a shadowed ROM.
2335 *
2336 * @returns VBox status code.
2337 * @retval VINF_PGM_SYNC_CR3
2338 *
2339 * @param pVM Pointer to the shared VM structure.
2340 * @param GCPhys Where to start. Page aligned.
2341 * @param cb How much to change. Page aligned.
2342 * @param enmProt The new ROM protection.
2343 */
2344VMMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
2345{
2346 /*
2347 * Check input
2348 */
2349 if (!cb)
2350 return VINF_SUCCESS;
2351 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2352 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2353 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2354 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2355 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
2356
2357 /*
2358 * Process the request.
2359 */
2360 int rc = VINF_SUCCESS;
2361 bool fFlushTLB = false;
2362 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2363 if ( GCPhys <= pRom->GCPhysLast
2364 && GCPhysLast >= pRom->GCPhys
2365 && (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
2366 {
2367 /*
2368 * Iterate the relevant pages and make the necessary changes.
2369 */
2370 bool fChanges = false;
2371 uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
2372 ? pRom->cb >> PAGE_SHIFT
2373 : (GCPhysLast - pRom->GCPhys + 1) >> PAGE_SHIFT;
2374 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
2375 iPage < cPages;
2376 iPage++)
2377 {
2378 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2379 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
2380 {
2381 fChanges = true;
2382
2383 /* flush references to the page. */
2384 PPGMPAGE pRamPage = pgmPhysGetPage(&pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));
2385 int rc2 = pgmPoolTrackFlushGCPhys(pVM, pRamPage, &fFlushTLB);
2386 if (rc2 != VINF_SUCCESS && (rc == VINF_SUCCESS || RT_FAILURE(rc2)))
2387 rc = rc2;
2388
2389 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
2390 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
2391
2392 *pOld = *pRamPage;
2393 *pRamPage = *pNew;
2394 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
2395 }
2396 pRomPage->enmProt = enmProt;
2397 }
2398
2399 /*
2400 * Reset the access handler if we made changes, no need
2401 * to optimize this.
2402 */
2403 if (fChanges)
2404 {
2405 int rc2 = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
2406 AssertRCReturn(rc2, rc2);
2407 }
2408
2409 /* Advance - cb isn't updated. */
2410 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
2411 }
2412
2413 if (fFlushTLB)
2414 PGM_INVL_GUEST_TLBS();
2415 return rc;
2416}
2417
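/*
 * Illustrative sketch: how a chipset device might react to a PAM-style shadow
 * register write by changing the protection of a 16 KB shadowed ROM window via
 * PGMR3PhysRomProtect() above. The window address and the fWriteEnable flag are
 * assumptions made up for the example.
 */
#if 0 /* example only */
static int devExampleChipsetSetShadow(PVM pVM, bool fWriteEnable)
{
    PGMROMPROT enmProt = fWriteEnable
                       ? PGMROMPROT_READ_RAM_WRITE_RAM     /* reads and writes hit the shadow RAM */
                       : PGMROMPROT_READ_ROM_WRITE_IGNORE; /* reads hit the ROM, writes are dropped */
    /* May return VINF_PGM_SYNC_CR3; the necessary CR3 sync is flagged for the VMM. */
    return PGMR3PhysRomProtect(pVM, UINT32_C(0x000c0000), _16K, enmProt);
}
#endif
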
2418#ifndef VBOX_WITH_NEW_PHYS_CODE
2419
2420/**
2421 * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
2422 * registration APIs call to inform PGM about memory registrations.
2423 *
2424 * It registers the physical memory range with PGM. MM is responsible
2425 * for the toplevel things - allocation and locking - while PGM is taking
2426 * care of all the details and implements the physical address space virtualization.
2427 *
2428 * @returns VBox status.
2429 * @param pVM The VM handle.
2430 * @param pvRam HC virtual address of the RAM range. (page aligned)
2431 * @param GCPhys GC physical address of the RAM range. (page aligned)
2432 * @param cb Size of the RAM range. (page aligned)
2433 * @param fFlags Flags, MM_RAM_*.
2434 * @param paPages Pointer to an array of physical page descriptors.
2435 * @param pszDesc Description string.
2436 */
2437VMMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
2438{
2439 /*
2440 * Validate input.
2441 * (Not so important because callers are only MMR3PhysRegister()
2442 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
2443 */
2444 Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
2445
2446 Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
2447 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
2448 Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
2449 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
2450 Assert(!(fFlags & ~0xfff));
2451 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
2452 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
2453 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
2454 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2455 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2456 if (GCPhysLast < GCPhys)
2457 {
2458 AssertMsgFailed(("The range wraps! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2459 return VERR_INVALID_PARAMETER;
2460 }
2461
2462 /*
2463 * Find range location and check for conflicts.
2464 */
2465 PPGMRAMRANGE pPrev = NULL;
2466 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
2467 while (pCur)
2468 {
2469 if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
2470 {
2471 AssertMsgFailed(("Conflict! This cannot happen!\n"));
2472 return VERR_PGM_RAM_CONFLICT;
2473 }
2474 if (GCPhysLast < pCur->GCPhys)
2475 break;
2476
2477 /* next */
2478 pPrev = pCur;
2479 pCur = pCur->pNextR3;
2480 }
2481
2482 /*
2483 * Allocate RAM range.
2484 * Small ranges are allocated from the heap, big ones have separate mappings.
2485 */
2486 size_t cbRam = RT_OFFSETOF(PGMRAMRANGE, aPages[cb >> PAGE_SHIFT]);
2487 PPGMRAMRANGE pNew;
2488 int rc = VERR_NO_MEMORY;
2489 if (cbRam > PAGE_SIZE / 2)
2490 { /* large */
2491 cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
2492 rc = MMR3HyperAllocOnceNoRel(pVM, cbRam, PAGE_SIZE, MM_TAG_PGM_PHYS, (void **)&pNew);
2493 AssertMsgRC(rc, ("MMR3HyperAllocOnceNoRel(,%#x,,) -> %Rrc\n", cbRam, rc));
2494 }
2495 else
2496 { /* small */
2497 rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
2498 AssertMsgRC(rc, ("MMHyperAlloc(,%#x,,,) -> %Rrc\n", cbRam, rc));
2499 }
2500 if (RT_SUCCESS(rc))
2501 {
2502 /*
2503 * Initialize the range.
2504 */
2505 pNew->pvR3 = pvRam;
2506 pNew->GCPhys = GCPhys;
2507 pNew->GCPhysLast = GCPhysLast;
2508 pNew->cb = cb;
2509 pNew->fFlags = fFlags;
2510 pNew->paChunkR3Ptrs = NULL;
2511
2512 unsigned iPage = (unsigned)(cb >> PAGE_SHIFT);
2513 if (paPages)
2514 {
2515 while (iPage-- > 0)
2516 {
2517 PGM_PAGE_INIT(&pNew->aPages[iPage], paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
2518 fFlags & MM_RAM_FLAGS_MMIO2 ? PGMPAGETYPE_MMIO2 : PGMPAGETYPE_RAM,
2519 PGM_PAGE_STATE_ALLOCATED);
2520 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
2521 }
2522 }
2523 else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
2524 {
2525 /* Allocate memory for chunk to HC ptr lookup array. */
2526 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
2527 AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Rrc\n", cbRam, rc), rc);
2528
2529 /* Physical memory will be allocated on demand. */
2530 while (iPage-- > 0)
2531 {
2532 PGM_PAGE_INIT(&pNew->aPages[iPage], 0, NIL_GMM_PAGEID, PGMPAGETYPE_RAM, PGM_PAGE_STATE_ZERO);
2533 pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
2534 }
2535 }
2536 else
2537 {
2538 Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
2539 RTHCPHYS HCPhysDummyPage = MMR3PageDummyHCPhys(pVM);
2540 while (iPage-- > 0)
2541 {
2542 PGM_PAGE_INIT(&pNew->aPages[iPage], HCPhysDummyPage, NIL_GMM_PAGEID, PGMPAGETYPE_MMIO, PGM_PAGE_STATE_ZERO);
2543 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
2544 }
2545 }
2546
2547 /*
2548 * Insert the new RAM range.
2549 */
2550 pgmLock(pVM);
2551 pNew->pNextR3 = pCur;
2552 pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : NIL_RTR0PTR;
2553 pNew->pNextRC = pCur ? MMHyperCCToRC(pVM, pCur) : NIL_RTRCPTR;
2554 if (pPrev)
2555 {
2556 pPrev->pNextR3 = pNew;
2557 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
2558 pPrev->pNextRC = MMHyperCCToRC(pVM, pNew);
2559 }
2560 else
2561 {
2562 pVM->pgm.s.pRamRangesR3 = pNew;
2563 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
2564 pVM->pgm.s.pRamRangesRC = MMHyperCCToRC(pVM, pNew);
2565 }
2566 pgmUnlock(pVM);
2567 }
2568 return rc;
2569}
2570
2571
2572/**
2573 * Register a chunk of the physical memory range with PGM. MM is responsible
2574 * for the toplevel things - allocation and locking - while PGM is taking
2575 * care of all the details and implements the physical address space virtualization.
2576 *
2577 *
2578 * @returns VBox status.
2579 * @param pVM The VM handle.
2580 * @param pvRam HC virtual address of the RAM range. (page aligned)
2581 * @param GCPhys GC physical address of the RAM range. (page aligned)
2582 * @param cb Size of the RAM range. (page aligned)
2583 * @param fFlags Flags, MM_RAM_*.
2584 * @param paPages Pointer to an array of physical page descriptors.
2585 * @param pszDesc Description string.
2586 */
2587VMMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
2588{
2589 NOREF(pszDesc);
2590
2591 /*
2592 * Validate input.
2593 * (Not so important because callers are only MMR3PhysRegister()
2594 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
2595 */
2596 Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
2597
2598 Assert(paPages);
2599 Assert(pvRam);
2600 Assert(!(fFlags & ~0xfff));
2601 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
2602 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
2603 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
2604 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2605 Assert(VM_IS_EMT(pVM));
2606 Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
2607 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
2608
2609 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2610 if (GCPhysLast < GCPhys)
2611 {
2612 AssertMsgFailed(("The range wraps! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2613 return VERR_INVALID_PARAMETER;
2614 }
2615
2616 /*
2617 * Find existing range location.
2618 */
2619 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2620 while (pRam)
2621 {
2622 RTGCPHYS off = GCPhys - pRam->GCPhys;
2623 if ( off < pRam->cb
2624 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
2625 break;
2626
2627 pRam = pRam->CTX_SUFF(pNext);
2628 }
2629 AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
2630
2631 unsigned off = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2632 unsigned iPage = (unsigned)(cb >> PAGE_SHIFT);
2633 if (paPages)
2634 {
2635 while (iPage-- > 0)
2636 pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
2637 }
2638 off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
2639 pRam->paChunkR3Ptrs[off] = (uintptr_t)pvRam;
2640
2641 /* Notify the recompiler. */
2642 REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);
2643
2644 return VINF_SUCCESS;
2645}
2646
2647
2648/**
2649 * Allocate missing physical pages for an existing guest RAM range.
2650 *
2651 * @returns VBox status.
2652 * @param pVM The VM handle.
2653 * @param pGCPhys Pointer to the GC physical address of the RAM range. (page aligned)
2654 */
2655VMMR3DECL(int) PGM3PhysGrowRange(PVM pVM, PCRTGCPHYS pGCPhys)
2656{
2657 RTGCPHYS GCPhys = *pGCPhys;
2658
2659 /*
2660 * Walk range list.
2661 */
2662 pgmLock(pVM);
2663
2664 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2665 while (pRam)
2666 {
2667 RTGCPHYS off = GCPhys - pRam->GCPhys;
2668 if ( off < pRam->cb
2669 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
2670 {
2671 bool fRangeExists = false;
2672 unsigned iChunk = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
2673
2674 /* Note: A request made from another thread may end up in EMT after somebody else has already allocated the range. */
2675 if (pRam->paChunkR3Ptrs[iChunk])
2676 fRangeExists = true;
2677
2678 pgmUnlock(pVM);
2679 if (fRangeExists)
2680 return VINF_SUCCESS;
2681 return pgmr3PhysGrowRange(pVM, GCPhys);
2682 }
2683
2684 pRam = pRam->CTX_SUFF(pNext);
2685 }
2686 pgmUnlock(pVM);
2687 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2688}
2689
2690
2691/**
2692 * Allocate missing physical pages for an existing guest RAM range.
2693 *
2694 * @returns VBox status.
2695 * @param pVM The VM handle.
2697 * @param GCPhys GC physical address of the RAM range. (page aligned)
2698 */
2699int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
2700{
2701 void *pvRam;
2702 int rc;
2703
2704 /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
2705 if (!VM_IS_EMT(pVM))
2706 {
2707 PVMREQ pReq;
2708 const RTGCPHYS GCPhysParam = GCPhys;
2709
2710 AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));
2711
2712 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, &GCPhysParam);
2713 if (RT_SUCCESS(rc))
2714 {
2715 rc = pReq->iStatus;
2716 VMR3ReqFree(pReq);
2717 }
2718 return rc;
2719 }
2720
2721 /* Round down to chunk boundary */
2722 GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;
2723
2724 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DynRamGrow);
2725 STAM_COUNTER_ADD(&pVM->pgm.s.StatR3DynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));
2726
2727 Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %RGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));
2728
2729 unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;
2730
2731 for (;;)
2732 {
2733 rc = SUPPageAlloc(cPages, &pvRam);
2734 if (RT_SUCCESS(rc))
2735 {
2736 rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
2737 if (RT_SUCCESS(rc))
2738 return rc;
2739
2740 SUPPageFree(pvRam, cPages);
2741 }
2742
2743 VMSTATE enmVMState = VMR3GetState(pVM);
2744 if (enmVMState != VMSTATE_RUNNING)
2745 {
2746 AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %RGp!\n", GCPhys));
2747 LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %RGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
2748 return rc;
2749 }
2750
2751 LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));
2752
2753 /* Pause first, then inform Main. */
2754 rc = VMR3SuspendNoSave(pVM);
2755 AssertRC(rc);
2756
2757 VMSetRuntimeError(pVM, false, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM");
2758
2759 /* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
2760 rc = VMR3WaitForResume(pVM);
2761
2762 /* Retry */
2763 LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
2764 }
2765}
2766
2767
2768/**
2769 * Interface that MMR3RomRegister() and MMR3PhysReserve() call to update the
2770 * flags of existing RAM ranges.
2771 *
2772 * @returns VBox status.
2773 * @param pVM The VM handle.
2774 * @param GCPhys GC physical address of the RAM range. (page aligned)
2775 * @param cb Size of the RAM range. (page aligned)
2776 * @param fFlags The Or flags, MM_RAM_* \#defines.
2777 * @param fMask The and mask for the flags.
2778 */
2779VMMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
2780{
2781 Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));
2782
2783 /*
2784 * Validate input.
2785 * (Not so important because caller is always MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
2786 */
2787 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
2788 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
2789 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2790 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2791 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2792
2793 /*
2794 * Lookup the range.
2795 */
2796 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2797 while (pRam && GCPhys > pRam->GCPhysLast)
2798 pRam = pRam->CTX_SUFF(pNext);
2799 if ( !pRam
2800 || GCPhys > pRam->GCPhysLast
2801 || GCPhysLast < pRam->GCPhys)
2802 {
2803 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
2804 return VERR_INVALID_PARAMETER;
2805 }
2806
2807 /*
2808 * Update the requested flags.
2809 */
2810 RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
2811 | fMask;
2812 unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
2813 unsigned iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2814 for ( ; iPage < iPageEnd; iPage++)
2815 pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */
2816
2817 return VINF_SUCCESS;
2818}
2819
2820#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2821
2822/**
2823 * Sets the Address Gate 20 state.
2824 *
2825 * @param pVM VM handle.
2826 * @param fEnable True if the gate should be enabled.
2827 * False if the gate should be disabled.
2828 */
2829VMMDECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable)
2830{
2831 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVM->pgm.s.fA20Enabled));
2832 if (pVM->pgm.s.fA20Enabled != fEnable)
2833 {
2834 pVM->pgm.s.fA20Enabled = fEnable;
2835 pVM->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
2836 REMR3A20Set(pVM, fEnable);
2837 /** @todo we're not handling this correctly for VT-x / AMD-V. See #2911 */
2838 }
2839}
2840
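/*
 * Illustrative sketch: what the GCPhysA20Mask expression in PGMR3PhysSetA20()
 * above evaluates to. With the gate enabled no address bits are masked; with it
 * disabled bit 20 is forced to zero, giving the classic 1 MB wrap-around. The
 * helper below exists only for this example.
 */
#if 0 /* example only */
static void pgmExampleA20Mask(void)
{
    RTGCPHYS const fMaskEnabled  = ~(RTGCPHYS)(!true  << 20); /* == ~(RTGCPHYS)0   -> all bits pass  */
    RTGCPHYS const fMaskDisabled = ~(RTGCPHYS)(!false << 20); /* == ~(RTGCPHYS)_1M -> bit 20 cleared */
    Assert((UINT32_C(0x00100000) & fMaskDisabled) == 0);      /* 1 MB aliases back to 0. */
    Assert((UINT32_C(0x00100000) & fMaskEnabled)  == UINT32_C(0x00100000));
    NOREF(fMaskEnabled); NOREF(fMaskDisabled);
}
#endif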
2841
2842/**
2843 * Tree enumeration callback for dealing with age rollover.
2844 * It will perform a simple compression of the current age.
2845 */
2846static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
2847{
2848 /* Age compression - ASSUMES iNow == 4. */
2849 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
2850 if (pChunk->iAge >= UINT32_C(0xffffff00))
2851 pChunk->iAge = 3;
2852 else if (pChunk->iAge >= UINT32_C(0xfffff000))
2853 pChunk->iAge = 2;
2854 else if (pChunk->iAge)
2855 pChunk->iAge = 1;
2856 else /* iAge = 0 */
2857 pChunk->iAge = 4;
2858
2859 /* reinsert */
2860 PVM pVM = (PVM)pvUser;
2861 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
2862 pChunk->AgeCore.Key = pChunk->iAge;
2863 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2864 return 0;
2865}
2866
2867
2868/**
2869 * Tree enumeration callback that updates the chunks that have
2870 * been used since the last ageing.
2871 */
2872static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
2873{
2874 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
2875 if (!pChunk->iAge)
2876 {
2877 PVM pVM = (PVM)pvUser;
2878 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
2879 pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
2880 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2881 }
2882
2883 return 0;
2884}
2885
2886
2887/**
2888 * Performs ageing of the ring-3 chunk mappings.
2889 *
2890 * @param pVM The VM handle.
2891 */
2892VMMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
2893{
2894 pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
2895 pVM->pgm.s.ChunkR3Map.iNow++;
2896 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
2897 {
2898 pVM->pgm.s.ChunkR3Map.iNow = 4;
2899 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
2900 }
2901 else
2902 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
2903}
2904
2905
2906/**
2907 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
2908 */
2909typedef struct PGMR3PHYSCHUNKUNMAPCB
2910{
2911 PVM pVM; /**< The VM handle. */
2912 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
2913} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
2914
2915
2916/**
2917 * Callback used to find the mapping that's been unused for
2918 * the longest time.
2919 */
2920static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
2921{
2922 do
2923 {
2924 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
2925 if ( pChunk->iAge
2926 && !pChunk->cRefs)
2927 {
2928 /*
2929 * Check that it's not in any of the TLBs.
2930 */
2931 PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
2932 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
2933 if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
2934 {
2935 pChunk = NULL;
2936 break;
2937 }
2938 if (pChunk)
2939 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
2940 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
2941 {
2942 pChunk = NULL;
2943 break;
2944 }
2945 if (pChunk)
2946 {
2947 ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
2948 return 1; /* done */
2949 }
2950 }
2951
2952 /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
2953 pNode = pNode->pList;
2954 } while (pNode);
2955 return 0;
2956}
2957
2958
2959/**
2960 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
2961 *
2962 * The candidate will not be part of any TLBs, so no need to flush
2963 * anything afterwards.
2964 *
2965 * @returns Chunk id.
2966 * @param pVM The VM handle.
2967 */
2968static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
2969{
2970 /*
2971 * Do tree ageing first?
2972 */
2973 if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
2974 PGMR3PhysChunkAgeing(pVM);
2975
2976 /*
2977 * Enumerate the age tree starting with the left most node.
2978 */
2979 PGMR3PHYSCHUNKUNMAPCB Args;
2980 Args.pVM = pVM;
2981 Args.pChunk = NULL;
2982 if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
2983 return Args.pChunk->Core.Key;
2984 return INT32_MAX;
2985}
2986
2987
2988/**
2989 * Maps the given chunk into the ring-3 mapping cache.
2990 *
2991 * This will call ring-0.
2992 *
2993 * @returns VBox status code.
2994 * @param pVM The VM handle.
2995 * @param idChunk The chunk in question.
2996 * @param ppChunk Where to store the chunk tracking structure.
2997 *
2998 * @remarks Called from within the PGM critical section.
2999 */
3000int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
3001{
3002 int rc;
3003 /*
3004 * Allocate a new tracking structure first.
3005 */
3006#if 0 /* for later when we've got a separate mapping method for ring-0. */
3007 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
3008 AssertReturn(pChunk, VERR_NO_MEMORY);
3009#else
3010 PPGMCHUNKR3MAP pChunk;
3011 rc = MMHyperAlloc(pVM, sizeof(*pChunk), 0, MM_TAG_PGM_CHUNK_MAPPING, (void **)&pChunk);
3012 AssertRCReturn(rc, rc);
3013#endif
3014 pChunk->Core.Key = idChunk;
3015 pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
3016 pChunk->iAge = 0;
3017 pChunk->cRefs = 0;
3018 pChunk->cPermRefs = 0;
3019 pChunk->pv = NULL;
3020
3021 /*
3022 * Request the ring-0 part to map the chunk in question and if
3023 * necessary unmap another one to make space in the mapping cache.
3024 */
3025 GMMMAPUNMAPCHUNKREQ Req;
3026 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
3027 Req.Hdr.cbReq = sizeof(Req);
3028 Req.pvR3 = NULL;
3029 Req.idChunkMap = idChunk;
3030 Req.idChunkUnmap = NIL_GMM_CHUNKID;
3031 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
3032 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
3033 rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
3034 if (RT_SUCCESS(rc))
3035 {
3036 /*
3037 * Update the tree.
3038 */
3039 /* insert the new one. */
3040 AssertPtr(Req.pvR3);
3041 pChunk->pv = Req.pvR3;
3042 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
3043 AssertRelease(fRc);
3044 pVM->pgm.s.ChunkR3Map.c++;
3045
3046 fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
3047 AssertRelease(fRc);
3048
3049 /* remove the unmapped one. */
3050 if (Req.idChunkUnmap != NIL_GMM_CHUNKID)
3051 {
3052 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
3053 AssertRelease(pUnmappedChunk);
3054 pUnmappedChunk->pv = NULL;
3055 pUnmappedChunk->Core.Key = UINT32_MAX;
3056#if 0 /* for later when we've got a separate mapping method for ring-0. */
3057 MMR3HeapFree(pUnmappedChunk);
3058#else
3059 MMHyperFree(pVM, pUnmappedChunk);
3060#endif
3061 pVM->pgm.s.ChunkR3Map.c--;
3062 }
3063 }
3064 else
3065 {
3066 AssertRC(rc);
3067#if 0 /* for later when we've got a separate mapping method for ring-0. */
3068 MMR3HeapFree(pChunk);
3069#else
3070 MMHyperFree(pVM, pChunk);
3071#endif
3072 pChunk = NULL;
3073 }
3074
3075 *ppChunk = pChunk;
3076 return rc;
3077}
3078
3079
3080/**
3081 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
3082 *
3083 * @returns see pgmR3PhysChunkMap.
3084 * @param pVM The VM handle.
3085 * @param idChunk The chunk to map.
3086 */
3087VMMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
3088{
3089 PPGMCHUNKR3MAP pChunk;
3090 return pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
3091}
3092
3093
3094/**
3095 * Invalidates the TLB for the ring-3 mapping cache.
3096 *
3097 * @param pVM The VM handle.
3098 */
3099VMMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
3100{
3101 pgmLock(pVM);
3102 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
3103 {
3104 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
3105 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
3106 }
3107 pgmUnlock(pVM);
3108}
3109
3110
3111/**
3112 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
3113 *
3114 * @returns The following VBox status codes.
3115 * @retval VINF_SUCCESS on success. FF cleared.
3116 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
3117 *
3118 * @param pVM The VM handle.
3119 */
3120VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
3121{
3122 pgmLock(pVM);
3123
3124 /*
3125 * Allocate more pages, noting down the index of the first new page.
3126 */
3127 uint32_t iClear = pVM->pgm.s.cHandyPages;
3128 AssertMsgReturn(iClear <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d", iClear), VERR_INTERNAL_ERROR);
3129 Log(("PGMR3PhysAllocateHandyPages: %d -> %d\n", iClear, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
3130 int rcAlloc = VINF_SUCCESS;
3131 int rcSeed = VINF_SUCCESS;
3132 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
3133 while (rc == VERR_GMM_SEED_ME)
3134 {
3135 void *pvChunk;
3136 rcAlloc = rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
3137 if (RT_SUCCESS(rc))
3138 {
3139 rcSeed = rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
3140 if (RT_FAILURE(rc))
3141 SUPPageFree(pvChunk, GMM_CHUNK_SIZE >> PAGE_SHIFT);
3142 }
3143 if (RT_SUCCESS(rc))
3144 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
3145 }
3146
3147 /*
3148 * Clear the pages.
3149 */
3150 if (RT_SUCCESS(rc))
3151 {
3152 while (iClear < pVM->pgm.s.cHandyPages)
3153 {
3154 PGMMPAGEDESC pPage = &pVM->pgm.s.aHandyPages[iClear];
3155 void *pv;
3156 rc = pgmPhysPageMapByPageID(pVM, pPage->idPage, pPage->HCPhysGCPhys, &pv);
3157 AssertLogRelMsgBreak(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc", pPage->idPage, pPage->HCPhysGCPhys, rc));
3158 ASMMemZeroPage(pv);
3159 iClear++;
3160 Log3(("PGMR3PhysAllocateHandyPages: idPage=%#x HCPhys=%RGp\n", pPage->idPage, pPage->HCPhysGCPhys));
3161 }
3162
3163 VM_FF_CLEAR(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
3164 }
3165 else
3166 {
3167 LogRel(("PGM: Failed to procure handy pages; rc=%Rrc rcAlloc=%Rrc rcSeed=%Rrc cHandyPages=%#x\n"
3168 " cAllPages=%#x cPrivatePages=%#x cSharedPages=%#x cZeroPages=%#x\n",
3169 rc, rcAlloc, rcSeed,
3170 pVM->pgm.s.cHandyPages,
3171 pVM->pgm.s.cAllPages,
3172 pVM->pgm.s.cPrivatePages,
3173 pVM->pgm.s.cSharedPages,
3174 pVM->pgm.s.cZeroPages));
3175#if 1
3176 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
3177 {
3178 LogRel(("PGM: aHandyPages[#%#04x] = {.HCPhysGCPhys=%RHp, .idPage=%#08x, .idSharedPage=%#08x}\n",
3179 i, pVM->pgm.s.aHandyPages[i].HCPhysGCPhys, pVM->pgm.s.aHandyPages[i].idPage,
3180 pVM->pgm.s.aHandyPages[i].idSharedPage));
3181 uint32_t const idPage = pVM->pgm.s.aHandyPages[i].idPage;
3182 if (idPage != NIL_GMM_PAGEID)
3183 {
3184 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
3185 pRam;
3186 pRam = pRam->pNextR3)
3187 {
3188 uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
3189 for (uint32_t iPage = 0; iPage < cPages; iPage++)
3190 if (PGM_PAGE_GET_PAGEID(&pRam->aPages[iPage]) == idPage)
3191 LogRel(("PGM: Used by %RGp %R[pgmpage] (%s)\n",
3192 pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pRam->aPages[iPage], pRam->pszDesc));
3193 }
3194 }
3195 }
3196#endif
3197 rc = VERR_EM_NO_MEMORY;
3198 //rc = VINF_EM_NO_MEMORY;
3199 //VM_FF_SET(pVM, VM_FF_PGM_WE_ARE_SCREWED?);
3200 }
3201
3202/** @todo Do proper VERR_EM_NO_MEMORY reporting. */
3203 AssertMsg( pVM->pgm.s.cHandyPages == RT_ELEMENTS(pVM->pgm.s.aHandyPages)
3204 || rc != VINF_SUCCESS, ("%d rc=%Rrc\n", pVM->pgm.s.cHandyPages, rc));
3205
3206 pgmUnlock(pVM);
3207 Assert(rc == VINF_SUCCESS || rc == VINF_EM_NO_MEMORY || rc == VERR_EM_NO_MEMORY);
3208 return rc;
3209}
3210
3211
3212/**
3213 * Frees the specified RAM page and replaces it with the ZERO page.
3214 *
3215 * This is used by ballooning, remapping MMIO2 and RAM reset.
3216 *
3217 * @param pVM Pointer to the shared VM structure.
3218 * @param pReq Pointer to the free pages request.
3218 * @param pcPendingPages Where the number of pages queued in pReq is kept; the request is flushed when the batch size is reached.
3219 * @param pPage Pointer to the page structure.
3220 * @param GCPhys The guest physical address of the page, if applicable.
3221 *
3222 * @remarks The caller must own the PGM lock.
3223 */
3224static int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys)
3225{
3226 /*
3227 * Assert sanity.
3228 */
3229 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
3230 if (RT_UNLIKELY( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
3231 && PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW))
3232 {
3233 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
3234 return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
3235 }
3236
3237 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO)
3238 return VINF_SUCCESS;
3239
3240 const uint32_t idPage = PGM_PAGE_GET_PAGEID(pPage);
3241 Log3(("pgmPhysFreePage: idPage=%#x HCPhys=%RHp pPage=%R[pgmpage]\n", idPage, PGM_PAGE_GET_HCPHYS(pPage), pPage));
3242 if (RT_UNLIKELY( idPage == NIL_GMM_PAGEID
3243 || idPage > GMM_PAGEID_LAST
3244 || PGM_PAGE_GET_CHUNKID(pPage) == NIL_GMM_CHUNKID))
3245 {
3246 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
3247 return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, idPage);
3248 }
3249
3250 /* update page count stats. */
3251 if (PGM_PAGE_IS_SHARED(pPage))
3252 pVM->pgm.s.cSharedPages--;
3253 else
3254 pVM->pgm.s.cPrivatePages--;
3255 pVM->pgm.s.cZeroPages++;
3256
3257 /*
3258 * pPage = ZERO page.
3259 */
3260 PGM_PAGE_SET_HCPHYS(pPage, pVM->pgm.s.HCPhysZeroPg);
3261 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
3262 PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
3263
3264 /*
3265 * Make sure it's not in the handy page array.
3266 */
3267 uint32_t i = pVM->pgm.s.cHandyPages;
3268 while (i < RT_ELEMENTS(pVM->pgm.s.aHandyPages))
3269 {
3270 if (pVM->pgm.s.aHandyPages[i].idPage == idPage)
3271 {
3272 pVM->pgm.s.aHandyPages[i].idPage = NIL_GMM_PAGEID;
3273 break;
3274 }
3275 if (pVM->pgm.s.aHandyPages[i].idSharedPage == idPage)
3276 {
3277 pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
3278 break;
3279 }
3280 i++;
3281 }
3282
3283 /*
3284 * Push it onto the page array.
3285 */
3286 uint32_t iPage = *pcPendingPages;
3287 Assert(iPage < PGMPHYS_FREE_PAGE_BATCH_SIZE);
3288 *pcPendingPages += 1;
3289
3290 pReq->aPages[iPage].idPage = idPage;
3291
3292 if (iPage + 1 < PGMPHYS_FREE_PAGE_BATCH_SIZE)
3293 return VINF_SUCCESS;
3294
3295 /*
3296 * Flush the pages.
3297 */
3298 int rc = GMMR3FreePagesPerform(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE);
3299 if (RT_SUCCESS(rc))
3300 {
3301 GMMR3FreePagesRePrep(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
3302 *pcPendingPages = 0;
3303 }
3304 return rc;
3305}
3306
3307
3308/**
3309 * Converts a GC physical address to a HC ring-3 pointer, with some
3310 * additional checks.
3311 *
3312 * @returns VBox status code.
3313 * @retval VINF_SUCCESS on success.
3314 * @retval VINF_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3315 * access handler of some kind.
3316 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3317 * accesses or is odd in any way.
3318 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3319 *
3320 * @param pVM The VM handle.
3321 * @param GCPhys The GC physical address to convert.
3322 * @param fWritable Whether write access is required.
3323 * @param ppv Where to store the pointer corresponding to GCPhys on
3324 * success.
3325 */
3326VMMR3DECL(int) PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
3327{
3328 pgmLock(pVM);
3329
3330 PPGMRAMRANGE pRam;
3331 PPGMPAGE pPage;
3332 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
3333 if (RT_SUCCESS(rc))
3334 {
3335#ifdef VBOX_WITH_NEW_PHYS_CODE
3336 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
3337 rc = VINF_SUCCESS;
3338 else
3339 {
3340 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3341 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3342 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
3343 {
3344 /** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
3345 * in -norawr0 mode. */
3346 if (fWritable)
3347 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
3348 }
3349 else
3350 {
3351 /* Temporarily disabled physical handler(s); since the recompiler
3352 doesn't get notified when it's reset, we'll have to pretend it's
3353 operating normally. */
3354 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
3355 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3356 else
3357 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
3358 }
3359 }
3360 if (RT_SUCCESS(rc))
3361 {
3362 int rc2;
3363
3364 /* Make sure what we return is writable. */
3365 if (fWritable && rc != VINF_PGM_PHYS_TLB_CATCH_WRITE)
3366 switch (PGM_PAGE_GET_STATE(pPage))
3367 {
3368 case PGM_PAGE_STATE_ALLOCATED:
3369 break;
3370 case PGM_PAGE_STATE_ZERO:
3371 case PGM_PAGE_STATE_SHARED:
3372 case PGM_PAGE_STATE_WRITE_MONITORED:
3373 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
3374 AssertLogRelRCReturn(rc2, rc2);
3375 break;
3376 }
3377
3378 /* Get a ring-3 mapping of the address. */
3379 PPGMPAGER3MAPTLBE pTlbe;
3380 rc2 = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
3381 AssertLogRelRCReturn(rc2, rc2);
3382 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
3383 /** @todo mapping/locking hell; this isn't horribly efficient since
3384 * pgmPhysPageLoadIntoTlb will repeat the lookup we've done here. */
3385
3386 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
3387 }
3388 else
3389 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
3390
3391 /* else: handler catching all access, no pointer returned. */
3392
3393#else
3394 if (0)
3395 /* nothing */;
3396 else if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
3397 {
3398 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3399 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3400 else if (fWritable && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
3401 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
3402 else
3403 {
3404 /* Temporarily disabled physical handler(s); since the recompiler
3405 doesn't get notified when it's reset, we'll have to pretend it's
3406 operating normally. */
3407 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
3408 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3409 else
3410 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
3411 }
3412 }
3413 else
3414 rc = VINF_SUCCESS;
3415 if (RT_SUCCESS(rc))
3416 {
3417 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
3418 {
3419 AssertMsg(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM, ("GCPhys=%RGp type=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage)));
3420 RTGCPHYS off = GCPhys - pRam->GCPhys;
3421 unsigned iChunk = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
3422 *ppv = (void *)(pRam->paChunkR3Ptrs[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
3423 }
3424 else if (RT_LIKELY(pRam->pvR3))
3425 {
3426 AssertMsg(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, ("GCPhys=%RGp type=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage)));
3427 RTGCPHYS off = GCPhys - pRam->GCPhys;
3428 *ppv = (uint8_t *)pRam->pvR3 + off;
3429 }
3430 else
3431 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
3432 }
3433#endif /* !VBOX_WITH_NEW_PHYS_CODE */
3434 }
3435 else
3436 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
3437
3438 pgmUnlock(pVM);
3439 return rc;
3440}
3441
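/*
 * Illustrative sketch: resolving a guest physical address with
 * PGMR3PhysTlbGCPhys2Ptr() above and reacting to its status codes. The GCPhys
 * value is an assumption made up for the example; real callers would fall back
 * to PGMPhysRead/PGMPhysWrite when no direct pointer is available.
 */
#if 0 /* example only */
static void pgmExampleTlbLookup(PVM pVM)
{
    void *pv = NULL;
    int rc = PGMR3PhysTlbGCPhys2Ptr(pVM, UINT32_C(0x00100000), true /*fWritable*/, &pv);
    if (rc == VINF_SUCCESS)
        Log(("example: direct read/write access via %p\n", pv));
    else if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        Log(("example: reads may use %p, writes must go through the handler path\n", pv));
    else
        Log(("example: no direct access (%Rrc), use the ordinary access functions\n", rc));
}
#endif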
3442
3443