VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp@ 22695

Last change on this file: r22695, checked in by vboxsync, 16 years ago

Must flush pgm pool pages in PGMR3PhysGCPhys2CCPtrExternal to avoid changing dormant pgm pool pages.

1/* $Id: PGMPhys.cpp 22695 2009-09-02 08:41:52Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM_PHYS
27#include <VBox/pgm.h>
28#include <VBox/iom.h>
29#include <VBox/mm.h>
30#include <VBox/stam.h>
31#include <VBox/rem.h>
32#include <VBox/pdmdev.h>
33#include "PGMInternal.h"
34#include <VBox/vm.h>
35#include <VBox/sup.h>
36#include <VBox/param.h>
37#include <VBox/err.h>
38#include <VBox/log.h>
39#include <iprt/assert.h>
40#include <iprt/alloc.h>
41#include <iprt/asm.h>
42#include <iprt/thread.h>
43#include <iprt/string.h>
44
45
46/*******************************************************************************
47* Defined Constants And Macros *
48*******************************************************************************/
49/** The number of pages to free in one batch. */
50#define PGMPHYS_FREE_PAGE_BATCH_SIZE 128
51
52
53/*******************************************************************************
54* Internal Functions *
55*******************************************************************************/
56static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
57static int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys);
58
59
60/*
61 * PGMR3PhysReadU8-64
62 * PGMR3PhysWriteU8-64
63 */
64#define PGMPHYSFN_READNAME PGMR3PhysReadU8
65#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU8
66#define PGMPHYS_DATASIZE 1
67#define PGMPHYS_DATATYPE uint8_t
68#include "PGMPhysRWTmpl.h"
69
70#define PGMPHYSFN_READNAME PGMR3PhysReadU16
71#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU16
72#define PGMPHYS_DATASIZE 2
73#define PGMPHYS_DATATYPE uint16_t
74#include "PGMPhysRWTmpl.h"
75
76#define PGMPHYSFN_READNAME PGMR3PhysReadU32
77#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU32
78#define PGMPHYS_DATASIZE 4
79#define PGMPHYS_DATATYPE uint32_t
80#include "PGMPhysRWTmpl.h"
81
82#define PGMPHYSFN_READNAME PGMR3PhysReadU64
83#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU64
84#define PGMPHYS_DATASIZE 8
85#define PGMPHYS_DATATYPE uint64_t
86#include "PGMPhysRWTmpl.h"
87
88
89/**
90 * EMT worker for PGMR3PhysReadExternal.
91 */
92static DECLCALLBACK(int) pgmR3PhysReadExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, void *pvBuf, size_t cbRead)
93{
94 PGMPhysRead(pVM, *pGCPhys, pvBuf, cbRead);
95 return VINF_SUCCESS;
96}
97
98
99/**
100 * Read from physical memory, external users.
101 *
102 * @returns VBox status code.
103 * @retval VINF_SUCCESS.
104 *
105 * @param pVM VM Handle.
106 * @param GCPhys Physical address to start reading from.
107 * @param pvBuf Where to put the bits read.
108 * @param cbRead How many bytes to read.
109 *
110 * @thread Any but EMTs.
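 *
 * A minimal usage sketch from a non-EMT thread (the address and buffer are
 * hypothetical, not taken from any particular device):
 * @code
 *      uint8_t abBuf[512];
 *      int rc = PGMR3PhysReadExternal(pVM, UINT32_C(0x000a0000), abBuf, sizeof(abBuf));
 *      AssertRC(rc);
 * @endcode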
111 */
112VMMR3DECL(int) PGMR3PhysReadExternal(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
113{
114 VM_ASSERT_OTHER_THREAD(pVM);
115
116 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
117 LogFlow(("PGMR3PhysReadExternal: %RGp %d\n", GCPhys, cbRead));
118
119 pgmLock(pVM);
120
121 /*
122 * Copy loop on ram ranges.
123 */
124 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
125 for (;;)
126 {
127 /* Find range. */
128 while (pRam && GCPhys > pRam->GCPhysLast)
129 pRam = pRam->CTX_SUFF(pNext);
130 /* Inside range or not? */
131 if (pRam && GCPhys >= pRam->GCPhys)
132 {
133 /*
134 * Must work our way through this range page by page.
135 */
136 RTGCPHYS off = GCPhys - pRam->GCPhys;
137 while (off < pRam->cb)
138 {
139 unsigned iPage = off >> PAGE_SHIFT;
140 PPGMPAGE pPage = &pRam->aPages[iPage];
141
142 /*
143 * If the page has an ALL access handler, we'll have to
144 * delegate the job to EMT.
145 */
146 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
147 {
148 pgmUnlock(pVM);
149
150 PVMREQ pReq = NULL;
151 int rc = VMR3ReqCall(pVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT,
152 (PFNRT)pgmR3PhysReadExternalEMT, 4, pVM, &GCPhys, pvBuf, cbRead);
153 if (RT_SUCCESS(rc))
154 {
155 rc = pReq->iStatus;
156 VMR3ReqFree(pReq);
157 }
158 return rc;
159 }
160 Assert(!PGM_PAGE_IS_MMIO(pPage));
161
162 /*
163 * Simple stuff, go ahead.
164 */
165 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
166 if (cb > cbRead)
167 cb = cbRead;
168 const void *pvSrc;
169 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
170 if (RT_SUCCESS(rc))
171 memcpy(pvBuf, pvSrc, cb);
172 else
173 {
174 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
175 pRam->GCPhys + off, pPage, rc));
176 memset(pvBuf, 0xff, cb);
177 }
178
179 /* next page */
180 if (cb >= cbRead)
181 {
182 pgmUnlock(pVM);
183 return VINF_SUCCESS;
184 }
185 cbRead -= cb;
186 off += cb;
187 GCPhys += cb;
188 pvBuf = (char *)pvBuf + cb;
189 } /* walk pages in ram range. */
190 }
191 else
192 {
193 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
194
195 /*
196 * Unassigned address space.
197 */
198 if (!pRam)
199 break;
200 size_t cb = pRam->GCPhys - GCPhys;
201 if (cb >= cbRead)
202 {
203 memset(pvBuf, 0xff, cbRead);
204 break;
205 }
206 memset(pvBuf, 0xff, cb);
207
208 cbRead -= cb;
209 pvBuf = (char *)pvBuf + cb;
210 GCPhys += cb;
211 }
212 } /* Ram range walk */
213
214 pgmUnlock(pVM);
215
216 return VINF_SUCCESS;
217}
218
219
220/**
221 * EMT worker for PGMR3PhysWriteExternal.
222 */
223static DECLCALLBACK(int) pgmR3PhysWriteExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, const void *pvBuf, size_t cbWrite)
224{
225 /** @todo VERR_EM_NO_MEMORY */
226 PGMPhysWrite(pVM, *pGCPhys, pvBuf, cbWrite);
227 return VINF_SUCCESS;
228}
229
230
231/**
232 * Write to physical memory, external users.
233 *
234 * @returns VBox status code.
235 * @retval VINF_SUCCESS.
236 * @retval VERR_EM_NO_MEMORY.
237 *
238 * @param pVM VM Handle.
239 * @param GCPhys Physical address to write to.
240 * @param pvBuf What to write.
241 * @param cbWrite How many bytes to write.
242 *
243 * @thread Any but EMTs.
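 *
 * A minimal usage sketch from a non-EMT thread (hypothetical address and data):
 * @code
 *      uint32_t const u32Value = UINT32_C(0xdeadbeef);
 *      int rc = PGMR3PhysWriteExternal(pVM, UINT32_C(0x000e0000), &u32Value, sizeof(u32Value));
 *      AssertRC(rc);
 * @endcode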
244 */
245VMMDECL(int) PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
246{
247 VM_ASSERT_OTHER_THREAD(pVM);
248
249 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMR3PhysWriteExternal after pgmR3Save()!\n"));
250 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
251 LogFlow(("PGMR3PhysWriteExternal: %RGp %d\n", GCPhys, cbWrite));
252
253 pgmLock(pVM);
254
255 /*
256 * Copy loop on ram ranges, stop when we hit something difficult.
257 */
258 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
259 for (;;)
260 {
261 /* Find range. */
262 while (pRam && GCPhys > pRam->GCPhysLast)
263 pRam = pRam->CTX_SUFF(pNext);
264 /* Inside range or not? */
265 if (pRam && GCPhys >= pRam->GCPhys)
266 {
267 /*
268 * Must work our way through this range page by page.
269 */
270 RTGCPTR off = GCPhys - pRam->GCPhys;
271 while (off < pRam->cb)
272 {
273 RTGCPTR iPage = off >> PAGE_SHIFT;
274 PPGMPAGE pPage = &pRam->aPages[iPage];
275
276 /*
277 * If the page is in any way problematic, we have to
278 * do the work on the EMT. Anything that needs to be made
279 * writable or involves access handlers is problematic.
280 */
281 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
282 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
283 {
284 pgmUnlock(pVM);
285
286 PVMREQ pReq = NULL;
287 int rc = VMR3ReqCall(pVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT,
288 (PFNRT)pgmR3PhysWriteExternalEMT, 4, pVM, &GCPhys, pvBuf, cbWrite);
289 if (RT_SUCCESS(rc))
290 {
291 rc = pReq->iStatus;
292 VMR3ReqFree(pReq);
293 }
294 return rc;
295 }
296 Assert(!PGM_PAGE_IS_MMIO(pPage));
297
298 /*
299 * Simple stuff, go ahead.
300 */
301 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
302 if (cb > cbWrite)
303 cb = cbWrite;
304 void *pvDst;
305 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
306 if (RT_SUCCESS(rc))
307 memcpy(pvDst, pvBuf, cb);
308 else
309 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
310 pRam->GCPhys + off, pPage, rc));
311
312 /* next page */
313 if (cb >= cbWrite)
314 {
315 pgmUnlock(pVM);
316 return VINF_SUCCESS;
317 }
318
319 cbWrite -= cb;
320 off += cb;
321 GCPhys += cb;
322 pvBuf = (const char *)pvBuf + cb;
323 } /* walk pages in ram range */
324 }
325 else
326 {
327 /*
328 * Unassigned address space, skip it.
329 */
330 if (!pRam)
331 break;
332 size_t cb = pRam->GCPhys - GCPhys;
333 if (cb >= cbWrite)
334 break;
335 cbWrite -= cb;
336 pvBuf = (const char *)pvBuf + cb;
337 GCPhys += cb;
338 }
339 } /* Ram range walk */
340
341 pgmUnlock(pVM);
342 return VINF_SUCCESS;
343}
344
345
346/**
347 * VMR3ReqCall worker for PGMR3PhysGCPhys2CCPtrExternal to make pages writable.
348 *
349 * @returns see PGMR3PhysGCPhys2CCPtrExternal
350 * @param pVM The VM handle.
351 * @param pGCPhys Pointer to the guest physical address.
352 * @param ppv Where to store the mapping address.
353 * @param pLock Where to store the lock.
354 */
355static DECLCALLBACK(int) pgmR3PhysGCPhys2CCPtrDelegated(PVM pVM, PRTGCPHYS pGCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
356{
357 /*
358 * Just hand it to PGMPhysGCPhys2CCPtr and check that it's not a page with
359 * an access handler after it succeeds.
360 */
361 int rc = pgmLock(pVM);
362 AssertRCReturn(rc, rc);
363
364 rc = PGMPhysGCPhys2CCPtr(pVM, *pGCPhys, ppv, pLock);
365 if (RT_SUCCESS(rc))
366 {
367 PPGMPAGEMAPTLBE pTlbe;
368 int rc2 = pgmPhysPageQueryTlbe(&pVM->pgm.s, *pGCPhys, &pTlbe);
369 AssertFatalRC(rc2);
370 PPGMPAGE pPage = pTlbe->pPage;
371 if (PGM_PAGE_IS_MMIO(pPage))
372 {
373 PGMPhysReleasePageMappingLock(pVM, pLock);
374 rc = VERR_PGM_PHYS_PAGE_RESERVED;
375 }
376 else
377 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
378 {
379 /* We *must* flush any corresponding pgm pool page here, otherwise we'll
380 * not be informed about writes and keep bogus gst->shw mappings around.
381 */
382 PGMPoolFlushPage(pVM, *pGCPhys);
383 Assert(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage));
384 }
385 }
386
387 pgmUnlock(pVM);
388 return rc;
389}
390
391
392/**
393 * Requests the mapping of a guest page into ring-3, external threads.
394 *
395 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
396 * release it.
397 *
398 * This API will assume your intention is to write to the page, and will
399 * therefore replace shared and zero pages. If you do not intend to modify the
400 * page, use the PGMR3PhysGCPhys2CCPtrReadOnlyExternal() API.
401 *
402 * @returns VBox status code.
403 * @retval VINF_SUCCESS on success.
404 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
405 * backing or if the page has any active access handlers. The caller
406 * must fall back on using PGMR3PhysWriteExternal.
407 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
408 *
409 * @param pVM The VM handle.
410 * @param GCPhys The guest physical address of the page that should be mapped.
411 * @param ppv Where to store the address corresponding to GCPhys.
412 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
413 *
414 * @remark Avoid calling this API from within critical sections (other than the
415 * PGM one) because of the deadlock risk when we have to delegate the
416 * task to an EMT.
417 * @thread Any.
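 *
 * A usage sketch (pvData and cbData are hypothetical; the fallback follows the
 * status codes documented above):
 * @code
 *      void           *pv;
 *      PGMPAGEMAPLOCK  Lock;
 *      int rc = PGMR3PhysGCPhys2CCPtrExternal(pVM, GCPhys, &pv, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          memcpy(pv, pvData, cbData); // cbData must not cross the page boundary
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *      }
 *      else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
 *          rc = PGMR3PhysWriteExternal(pVM, GCPhys, pvData, cbData);
 * @endcode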
418 */
419VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrExternal(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
420{
421 AssertPtr(ppv);
422 AssertPtr(pLock);
423
424 int rc = pgmLock(pVM);
425 AssertRCReturn(rc, rc);
426
427 /*
428 * Query the Physical TLB entry for the page (may fail).
429 */
430 PPGMPAGEMAPTLBE pTlbe;
431 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
432 if (RT_SUCCESS(rc))
433 {
434 PPGMPAGE pPage = pTlbe->pPage;
435 if (PGM_PAGE_IS_MMIO(pPage))
436 rc = VERR_PGM_PHYS_PAGE_RESERVED;
437 else
438 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
439 {
440 /* We *must* flush any corresponding pgm pool page here, otherwise we'll
441 * not be informed about writes and keep bogus gst->shw mappings around.
442 */
443 PGMPoolFlushPage(pVM, GCPhys);
444 Assert(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage));
445 }
446 else
447 {
448 /*
449 * If the page is shared, the zero page, or being write monitored
450 * it must be converted to a page that's writable if possible.
451 * This has to be done on an EMT.
452 */
453 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
454 {
455 pgmUnlock(pVM);
456
457 PVMREQ pReq = NULL;
458 rc = VMR3ReqCall(pVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT,
459 (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4, pVM, &GCPhys, ppv, pLock);
460 if (RT_SUCCESS(rc))
461 {
462 rc = pReq->iStatus;
463 VMR3ReqFree(pReq);
464 }
465 return rc;
466 }
467
468 /*
469 * Now, just perform the locking and calculate the return address.
470 */
471 PPGMPAGEMAP pMap = pTlbe->pMap;
472 pMap->cRefs++;
473#if 0 /** @todo implement locking properly */
474 if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
475 if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
476 {
477 AssertMsgFailed(("%RGp is entering permanent locked state!\n", GCPhys));
478 pMap->cRefs++; /* Extra ref to prevent it from going away. */
479 }
480#endif
481 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
482 pLock->pvPage = pPage;
483 pLock->pvMap = pMap;
484 }
485 }
486
487 pgmUnlock(pVM);
488 return rc;
489}
490
491
492/**
493 * Requests the mapping of a guest page into ring-3, external threads.
494 *
495 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
496 * release it.
497 *
498 * @returns VBox status code.
499 * @retval VINF_SUCCESS on success.
500 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
501 * backing or if the page has an active ALL access handler. The caller
502 * must fall back on using PGMPhysRead.
503 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
504 *
505 * @param pVM The VM handle.
506 * @param GCPhys The guest physical address of the page that should be mapped.
507 * @param ppv Where to store the address corresponding to GCPhys.
508 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
509 *
510 * @remark Avoid calling this API from within critical sections (other than
511 * the PGM one) because of the deadlock risk.
512 * @thread Any.
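 *
 * A usage sketch (pvDst and cbToRead are hypothetical; fall back as documented
 * above when the call fails):
 * @code
 *      void const     *pv;
 *      PGMPAGEMAPLOCK  Lock;
 *      int rc = PGMR3PhysGCPhys2CCPtrReadOnlyExternal(pVM, GCPhys, &pv, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          memcpy(pvDst, pv, cbToRead); // stay within the mapped page
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *      }
 * @endcode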
513 */
514VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
515{
516 int rc = pgmLock(pVM);
517 AssertRCReturn(rc, rc);
518
519 /*
520 * Query the Physical TLB entry for the page (may fail).
521 */
522 PPGMPAGEMAPTLBE pTlbe;
523 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
524 if (RT_SUCCESS(rc))
525 {
526 PPGMPAGE pPage = pTlbe->pPage;
527#if 1
528 /* MMIO pages don't have any readable backing. */
529 if (PGM_PAGE_IS_MMIO(pPage))
530 rc = VERR_PGM_PHYS_PAGE_RESERVED;
531#else
532 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
533 rc = VERR_PGM_PHYS_PAGE_RESERVED;
534#endif
535 else
536 {
537 /*
538 * Now, just perform the locking and calculate the return address.
539 */
540 PPGMPAGEMAP pMap = pTlbe->pMap;
541 pMap->cRefs++;
542#if 0 /** @todo implement locking properly */
543 if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
544 if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
545 {
546 AssertMsgFailed(("%RGp is entering permanent locked state!\n", GCPhys));
547 pMap->cRefs++; /* Extra ref to prevent it from going away. */
548 }
549#endif
550 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
551 pLock->pvPage = pPage;
552 pLock->pvMap = pMap;
553 }
554 }
555
556 pgmUnlock(pVM);
557 return rc;
558}
559
560
561/**
562 * Relinks the RAM ranges using the pSelfRC and pSelfR0 pointers.
563 *
564 * Called when anything was relocated.
565 *
566 * @param pVM Pointer to the shared VM structure.
567 */
568void pgmR3PhysRelinkRamRanges(PVM pVM)
569{
570 PPGMRAMRANGE pCur;
571
572#ifdef VBOX_STRICT
573 for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
574 {
575 Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfR0 == MMHyperCCToR0(pVM, pCur));
576 Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfRC == MMHyperCCToRC(pVM, pCur));
577 Assert((pCur->GCPhys & PAGE_OFFSET_MASK) == 0);
578 Assert((pCur->GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
579 Assert((pCur->cb & PAGE_OFFSET_MASK) == 0);
580 Assert(pCur->cb == pCur->GCPhysLast - pCur->GCPhys + 1);
581 for (PPGMRAMRANGE pCur2 = pVM->pgm.s.pRamRangesR3; pCur2; pCur2 = pCur2->pNextR3)
582 Assert( pCur2 == pCur
583 || strcmp(pCur2->pszDesc, pCur->pszDesc)); /** @todo fix MMIO ranges!! */
584 }
585#endif
586
587 pCur = pVM->pgm.s.pRamRangesR3;
588 if (pCur)
589 {
590 pVM->pgm.s.pRamRangesR0 = pCur->pSelfR0;
591 pVM->pgm.s.pRamRangesRC = pCur->pSelfRC;
592
593 for (; pCur->pNextR3; pCur = pCur->pNextR3)
594 {
595 pCur->pNextR0 = pCur->pNextR3->pSelfR0;
596 pCur->pNextRC = pCur->pNextR3->pSelfRC;
597 }
598
599 Assert(pCur->pNextR0 == NIL_RTR0PTR);
600 Assert(pCur->pNextRC == NIL_RTRCPTR);
601 }
602 else
603 {
604 Assert(pVM->pgm.s.pRamRangesR0 == NIL_RTR0PTR);
605 Assert(pVM->pgm.s.pRamRangesRC == NIL_RTRCPTR);
606 }
607}
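
/*
 * A rough sketch of the RAM range list maintained by the functions above and
 * below (simplified; derived from the linking code, not from PGMInternal.h):
 *
 *   pVM->pgm.s.pRamRangesR3/R0/RC
 *        |
 *        v
 *   [PGMRAMRANGE #1] --pNextR3/pNextR0/pNextRC--> [PGMRAMRANGE #2] --> ... --> NIL
 *
 * pSelfR0/pSelfRC are self references used to refresh the R0/RC next pointers
 * whenever a floating range is relocated (see pgmR3PhysRelinkRamRanges).
 */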
608
609
610/**
611 * Links a new RAM range into the list.
612 *
613 * @param pVM Pointer to the shared VM structure.
614 * @param pNew Pointer to the new list entry.
615 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
616 */
617static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
618{
619 AssertMsg(pNew->pszDesc, ("%RGp-%RGp\n", pNew->GCPhys, pNew->GCPhysLast));
620 Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfR0 == MMHyperCCToR0(pVM, pNew));
621 Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfRC == MMHyperCCToRC(pVM, pNew));
622
623 pgmLock(pVM);
624
625 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
626 pNew->pNextR3 = pRam;
627 pNew->pNextR0 = pRam ? pRam->pSelfR0 : NIL_RTR0PTR;
628 pNew->pNextRC = pRam ? pRam->pSelfRC : NIL_RTRCPTR;
629
630 if (pPrev)
631 {
632 pPrev->pNextR3 = pNew;
633 pPrev->pNextR0 = pNew->pSelfR0;
634 pPrev->pNextRC = pNew->pSelfRC;
635 }
636 else
637 {
638 pVM->pgm.s.pRamRangesR3 = pNew;
639 pVM->pgm.s.pRamRangesR0 = pNew->pSelfR0;
640 pVM->pgm.s.pRamRangesRC = pNew->pSelfRC;
641 }
642
643 pgmUnlock(pVM);
644}
645
646
647/**
648 * Unlink an existing RAM range from the list.
649 *
650 * @param pVM Pointer to the shared VM structure.
651 * @param pRam Pointer to the list entry to unlink.
652 * @param pPrev Pointer to the previous list entry. If NULL, pRam is the head.
653 */
654static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
655{
656 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
657 Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfR0 == MMHyperCCToR0(pVM, pRam));
658 Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfRC == MMHyperCCToRC(pVM, pRam));
659
660 pgmLock(pVM);
661
662 PPGMRAMRANGE pNext = pRam->pNextR3;
663 if (pPrev)
664 {
665 pPrev->pNextR3 = pNext;
666 pPrev->pNextR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
667 pPrev->pNextRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
668 }
669 else
670 {
671 Assert(pVM->pgm.s.pRamRangesR3 == pRam);
672 pVM->pgm.s.pRamRangesR3 = pNext;
673 pVM->pgm.s.pRamRangesR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
674 pVM->pgm.s.pRamRangesRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
675 }
676
677 pgmUnlock(pVM);
678}
679
680
681/**
682 * Unlink an existing RAM range from the list.
683 *
684 * @param pVM Pointer to the shared VM structure.
685 * @param pRam Pointer to the list entry to unlink.
686 */
687static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
688{
689 pgmLock(pVM);
690
691 /* find prev. */
692 PPGMRAMRANGE pPrev = NULL;
693 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
694 while (pCur != pRam)
695 {
696 pPrev = pCur;
697 pCur = pCur->pNextR3;
698 }
699 AssertFatal(pCur);
700
701 pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
702
703 pgmUnlock(pVM);
704}
705
706
707/**
708 * Frees a range of pages, replacing them with ZERO pages of the specified type.
709 *
710 * @returns VBox status code.
711 * @param pVM The VM handle.
712 * @param pRam The RAM range in which the pages resides.
713 * @param GCPhys The address of the first page.
714 * @param GCPhysLast The address of the last page.
715 * @param uType The page type to replace them with.
716 */
717static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, uint8_t uType)
718{
719 uint32_t cPendingPages = 0;
720 PGMMFREEPAGESREQ pReq;
721 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
722 AssertLogRelRCReturn(rc, rc);
723
724 /* Iterate the pages. */
725 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
726 uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> PAGE_SHIFT) + 1;
727 while (cPagesLeft-- > 0)
728 {
729 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
730 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
731
732 PGM_PAGE_SET_TYPE(pPageDst, uType);
733
734 GCPhys += PAGE_SIZE;
735 pPageDst++;
736 }
737
738 if (cPendingPages)
739 {
740 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
741 AssertLogRelRCReturn(rc, rc);
742 }
743 GMMR3FreePagesCleanup(pReq);
744
745 return rc;
746}
747
748
749/**
750 * PGMR3PhysRegisterRam worker that initializes and links a RAM range.
751 *
752 * @param pVM The VM handle.
753 * @param pNew The new RAM range.
754 * @param GCPhys The address of the RAM range.
755 * @param GCPhysLast The last address of the RAM range.
756 * @param RCPtrNew The RC address if the range is floating. NIL_RTRCPTR
757 * if in HMA.
758 * @param R0PtrNew Ditto for R0.
759 * @param pszDesc The description.
760 * @param pPrev The previous RAM range (for linking).
761 */
762static void pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
763 RTRCPTR RCPtrNew, RTR0PTR R0PtrNew, const char *pszDesc, PPGMRAMRANGE pPrev)
764{
765 /*
766 * Initialize the range.
767 */
768 pNew->pSelfR0 = R0PtrNew != NIL_RTR0PTR ? R0PtrNew : MMHyperCCToR0(pVM, pNew);
769 pNew->pSelfRC = RCPtrNew != NIL_RTRCPTR ? RCPtrNew : MMHyperCCToRC(pVM, pNew);
770 pNew->GCPhys = GCPhys;
771 pNew->GCPhysLast = GCPhysLast;
772 pNew->cb = GCPhysLast - GCPhys + 1;
773 pNew->pszDesc = pszDesc;
774 pNew->fFlags = RCPtrNew != NIL_RTRCPTR ? PGM_RAM_RANGE_FLAGS_FLOATING : 0;
775 pNew->pvR3 = NULL;
776
777 uint32_t const cPages = pNew->cb >> PAGE_SHIFT;
778 RTGCPHYS iPage = cPages;
779 while (iPage-- > 0)
780 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
781
782 /* Update the page count stats. */
783 pVM->pgm.s.cZeroPages += cPages;
784 pVM->pgm.s.cAllPages += cPages;
785
786 /*
787 * Link it.
788 */
789 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
790}
791
792
793/**
794 * Relocate a floating RAM range.
795 *
796 * @copydoc FNPGMRELOCATE.
797 */
798static DECLCALLBACK(bool) pgmR3PhysRamRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
799{
800 PPGMRAMRANGE pRam = (PPGMRAMRANGE)pvUser;
801 Assert(pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
802 Assert(pRam->pSelfRC == GCPtrOld + PAGE_SIZE);
803
804 switch (enmMode)
805 {
806 case PGMRELOCATECALL_SUGGEST:
807 return true;
808 case PGMRELOCATECALL_RELOCATE:
809 {
810 /* Update myself and then relink all the ranges. */
811 pgmLock(pVM);
812 pRam->pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE);
813 pgmR3PhysRelinkRamRanges(pVM);
814 pgmUnlock(pVM);
815 return true;
816 }
817
818 default:
819 AssertFailedReturn(false);
820 }
821}
822
823
824/**
825 * PGMR3PhysRegisterRam worker that registers a high chunk.
826 *
827 * @returns VBox status code.
828 * @param pVM The VM handle.
829 * @param GCPhys The address of the RAM.
830 * @param cRamPages The number of RAM pages to register.
831 * @param cbChunk The size of the PGMRAMRANGE guest mapping.
832 * @param iChunk The chunk number.
833 * @param pszDesc The RAM range description.
834 * @param ppPrev Previous RAM range pointer. In/Out.
835 */
836static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages,
837 uint32_t cbChunk, uint32_t iChunk, const char *pszDesc,
838 PPGMRAMRANGE *ppPrev)
839{
840 const char *pszDescChunk = iChunk == 0
841 ? pszDesc
842 : MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s (#%u)", pszDesc, iChunk + 1);
843 AssertReturn(pszDescChunk, VERR_NO_MEMORY);
844
845 /*
846 * Allocate memory for the new chunk.
847 */
848 size_t const cChunkPages = RT_ALIGN_Z(RT_UOFFSETOF(PGMRAMRANGE, aPages[cRamPages]), PAGE_SIZE) >> PAGE_SHIFT;
849 PSUPPAGE paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages);
850 AssertReturn(paChunkPages, VERR_NO_TMP_MEMORY);
851 RTR0PTR R0PtrChunk = NIL_RTR0PTR;
852 void *pvChunk = NULL;
853 int rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk,
854#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
855 VMMIsHwVirtExtForced(pVM) ? &R0PtrChunk : NULL,
856#else
857 NULL,
858#endif
859 paChunkPages);
860 if (RT_SUCCESS(rc))
861 {
862#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
863 if (!VMMIsHwVirtExtForced(pVM))
864 R0PtrChunk = NIL_RTR0PTR;
865#else
866 R0PtrChunk = (uintptr_t)pvChunk;
867#endif
868 memset(pvChunk, 0, cChunkPages << PAGE_SHIFT);
869
870 PPGMRAMRANGE pNew = (PPGMRAMRANGE)pvChunk;
871
872 /*
873 * Create a mapping and map the pages into it.
874 * We push these in below the HMA.
875 */
876 RTGCPTR GCPtrChunkMap = pVM->pgm.s.GCPtrPrevRamRangeMapping - cbChunk;
877 rc = PGMR3MapPT(pVM, GCPtrChunkMap, cbChunk, 0 /*fFlags*/, pgmR3PhysRamRangeRelocate, pNew, pszDescChunk);
878 if (RT_SUCCESS(rc))
879 {
880 pVM->pgm.s.GCPtrPrevRamRangeMapping = GCPtrChunkMap;
881
882 RTGCPTR const GCPtrChunk = GCPtrChunkMap + PAGE_SIZE;
883 RTGCPTR GCPtrPage = GCPtrChunk;
884 for (uint32_t iPage = 0; iPage < cChunkPages && RT_SUCCESS(rc); iPage++, GCPtrPage += PAGE_SIZE)
885 rc = PGMMap(pVM, GCPtrPage, paChunkPages[iPage].Phys, PAGE_SIZE, 0);
886 if (RT_SUCCESS(rc))
887 {
888 /*
889 * Ok, init and link the range.
890 */
891 pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << PAGE_SHIFT) - 1,
892 (RTRCPTR)GCPtrChunk, R0PtrChunk, pszDescChunk, *ppPrev);
893 *ppPrev = pNew;
894 }
895 }
896
897 if (RT_FAILURE(rc))
898 SUPR3PageFreeEx(pvChunk, cChunkPages);
899 }
900
901 RTMemTmpFree(paChunkPages);
902 return rc;
903}
904
905
906/**
907 * Sets up a RAM range.
908 *
909 * This will check for conflicting registrations, make a resource
910 * reservation for the memory (with GMM), and set up the per-page
911 * tracking structures (PGMPAGE).
912 *
913 * @returns VBox status code.
914 * @param pVM Pointer to the shared VM structure.
915 * @param GCPhys The physical address of the RAM.
916 * @param cb The size of the RAM.
917 * @param pszDesc The description - not copied, so, don't free or change it.
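 *
 * A registration sketch (a hypothetical 128 MB of base RAM at address 0):
 * @code
 *      rc = PGMR3PhysRegisterRam(pVM, 0 /*GCPhys*/, 128 * _1M, "Base RAM");
 *      AssertRCReturn(rc, rc);
 * @endcode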
918 */
919VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
920{
921 /*
922 * Validate input.
923 */
924 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
925 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
926 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
927 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
928 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
929 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
930 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
931 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
932
933 pgmLock(pVM);
934
935 /*
936 * Find range location and check for conflicts.
938 */
939 PPGMRAMRANGE pPrev = NULL;
940 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
941 while (pRam && GCPhysLast >= pRam->GCPhys)
942 {
943 if ( GCPhysLast >= pRam->GCPhys
944 && GCPhys <= pRam->GCPhysLast)
945 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
946 GCPhys, GCPhysLast, pszDesc,
947 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
948 VERR_PGM_RAM_CONFLICT);
949
950 /* next */
951 pPrev = pRam;
952 pRam = pRam->pNextR3;
953 }
954
955 /*
956 * Register it with GMM (the API bitches).
957 */
958 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
959 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
960 if (RT_FAILURE(rc))
961 {
962 pgmUnlock(pVM);
963 return rc;
964 }
965
966 if ( GCPhys >= _4G
967 && cPages > 256)
968 {
969 /*
970 * The PGMRAMRANGE structures for the high memory can get very big.
971 * In order to avoid SUPR3PageAllocEx allocation failures due to the
972 * allocation size limit there and also to avoid being unable to find
973 * guest mapping space for them, we split this memory up into 4MB in
974 * (potential) raw-mode configs and 16MB chunks in forced AMD-V/VT-x
975 * mode.
976 *
977 * The first and last page of each mapping are guard pages and marked
978 * not-present. So, we've got 4186112 and 16769024 bytes available for
979 * the PGMRAMRANGE structure.
980 *
981 * Note! The sizes used here will influence the saved state.
982 */
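 /* Arithmetic behind the figures above: 4M - 2*4K = 4194304 - 8192 = 4186112 bytes,
 and 16M - 2*4K = 16777216 - 8192 = 16769024 bytes for the PGMRAMRANGE structure. */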
983 uint32_t cbChunk;
984 uint32_t cPagesPerChunk;
985 if (VMMIsHwVirtExtForced(pVM))
986 {
987 cbChunk = 16U*_1M;
988 cPagesPerChunk = 1048048; /* max ~1048059 */
989 AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 1048048 < 16U*_1M - PAGE_SIZE * 2);
990 }
991 else
992 {
993 cbChunk = 4U*_1M;
994 cPagesPerChunk = 261616; /* max ~261627 */
995 AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 261616 < 4U*_1M - PAGE_SIZE * 2);
996 }
997 AssertRelease(RT_UOFFSETOF(PGMRAMRANGE, aPages[cPagesPerChunk]) + PAGE_SIZE * 2 <= cbChunk);
998
999 RTGCPHYS cPagesLeft = cPages;
1000 RTGCPHYS GCPhysChunk = GCPhys;
1001 uint32_t iChunk = 0;
1002 while (cPagesLeft > 0)
1003 {
1004 uint32_t cPagesInChunk = cPagesLeft;
1005 if (cPagesInChunk > cPagesPerChunk)
1006 cPagesInChunk = cPagesPerChunk;
1007
1008 rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, cbChunk, iChunk, pszDesc, &pPrev);
1009 AssertRCReturn(rc, rc);
1010
1011 /* advance */
1012 GCPhysChunk += (RTGCPHYS)cPagesInChunk << PAGE_SHIFT;
1013 cPagesLeft -= cPagesInChunk;
1014 iChunk++;
1015 }
1016 }
1017 else
1018 {
1019 /*
1020 * Allocate, initialize and link the new RAM range.
1021 */
1022 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
1023 PPGMRAMRANGE pNew;
1024 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
1025 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
1026
1027 pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, NIL_RTRCPTR, NIL_RTR0PTR, pszDesc, pPrev);
1028 }
1029 pgmUnlock(pVM);
1030
1031 /*
1032 * Notify REM.
1033 */
1034 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_RAM);
1035
1036 return VINF_SUCCESS;
1037}
1038
1039
1040/**
1041 * Worker called by PGMR3InitFinalize if we're configured to pre-allocate RAM.
1042 *
1043 * We do this late in the init process so that all the ROM and MMIO ranges have
1044 * been registered already and we don't go wasting memory on them.
1045 *
1046 * @returns VBox status code.
1047 *
1048 * @param pVM Pointer to the shared VM structure.
1049 */
1050int pgmR3PhysRamPreAllocate(PVM pVM)
1051{
1052 Assert(pVM->pgm.s.fRamPreAlloc);
1053 Log(("pgmR3PhysRamPreAllocate: enter\n"));
1054
1055 /*
1056 * Walk the RAM ranges and allocate all RAM pages, halt at
1057 * the first allocation error.
1058 */
1059 uint64_t cPages = 0;
1060 uint64_t NanoTS = RTTimeNanoTS();
1061 pgmLock(pVM);
1062 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
1063 {
1064 PPGMPAGE pPage = &pRam->aPages[0];
1065 RTGCPHYS GCPhys = pRam->GCPhys;
1066 uint32_t cLeft = pRam->cb >> PAGE_SHIFT;
1067 while (cLeft-- > 0)
1068 {
1069 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
1070 {
1071 switch (PGM_PAGE_GET_STATE(pPage))
1072 {
1073 case PGM_PAGE_STATE_ZERO:
1074 {
1075 int rc = pgmPhysAllocPage(pVM, pPage, GCPhys);
1076 if (RT_FAILURE(rc))
1077 {
1078 LogRel(("PGM: RAM Pre-allocation failed at %RGp (in %s) with rc=%Rrc\n", GCPhys, pRam->pszDesc, rc));
1079 pgmUnlock(pVM);
1080 return rc;
1081 }
1082 cPages++;
1083 break;
1084 }
1085
1086 case PGM_PAGE_STATE_ALLOCATED:
1087 case PGM_PAGE_STATE_WRITE_MONITORED:
1088 case PGM_PAGE_STATE_SHARED:
1089 /* nothing to do here. */
1090 break;
1091 }
1092 }
1093
1094 /* next */
1095 pPage++;
1096 GCPhys += PAGE_SIZE;
1097 }
1098 }
1099 pgmUnlock(pVM);
1100 NanoTS = RTTimeNanoTS() - NanoTS;
1101
1102 LogRel(("PGM: Pre-allocated %llu pages in %llu ms\n", cPages, NanoTS / 1000000));
1103 Log(("pgmR3PhysRamPreAllocate: returns VINF_SUCCESS\n"));
1104 return VINF_SUCCESS;
1105}
1106
1107
1108/**
1109 * Resets (zeros) the RAM.
1110 *
1111 * ASSUMES that the caller owns the PGM lock.
1112 *
1113 * @returns VBox status code.
1114 * @param pVM Pointer to the shared VM structure.
1115 */
1116int pgmR3PhysRamReset(PVM pVM)
1117{
1118 Assert(PGMIsLockOwner(pVM));
1119 /*
1120 * We batch up pages before freeing them.
1121 */
1122 uint32_t cPendingPages = 0;
1123 PGMMFREEPAGESREQ pReq;
1124 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1125 AssertLogRelRCReturn(rc, rc);
1126
1127 /*
1128 * Walk the ram ranges.
1129 */
1130 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
1131 {
1132 uint32_t iPage = pRam->cb >> PAGE_SHIFT;
1133 AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
1134
1135 if (!pVM->pgm.s.fRamPreAlloc)
1136 {
1137 /* Replace all RAM pages by ZERO pages. */
1138 while (iPage-- > 0)
1139 {
1140 PPGMPAGE pPage = &pRam->aPages[iPage];
1141 switch (PGM_PAGE_GET_TYPE(pPage))
1142 {
1143 case PGMPAGETYPE_RAM:
1144 if (!PGM_PAGE_IS_ZERO(pPage))
1145 {
1146 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1147 AssertLogRelRCReturn(rc, rc);
1148 }
1149 break;
1150
1151 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
1152 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1153 break;
1154
1155 case PGMPAGETYPE_MMIO2:
1156 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
1157 case PGMPAGETYPE_ROM:
1158 case PGMPAGETYPE_MMIO:
1159 break;
1160 default:
1161 AssertFailed();
1162 }
1163 } /* for each page */
1164 }
1165 else
1166 {
1167 /* Zero the memory. */
1168 while (iPage-- > 0)
1169 {
1170 PPGMPAGE pPage = &pRam->aPages[iPage];
1171 switch (PGM_PAGE_GET_TYPE(pPage))
1172 {
1173 case PGMPAGETYPE_RAM:
1174 switch (PGM_PAGE_GET_STATE(pPage))
1175 {
1176 case PGM_PAGE_STATE_ZERO:
1177 break;
1178 case PGM_PAGE_STATE_SHARED:
1179 case PGM_PAGE_STATE_WRITE_MONITORED:
1180 rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1181 AssertLogRelRCReturn(rc, rc);
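 /* fall thru - the page is now writable/allocated and gets zeroed below. */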
1182 case PGM_PAGE_STATE_ALLOCATED:
1183 {
1184 void *pvPage;
1185 PPGMPAGEMAP pMapIgnored;
1186 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pMapIgnored, &pvPage);
1187 AssertLogRelRCReturn(rc, rc);
1188 ASMMemZeroPage(pvPage);
1189 break;
1190 }
1191 }
1192 break;
1193
1194 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
1195 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1196 break;
1197
1198 case PGMPAGETYPE_MMIO2:
1199 case PGMPAGETYPE_ROM_SHADOW:
1200 case PGMPAGETYPE_ROM:
1201 case PGMPAGETYPE_MMIO:
1202 break;
1203 default:
1204 AssertFailed();
1205
1206 }
1207 } /* for each page */
1208 }
1209
1210 }
1211
1212 /*
1213 * Finish off any pages pending freeing.
1214 */
1215 if (cPendingPages)
1216 {
1217 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
1218 AssertLogRelRCReturn(rc, rc);
1219 }
1220 GMMR3FreePagesCleanup(pReq);
1221
1222 return VINF_SUCCESS;
1223}
1224
1225
1226/**
1227 * This is the interface IOM is using to register an MMIO region.
1228 *
1229 * It will check for conflicts and ensure that a RAM range structure
1230 * is present before calling the PGMR3HandlerPhysicalRegister API to
1231 * register the callbacks.
1232 *
1233 * @returns VBox status code.
1234 *
1235 * @param pVM Pointer to the shared VM structure.
1236 * @param GCPhys The start of the MMIO region.
1237 * @param cb The size of the MMIO region.
1238 * @param pfnHandlerR3 The address of the ring-3 handler. (IOMR3MMIOHandler)
1239 * @param pvUserR3 The user argument for R3.
1240 * @param pfnHandlerR0 The address of the ring-0 handler. (IOMMMIOHandler)
1241 * @param pvUserR0 The user argument for R0.
1242 * @param pfnHandlerRC The address of the RC handler. (IOMMMIOHandler)
1243 * @param pvUserRC The user argument for RC.
1244 * @param pszDesc The description of the MMIO region.
1245 */
1246VMMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb,
1247 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
1248 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
1249 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
1250 R3PTRTYPE(const char *) pszDesc)
1251{
1252 /*
1253 * Assert on some assumption.
1254 */
1255 VM_ASSERT_EMT(pVM);
1256 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1257 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1258 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1259 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
1260
1261 /*
1262 * Make sure there's a RAM range structure for the region.
1263 */
1264 int rc;
1265 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1266 bool fRamExists = false;
1267 PPGMRAMRANGE pRamPrev = NULL;
1268 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1269 while (pRam && GCPhysLast >= pRam->GCPhys)
1270 {
1271 if ( GCPhysLast >= pRam->GCPhys
1272 && GCPhys <= pRam->GCPhysLast)
1273 {
1274 /* Simplification: all within the same range. */
1275 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
1276 && GCPhysLast <= pRam->GCPhysLast,
1277 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
1278 GCPhys, GCPhysLast, pszDesc,
1279 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1280 VERR_PGM_RAM_CONFLICT);
1281
1282 /* Check that it's all RAM or MMIO pages. */
1283 PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1284 uint32_t cLeft = cb >> PAGE_SHIFT;
1285 while (cLeft-- > 0)
1286 {
1287 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
1288 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
1289 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
1290 GCPhys, GCPhysLast, pszDesc, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
1291 VERR_PGM_RAM_CONFLICT);
1292 pPage++;
1293 }
1294
1295 /* Looks good. */
1296 fRamExists = true;
1297 break;
1298 }
1299
1300 /* next */
1301 pRamPrev = pRam;
1302 pRam = pRam->pNextR3;
1303 }
1304 PPGMRAMRANGE pNew;
1305 if (fRamExists)
1306 {
1307 pNew = NULL;
1308
1309 /*
1310 * Make all the pages in the range MMIO/ZERO pages, freeing any
1311 * RAM pages currently mapped here. This might not be 100% correct
1312 * for PCI memory, but we're doing the same thing for MMIO2 pages.
1313 */
1314 rc = pgmLock(pVM);
1315 if (RT_SUCCESS(rc))
1316 {
1317 rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, PGMPAGETYPE_MMIO);
1318 pgmUnlock(pVM);
1319 }
1320 AssertRCReturn(rc, rc);
1321 }
1322 else
1323 {
1324 pgmLock(pVM);
1325
1326 /*
1327 * No RAM range, insert an ad-hoc one.
1328 *
1329 * Note that we don't have to tell REM about this range because
1330 * PGMHandlerPhysicalRegisterEx will do that for us.
1331 */
1332 Log(("PGMR3PhysMMIORegister: Adding ad-hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));
1333
1334 const uint32_t cPages = cb >> PAGE_SHIFT;
1335 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
1336 rc = MMHyperAlloc(pVM, cbRamRange, 16, MM_TAG_PGM_PHYS, (void **)&pNew);
1337 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
1338
1339 /* Initialize the range. */
1340 pNew->pSelfR0 = MMHyperCCToR0(pVM, pNew);
1341 pNew->pSelfRC = MMHyperCCToRC(pVM, pNew);
1342 pNew->GCPhys = GCPhys;
1343 pNew->GCPhysLast = GCPhysLast;
1344 pNew->cb = cb;
1345 pNew->pszDesc = pszDesc;
1346 pNew->fFlags = 0; /** @todo add some kind of ad-hoc flag? */
1347
1348 pNew->pvR3 = NULL;
1349
1350 uint32_t iPage = cPages;
1351 while (iPage-- > 0)
1352 PGM_PAGE_INIT_ZERO_REAL(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
1353 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);
1354
1355 /* update the page count stats. */
1356 pVM->pgm.s.cZeroPages += cPages;
1357 pVM->pgm.s.cAllPages += cPages;
1358
1359 /* link it */
1360 pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
1361
1362 pgmUnlock(pVM);
1363 }
1364
1365 /*
1366 * Register the access handler.
1367 */
1368 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_MMIO, GCPhys, GCPhysLast,
1369 pfnHandlerR3, pvUserR3,
1370 pfnHandlerR0, pvUserR0,
1371 pfnHandlerRC, pvUserRC, pszDesc);
1372 if ( RT_FAILURE(rc)
1373 && !fRamExists)
1374 {
1375 pVM->pgm.s.cZeroPages -= cb >> PAGE_SHIFT;
1376 pVM->pgm.s.cAllPages -= cb >> PAGE_SHIFT;
1377
1378 /* remove the ad-hoc range. */
1379 pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
1380 pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
1381 MMHyperFree(pVM, pNew); /* free the ad-hoc range we allocated above */
1382 }
1383
1384 return rc;
1385}
1386
1387
1388/**
1389 * This is the interface IOM is using to deregister an MMIO region.
1390 *
1391 * It will take care of calling PGMHandlerPhysicalDeregister and clean up
1392 * any ad-hoc PGMRAMRANGE left behind.
1393 *
1394 * @returns VBox status code.
1395 * @param pVM Pointer to the shared VM structure.
1396 * @param GCPhys The start of the MMIO region.
1397 * @param cb The size of the MMIO region.
1398 */
1399VMMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
1400{
1401 VM_ASSERT_EMT(pVM);
1402
1403 /*
1404 * First deregister the handler, then check if we should remove the ram range.
1405 */
1406 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
1407 if (RT_SUCCESS(rc))
1408 {
1409 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1410 PPGMRAMRANGE pRamPrev = NULL;
1411 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1412 while (pRam && GCPhysLast >= pRam->GCPhys)
1413 {
1414 /** @todo We're being a bit too careful here. rewrite. */
1415 if ( GCPhysLast == pRam->GCPhysLast
1416 && GCPhys == pRam->GCPhys)
1417 {
1418 Assert(pRam->cb == cb);
1419
1420 /*
1421 * See if all the pages are dead MMIO pages.
1422 */
1423 uint32_t const cPages = cb >> PAGE_SHIFT;
1424 bool fAllMMIO = true;
1425 uint32_t iPage = 0;
1426 uint32_t cLeft = cPages;
1427 while (cLeft-- > 0)
1428 {
1429 PPGMPAGE pPage = &pRam->aPages[iPage];
1430 if ( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO
1431 /*|| not-out-of-action later */)
1432 {
1433 fAllMMIO = false;
1434 Assert(PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1435 AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
1436 break;
1437 }
1438 Assert(PGM_PAGE_IS_ZERO(pPage));
1439 pPage++;
1440 }
1441 if (fAllMMIO)
1442 {
1443 /*
1444 * Ad-hoc range, unlink and free it.
1445 */
1446 Log(("PGMR3PhysMMIODeregister: Freeing ad-hoc MMIO range for %RGp-%RGp %s\n",
1447 GCPhys, GCPhysLast, pRam->pszDesc));
1448
1449 pVM->pgm.s.cAllPages -= cPages;
1450 pVM->pgm.s.cZeroPages -= cPages;
1451
1452 pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
1453 pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
1454 MMHyperFree(pVM, pRam);
1455 break;
1456 }
1457 }
1458
1459 /*
1460 * Range match? It will all be within one range (see PGMAllHandler.cpp).
1461 */
1462 if ( GCPhysLast >= pRam->GCPhys
1463 && GCPhys <= pRam->GCPhysLast)
1464 {
1465 Assert(GCPhys >= pRam->GCPhys);
1466 Assert(GCPhysLast <= pRam->GCPhysLast);
1467
1468 /*
1469 * Turn the pages back into RAM pages.
1470 */
1471 uint32_t iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1472 uint32_t cLeft = cb >> PAGE_SHIFT;
1473 while (cLeft--)
1474 {
1475 PPGMPAGE pPage = &pRam->aPages[iPage++]; /* advance to the next page each iteration */
1476 AssertMsg(PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
1477 AssertMsg(PGM_PAGE_IS_ZERO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
1478 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
1479 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_RAM);
1480 }
1481 break;
1482 }
1483
1484 /* next */
1485 pRamPrev = pRam;
1486 pRam = pRam->pNextR3;
1487 }
1488 }
1489
1490 return rc;
1491}
1492
1493
1494/**
1495 * Locate a MMIO2 range.
1496 *
1497 * @returns Pointer to the MMIO2 range.
1498 * @param pVM Pointer to the shared VM structure.
1499 * @param pDevIns The device instance owning the region.
1500 * @param iRegion The region.
1501 */
1502DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
1503{
1504 /*
1505 * Search the list.
1506 */
1507 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
1508 if ( pCur->pDevInsR3 == pDevIns
1509 && pCur->iRegion == iRegion)
1510 return pCur;
1511 return NULL;
1512}
1513
1514
1515/**
1516 * Allocate and register an MMIO2 region.
1517 *
1518 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's
1519 * RAM associated with a device. It is also non-shared memory with a
1520 * permanent ring-3 mapping and page backing (presently).
1521 *
1522 * A MMIO2 range may overlap with base memory if a lot of RAM
1523 * is configured for the VM, in which case we'll drop the base
1524 * memory pages. Presently we will make no attempt to preserve
1525 * anything that happens to be present in the base memory that
1526 * is replaced; this is of course incorrect, but it's too much
1527 * effort.
1528 *
1529 * @returns VBox status code.
1530 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the memory.
1531 * @retval VERR_ALREADY_EXISTS if the region already exists.
1532 *
1533 * @param pVM Pointer to the shared VM structure.
1534 * @param pDevIns The device instance owning the region.
1535 * @param iRegion The region number. If the MMIO2 memory is a PCI I/O region
1536 * this number has to be the number of that region. Otherwise
1537 * it can be any number up to UINT8_MAX.
1538 * @param cb The size of the region. Must be page aligned.
1539 * @param fFlags Reserved for future use, must be zero.
1540 * @param ppv Where to store the pointer to the ring-3 mapping of the memory.
1541 * @param pszDesc The description.
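 *
 * A registration sketch (a hypothetical 16 MB framebuffer as region 0 of a
 * graphics device instance):
 * @code
 *      void *pvVRam;
 *      rc = PGMR3PhysMMIO2Register(pVM, pDevIns, 0 /*iRegion*/, 16 * _1M,
 *                                  0 /*fFlags*/, &pvVRam, "VRam");
 *      AssertRCReturn(rc, rc);
 * @endcode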
1542 */
1543VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc)
1544{
1545 /*
1546 * Validate input.
1547 */
1548 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1549 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1550 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1551 AssertPtrReturn(ppv, VERR_INVALID_POINTER);
1552 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1553 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
1554 AssertReturn(pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
1555 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1556 AssertReturn(cb, VERR_INVALID_PARAMETER);
1557 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
1558
1559 const uint32_t cPages = cb >> PAGE_SHIFT;
1560 AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
1561 AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY);
1562
1563 /*
1564 * For the 2nd+ instance, mangle the description string so it's unique.
1565 */
1566 if (pDevIns->iInstance > 0) /** @todo Move to PDMDevHlp.cpp and use a real string cache. */
1567 {
1568 pszDesc = MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s [%u]", pszDesc, pDevIns->iInstance);
1569 if (!pszDesc)
1570 return VERR_NO_MEMORY;
1571 }
1572
1573 /*
1574 * Try reserve and allocate the backing memory first as this is what is
1575 * most likely to fail.
1576 */
1577 int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc);
1578 if (RT_SUCCESS(rc))
1579 {
1580 void *pvPages;
1581 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE));
1582 if (paPages)
1583 rc = SUPR3PageAllocEx(cPages, 0 /*fFlags*/, &pvPages, NULL /*pR0Ptr*/, paPages);
 else
 rc = VERR_NO_TMP_MEMORY;
1584 if (RT_SUCCESS(rc))
1585 {
1586 memset(pvPages, 0, cPages * PAGE_SIZE);
1587
1588 /*
1589 * Create the MMIO2 range record for it.
1590 */
1591 const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]);
1592 PPGMMMIO2RANGE pNew;
1593 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
1594 AssertLogRelMsgRC(rc, ("cbRamRange=%zu\n", cbRange));
1595 if (RT_SUCCESS(rc))
1596 {
1597 pNew->pDevInsR3 = pDevIns;
1598 pNew->pvR3 = pvPages;
1599 //pNew->pNext = NULL;
1600 //pNew->fMapped = false;
1601 //pNew->fOverlapping = false;
1602 pNew->iRegion = iRegion;
1603 pNew->RamRange.pSelfR0 = MMHyperCCToR0(pVM, &pNew->RamRange);
1604 pNew->RamRange.pSelfRC = MMHyperCCToRC(pVM, &pNew->RamRange);
1605 pNew->RamRange.GCPhys = NIL_RTGCPHYS;
1606 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
1607 pNew->RamRange.pszDesc = pszDesc;
1608 pNew->RamRange.cb = cb;
1609 //pNew->RamRange.fFlags = 0; /// @todo MMIO2 flag?
1610
1611 pNew->RamRange.pvR3 = pvPages;
1612
1613 uint32_t iPage = cPages;
1614 while (iPage-- > 0)
1615 {
1616 PGM_PAGE_INIT(&pNew->RamRange.aPages[iPage],
1617 paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
1618 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
1619 }
1620
1621 /* update page count stats */
1622 pVM->pgm.s.cAllPages += cPages;
1623 pVM->pgm.s.cPrivatePages += cPages;
1624
1625 /*
1626 * Link it into the list.
1627 * Since there is no particular order, just push it.
1628 */
1629 pgmLock(pVM);
1630 pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3;
1631 pVM->pgm.s.pMmio2RangesR3 = pNew;
1632 pgmUnlock(pVM);
1633
1634 *ppv = pvPages;
1635 RTMemTmpFree(paPages);
1636 return VINF_SUCCESS;
1637 }
1638
1639 SUPR3PageFreeEx(pvPages, cPages);
1640 }
1641 RTMemTmpFree(paPages);
1642 MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pszDesc);
1643 }
1644 if (pDevIns->iInstance > 0)
1645 MMR3HeapFree((void *)pszDesc);
1646 return rc;
1647}
1648
1649
1650/**
1651 * Deregisters and frees an MMIO2 region.
1652 *
1653 * Any physical (and virtual) access handlers registered for the region must
1654 * be deregistered before calling this function.
1655 *
1656 * @returns VBox status code.
1657 * @param pVM Pointer to the shared VM structure.
1658 * @param pDevIns The device instance owning the region.
1659 * @param iRegion The region. If it's UINT32_MAX it'll be a wildcard match.
1660 */
1661VMMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
1662{
1663 /*
1664 * Validate input.
1665 */
1666 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1667 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1668 AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
1669
1670 pgmLock(pVM);
1671 int rc = VINF_SUCCESS;
1672 unsigned cFound = 0;
1673 PPGMMMIO2RANGE pPrev = NULL;
1674 PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3;
1675 while (pCur)
1676 {
1677 if ( pCur->pDevInsR3 == pDevIns
1678 && ( iRegion == UINT32_MAX
1679 || pCur->iRegion == iRegion))
1680 {
1681 cFound++;
1682
1683 /*
1684 * Unmap it if it's mapped.
1685 */
1686 if (pCur->fMapped)
1687 {
1688 int rc2 = PGMR3PhysMMIO2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);
1689 AssertRC(rc2);
1690 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1691 rc = rc2;
1692 }
1693
1694 /*
1695 * Unlink it
1696 */
1697 PPGMMMIO2RANGE pNext = pCur->pNextR3;
1698 if (pPrev)
1699 pPrev->pNextR3 = pNext;
1700 else
1701 pVM->pgm.s.pMmio2RangesR3 = pNext;
1702 pCur->pNextR3 = NULL;
1703
1704 /*
1705 * Free the memory.
1706 */
1707 int rc2 = SUPR3PageFreeEx(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT);
1708 AssertRC(rc2);
1709 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1710 rc = rc2;
1711
1712 uint32_t const cPages = pCur->RamRange.cb >> PAGE_SHIFT;
1713 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pCur->RamRange.pszDesc);
1714 AssertRC(rc2);
1715 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1716 rc = rc2;
1717
1718 /* we're leaking hyper memory here if done at runtime. */
1719 Assert( VMR3GetState(pVM) == VMSTATE_OFF
1720 || VMR3GetState(pVM) == VMSTATE_DESTROYING
1721 || VMR3GetState(pVM) == VMSTATE_TERMINATED
1722 || VMR3GetState(pVM) == VMSTATE_CREATING);
1723 /*rc = MMHyperFree(pVM, pCur);
1724 AssertRCReturn(rc, rc); - not safe, see the alloc call. */
1725
1726
1727 /* update page count stats */
1728 pVM->pgm.s.cAllPages -= cPages;
1729 pVM->pgm.s.cPrivatePages -= cPages;
1730
1731 /* next */
1732 pCur = pNext;
1733 }
1734 else
1735 {
1736 pPrev = pCur;
1737 pCur = pCur->pNextR3;
1738 }
1739 }
1740 pgmUnlock(pVM);
1741 return !cFound && iRegion != UINT32_MAX ? VERR_NOT_FOUND : rc;
1742}
1743
1744
1745/**
1746 * Maps a MMIO2 region.
1747 *
1748 * This is done when a guest / the bios / state loading changes the
1749 * PCI config. The replacing of base memory has the same restrictions
1750 * as during registration, of course.
1751 *
1752 * @returns VBox status code.
1753 *
1754 * @param pVM Pointer to the shared VM structure.
1755 * @param pDevIns The device instance owning the region.
 * @param iRegion The region number (same as passed to PGMR3PhysMMIO2Register).
 * @param GCPhys The guest physical address to map the region at.
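 *
 * A mapping sketch (hypothetical: region 0 being mapped at a PCI BAR address,
 * GCPhysBar, handed out by the guest/BIOS):
 * @code
 *      rc = PGMR3PhysMMIO2Map(pVM, pDevIns, 0 /*iRegion*/, GCPhysBar);
 *      AssertRCReturn(rc, rc);
 * @endcode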
1756 */
1757VMMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
1758{
1759 /*
1760 * Validate input
1761 */
1762 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1763 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1764 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1765 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
1766 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
1767 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1768
1769 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1770 AssertReturn(pCur, VERR_NOT_FOUND);
1771 AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER);
1772 Assert(pCur->RamRange.GCPhys == NIL_RTGCPHYS);
1773 Assert(pCur->RamRange.GCPhysLast == NIL_RTGCPHYS);
1774
1775 const RTGCPHYS GCPhysLast = GCPhys + pCur->RamRange.cb - 1;
1776 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1777
1778 /*
1779 * Find our location in the ram range list, checking for
1780 * restriction we don't bother implementing yet (partially overlapping).
1781 */
1782 bool fRamExists = false;
1783 PPGMRAMRANGE pRamPrev = NULL;
1784 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1785 while (pRam && GCPhysLast >= pRam->GCPhys)
1786 {
1787 if ( GCPhys <= pRam->GCPhysLast
1788 && GCPhysLast >= pRam->GCPhys)
1789 {
1790 /* completely within? */
1791 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
1792 && GCPhysLast <= pRam->GCPhysLast,
1793 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
1794 GCPhys, GCPhysLast, pCur->RamRange.pszDesc,
1795 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1796 VERR_PGM_RAM_CONFLICT);
1797 fRamExists = true;
1798 break;
1799 }
1800
1801 /* next */
1802 pRamPrev = pRam;
1803 pRam = pRam->pNextR3;
1804 }
1805 if (fRamExists)
1806 {
1807 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1808 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
1809 while (cPagesLeft-- > 0)
1810 {
1811 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
1812 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
1813 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc),
1814 VERR_PGM_RAM_CONFLICT);
1815 pPage++;
1816 }
1817 }
1818 Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n",
1819 GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc));
1820
1821 /*
1822 * Make the changes.
1823 */
1824 pgmLock(pVM);
1825
1826 pCur->RamRange.GCPhys = GCPhys;
1827 pCur->RamRange.GCPhysLast = GCPhysLast;
1828 pCur->fMapped = true;
1829 pCur->fOverlapping = fRamExists;
1830
1831 if (fRamExists)
1832 {
1833/** @todo use pgmR3PhysFreePageRange here. */
1834 uint32_t cPendingPages = 0;
1835 PGMMFREEPAGESREQ pReq;
1836 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1837 AssertLogRelRCReturn(rc, rc);
1838
1839 /* replace the pages, freeing all present RAM pages. */
1840 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
1841 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1842 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
1843 while (cPagesLeft-- > 0)
1844 {
1845 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
1846 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
1847
1848 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
1849 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhys);
1850 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_MMIO2);
1851 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ALLOCATED);
1852
1853 pVM->pgm.s.cZeroPages--;
1854 GCPhys += PAGE_SIZE;
1855 pPageSrc++;
1856 pPageDst++;
1857 }
1858
1859 if (cPendingPages)
1860 {
1861 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
1862 AssertLogRelRCReturn(rc, rc);
1863 }
1864 GMMR3FreePagesCleanup(pReq);
1865 pgmUnlock(pVM);
1866 }
1867 else
1868 {
1869 RTGCPHYS cb = pCur->RamRange.cb;
1870
1871 /* link in the ram range */
1872 pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev);
1873 pgmUnlock(pVM);
1874
1875 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2);
1876 }
1877
1878 return VINF_SUCCESS;
1879}
1880
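/**
 * @par Example
 * A minimal sketch (not taken from any real device) of driving the map/unmap
 * pair when the guest reprograms a BAR; the helper name and the
 * GCPhysOld/GCPhysNew parameters are assumptions made purely for illustration.
 * @code
 *  static int devExampleRelocateMmio2(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion,
 *                                     RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew)
 *  {
 *      // Unmap the previous location first, if any.
 *      if (GCPhysOld != NIL_RTGCPHYS)
 *      {
 *          int rc = PGMR3PhysMMIO2Unmap(pVM, pDevIns, iRegion, GCPhysOld);
 *          AssertRCReturn(rc, rc);
 *      }
 *      // Map at the new, page aligned, non-zero guest physical address.
 *      return PGMR3PhysMMIO2Map(pVM, pDevIns, iRegion, GCPhysNew);
 *  }
 * @endcode
 */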
1881
1882/**
1883 * Unmaps a MMIO2 region.
1884 *
1885 * This is done when a guest / the bios / state loading changes the
1886 * PCI config. The replacing of base memory has the same restrictions
1887 * as during registration, of course.
1888 */
1889VMMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
1890{
1891 bool fInformREM = false;
1892 RTGCPHYS GCPhysRangeREM;
1893 RTGCPHYS cbRangeREM;
1894
1895 /*
1896 * Validate input
1897 */
1898 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1899 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1900 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1901 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
1902 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
1903 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1904
1905 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1906 AssertReturn(pCur, VERR_NOT_FOUND);
1907 AssertReturn(pCur->fMapped, VERR_WRONG_ORDER);
1908 AssertReturn(pCur->RamRange.GCPhys == GCPhys, VERR_INVALID_PARAMETER);
1909 Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS);
1910
1911 Log(("PGMR3PhysMMIO2Unmap: %RGp-%RGp %s\n",
1912 pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc));
1913
1914 /*
1915 * Unmap it.
1916 */
1917 pgmLock(pVM);
1918
1919 if (pCur->fOverlapping)
1920 {
1921 /* Restore the RAM pages we've replaced. */
1922 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1923 while (pRam->GCPhys > pCur->RamRange.GCPhysLast)
1924 pRam = pRam->pNextR3;
1925
1926 RTHCPHYS const HCPhysZeroPg = pVM->pgm.s.HCPhysZeroPg;
1927 Assert(HCPhysZeroPg != 0 && HCPhysZeroPg != NIL_RTHCPHYS);
1928 PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1929 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
1930 while (cPagesLeft-- > 0)
1931 {
1932 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhysZeroPg);
1933 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_RAM);
1934 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ZERO);
1935 PGM_PAGE_SET_PAGEID(pPageDst, NIL_GMM_PAGEID);
1936
1937 pVM->pgm.s.cZeroPages++;
1938 pPageDst++;
1939 }
1940 }
1941 else
1942 {
1943 GCPhysRangeREM = pCur->RamRange.GCPhys;
1944 cbRangeREM = pCur->RamRange.cb;
1945 fInformREM = true;
1946
1947 pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange);
1948 }
1949
1950 pCur->RamRange.GCPhys = NIL_RTGCPHYS;
1951 pCur->RamRange.GCPhysLast = NIL_RTGCPHYS;
1952 pCur->fOverlapping = false;
1953 pCur->fMapped = false;
1954
1955 pgmUnlock(pVM);
1956
1957 if (fInformREM)
1958 REMR3NotifyPhysRamDeregister(pVM, GCPhysRangeREM, cbRangeREM);
1959
1960 return VINF_SUCCESS;
1961}
1962
1963
1964/**
1965 * Checks if the given address is an MMIO2 base address or not.
1966 *
1967 * @returns true/false accordingly.
1968 * @param pVM Pointer to the shared VM structure.
1969 * @param pDevIns The owner of the memory, optional.
1970 * @param GCPhys The address to check.
1971 */
1972VMMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
1973{
1974 /*
1975 * Validate input
1976 */
1977 VM_ASSERT_EMT_RETURN(pVM, false);
1978 AssertPtrReturn(pDevIns, false);
1979 AssertReturn(GCPhys != NIL_RTGCPHYS, false);
1980 AssertReturn(GCPhys != 0, false);
1981 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);
1982
1983 /*
1984 * Search the list.
1985 */
1986 pgmLock(pVM);
1987 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
1988 if (pCur->RamRange.GCPhys == GCPhys)
1989 {
1990 Assert(pCur->fMapped);
1991 pgmUnlock(pVM);
1992 return true;
1993 }
1994 pgmUnlock(pVM);
1995 return false;
1996}
1997
1998
1999/**
2000 * Gets the HC physical address of a page in the MMIO2 region.
2001 *
2002 * This API is intended for MMHyper and shouldn't be called
2003 * by anyone else...
2004 *
2005 * @returns VBox status code.
2006 * @param pVM Pointer to the shared VM structure.
2007 * @param pDevIns The owner of the memory, optional.
2008 * @param iRegion The region.
2009 * @param off The page expressed as an offset into the MMIO2 region.
2010 * @param pHCPhys Where to store the result.
2011 */
2012VMMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
2013{
2014 /*
2015 * Validate input
2016 */
2017 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2018 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2019 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2020
2021 pgmLock(pVM);
2022 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2023 AssertReturn(pCur, VERR_NOT_FOUND);
2024 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2025
2026 PCPGMPAGE pPage = &pCur->RamRange.aPages[off >> PAGE_SHIFT];
2027 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
2028 pgmUnlock(pVM);
2029 return VINF_SUCCESS;
2030}
2031
2032
2033/**
2034 * Maps a portion of an MMIO2 region into kernel space (host).
2035 *
2036 * The kernel mapping will become invalid when the MMIO2 memory is deregistered
2037 * or the VM is terminated.
2038 *
2039 * @return VBox status code.
2040 *
2041 * @param pVM Pointer to the shared VM structure.
2042 * @param pDevIns The device owning the MMIO2 memory.
2043 * @param iRegion The region.
2044 * @param off The offset into the region. Must be page aligned.
2045 * @param cb The number of bytes to map. Must be page aligned.
2046 * @param pszDesc Mapping description.
2047 * @param pR0Ptr Where to store the R0 address.
2048 */
2049VMMR3DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
2050 const char *pszDesc, PRTR0PTR pR0Ptr)
2051{
2052 /*
2053 * Validate input.
2054 */
2055 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2056 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2057 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2058
2059 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2060 AssertReturn(pCur, VERR_NOT_FOUND);
2061 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2062 AssertReturn(cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2063 AssertReturn(off + cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2064
2065 /*
2066 * Pass the request on to the support library/driver.
2067 */
2068 int rc = SUPR3PageMapKernel(pCur->pvR3, off, cb, 0, pR0Ptr);
2069
2070 return rc;
2071}
2072
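/**
 * @par Example
 * A hedged sketch of exposing the first page of an MMIO2 region to ring-0
 * code via the API above; the region index, the description string and the
 * pThis->pvMmio2R0 member are illustrative assumptions only.
 * @code
 *  RTR0PTR R0Ptr = NIL_RTR0PTR;
 *  int rc = PGMR3PhysMMIO2MapKernel(pVM, pDevIns, 0 /* iRegion */,
 *                                   0 /* off */, PAGE_SIZE /* cb */,
 *                                   "Example/MMIO2-R0", &R0Ptr);
 *  if (RT_SUCCESS(rc))
 *      pThis->pvMmio2R0 = R0Ptr;
 * @endcode
 */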
2073
2074/**
2075 * Registers a ROM image.
2076 *
2077 * Shadowed ROM images require double the amount of backing memory, so
2078 * don't use that unless you have to. Shadowing of ROM images is a process
2079 * where we can select where the reads go and where the writes go. On real
2080 * hardware the chipset provides means to configure this. We provide
2081 * PGMR3PhysProtectROM() for this purpose.
2082 *
2083 * A read-only copy of the ROM image will always be kept around while we
2084 * will allocate RAM pages for the changes on demand (unless all memory
2085 * is configured to be preallocated).
2086 *
2087 * @returns VBox status.
2088 * @param pVM VM Handle.
2089 * @param pDevIns The device instance owning the ROM.
2090 * @param GCPhys First physical address in the range.
2091 * Must be page aligned!
2092 * @param cb The size of the range (in bytes).
2093 * Must be page aligned!
2094 * @param pvBinary Pointer to the binary data backing the ROM image.
2095 * This must be exactly \a cb in size.
2096 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAGS_SHADOWED
2097 * and/or PGMPHYS_ROM_FLAGS_PERMANENT_BINARY.
2098 * @param pszDesc Pointer to description string. This must not be freed.
2099 *
2100 * @remark There is no way to remove the ROM yet, either automatically on device
2101 * cleanup or manually from the device. This isn't difficult in any way, it's
2102 * just not something we expect to be necessary for a while.
2103 */
2104VMMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
2105 const void *pvBinary, uint32_t fFlags, const char *pszDesc)
2106{
2107 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p fFlags=%#x pszDesc=%s\n",
2108 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, fFlags, pszDesc));
2109
2110 /*
2111 * Validate input.
2112 */
2113 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2114 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
2115 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
2116 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2117 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2118 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
2119 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
2120 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAGS_SHADOWED | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
2121 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
2122
2123 const uint32_t cPages = cb >> PAGE_SHIFT;
2124
2125 /*
2126 * Find the ROM location in the ROM list first.
2127 */
2128 PPGMROMRANGE pRomPrev = NULL;
2129 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
2130 while (pRom && GCPhysLast >= pRom->GCPhys)
2131 {
2132 if ( GCPhys <= pRom->GCPhysLast
2133 && GCPhysLast >= pRom->GCPhys)
2134 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
2135 GCPhys, GCPhysLast, pszDesc,
2136 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
2137 VERR_PGM_RAM_CONFLICT);
2138 /* next */
2139 pRomPrev = pRom;
2140 pRom = pRom->pNextR3;
2141 }
2142
2143 /*
2144 * Find the RAM location and check for conflicts.
2145 *
2146 * Conflict detection is a bit different than for RAM
2147 * registration since a ROM can be located within a RAM
2148 * range. So, what we have to check for is other memory
2149 * types (other than RAM that is) and that we don't span
2150 * more than one RAM range (lazy).
2151 */
2152 bool fRamExists = false;
2153 PPGMRAMRANGE pRamPrev = NULL;
2154 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
2155 while (pRam && GCPhysLast >= pRam->GCPhys)
2156 {
2157 if ( GCPhys <= pRam->GCPhysLast
2158 && GCPhysLast >= pRam->GCPhys)
2159 {
2160 /* completely within? */
2161 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
2162 && GCPhysLast <= pRam->GCPhysLast,
2163 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
2164 GCPhys, GCPhysLast, pszDesc,
2165 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
2166 VERR_PGM_RAM_CONFLICT);
2167 fRamExists = true;
2168 break;
2169 }
2170
2171 /* next */
2172 pRamPrev = pRam;
2173 pRam = pRam->pNextR3;
2174 }
2175 if (fRamExists)
2176 {
2177 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2178 uint32_t cPagesLeft = cPages;
2179 while (cPagesLeft-- > 0)
2180 {
2181 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
2182 ("%RGp (%R[pgmpage]) isn't a RAM page - registering %RGp-%RGp (%s).\n",
2183 pRam->GCPhys + ((RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << PAGE_SHIFT),
2184 pPage, GCPhys, GCPhysLast, pszDesc), VERR_PGM_RAM_CONFLICT);
2185 Assert(PGM_PAGE_IS_ZERO(pPage));
2186 pPage++;
2187 }
2188 }
2189
2190 /*
2191 * Update the base memory reservation if necessary.
2192 */
2193 uint32_t cExtraBaseCost = fRamExists ? cPages : 0;
2194 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
2195 cExtraBaseCost += cPages;
2196 if (cExtraBaseCost)
2197 {
2198 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
2199 if (RT_FAILURE(rc))
2200 return rc;
2201 }
2202
2203 /*
2204 * Allocate memory for the virgin copy of the RAM.
2205 */
2206 PGMMALLOCATEPAGESREQ pReq;
2207 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
2208 AssertRCReturn(rc, rc);
2209
2210 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2211 {
2212 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
2213 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
2214 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
2215 }
2216
2217 pgmLock(pVM);
2218 rc = GMMR3AllocatePagesPerform(pVM, pReq);
2219 pgmUnlock(pVM);
2220 if (RT_FAILURE(rc))
2221 {
2222 GMMR3AllocatePagesCleanup(pReq);
2223 return rc;
2224 }
2225
2226 /*
2227 * Allocate the new ROM range and RAM range (if necessary).
2228 */
2229 PPGMROMRANGE pRomNew;
2230 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
2231 if (RT_SUCCESS(rc))
2232 {
2233 PPGMRAMRANGE pRamNew = NULL;
2234 if (!fRamExists)
2235 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
2236 if (RT_SUCCESS(rc))
2237 {
2238 pgmLock(pVM);
2239
2240 /*
2241 * Initialize and insert the RAM range (if required).
2242 */
2243 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
2244 if (!fRamExists)
2245 {
2246 pRamNew->pSelfR0 = MMHyperCCToR0(pVM, pRamNew);
2247 pRamNew->pSelfRC = MMHyperCCToRC(pVM, pRamNew);
2248 pRamNew->GCPhys = GCPhys;
2249 pRamNew->GCPhysLast = GCPhysLast;
2250 pRamNew->cb = cb;
2251 pRamNew->pszDesc = pszDesc;
2252 pRamNew->fFlags = 0;
2253 pRamNew->pvR3 = NULL;
2254
2255 PPGMPAGE pPage = &pRamNew->aPages[0];
2256 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
2257 {
2258 PGM_PAGE_INIT(pPage,
2259 pReq->aPages[iPage].HCPhysGCPhys,
2260 pReq->aPages[iPage].idPage,
2261 PGMPAGETYPE_ROM,
2262 PGM_PAGE_STATE_ALLOCATED);
2263
2264 pRomPage->Virgin = *pPage;
2265 }
2266
2267 pVM->pgm.s.cAllPages += cPages;
2268 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
2269 }
2270 else
2271 {
2272 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2273 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
2274 {
2275 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_ROM);
2276 PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
2277 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
2278 PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);
2279
2280 pRomPage->Virgin = *pPage;
2281 }
2282
2283 pRamNew = pRam;
2284
2285 pVM->pgm.s.cZeroPages -= cPages;
2286 }
2287 pVM->pgm.s.cPrivatePages += cPages;
2288
2289 pgmUnlock(pVM);
2290
2291
2292 /*
2293 * !HACK ALERT! REM + (Shadowed) ROM ==> mess.
2294 *
2295 * If it's shadowed we'll register the handler after the ROM notification
2296 * so we get the access handler callbacks that we should. If it isn't
2297 * shadowed we'll do it the other way around to make REM use the built-in
2298 * ROM behavior and not the handler behavior (which is to route all access
2299 * to PGM atm).
2300 */
2301 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
2302 {
2303 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, true /* fShadowed */);
2304 rc = PGMR3HandlerPhysicalRegister(pVM,
2305 fFlags & PGMPHYS_ROM_FLAGS_SHADOWED
2306 ? PGMPHYSHANDLERTYPE_PHYSICAL_ALL
2307 : PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
2308 GCPhys, GCPhysLast,
2309 pgmR3PhysRomWriteHandler, pRomNew,
2310 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
2311 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
2312 }
2313 else
2314 {
2315 rc = PGMR3HandlerPhysicalRegister(pVM,
2316 fFlags & PGMPHYS_ROM_FLAGS_SHADOWED
2317 ? PGMPHYSHANDLERTYPE_PHYSICAL_ALL
2318 : PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
2319 GCPhys, GCPhysLast,
2320 pgmR3PhysRomWriteHandler, pRomNew,
2321 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
2322 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
2323 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false /* fShadowed */);
2324 }
2325 if (RT_SUCCESS(rc))
2326 {
2327 pgmLock(pVM);
2328
2329 /*
2330 * Copy the image over to the virgin pages.
2331 * This must be done after linking in the RAM range.
2332 */
2333 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
2334 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
2335 {
2336 void *pvDstPage;
2337 PPGMPAGEMAP pMapIgnored;
2338 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pMapIgnored, &pvDstPage);
2339 if (RT_FAILURE(rc))
2340 {
2341 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
2342 break;
2343 }
2344 memcpy(pvDstPage, (const uint8_t *)pvBinary + (iPage << PAGE_SHIFT), PAGE_SIZE);
2345 }
2346 if (RT_SUCCESS(rc))
2347 {
2348 /*
2349 * Initialize the ROM range.
2350 * Note that the Virgin member of the pages has already been initialized above.
2351 */
2352 pRomNew->GCPhys = GCPhys;
2353 pRomNew->GCPhysLast = GCPhysLast;
2354 pRomNew->cb = cb;
2355 pRomNew->fFlags = fFlags;
2356 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY ? pvBinary : NULL;
2357 pRomNew->pszDesc = pszDesc;
2358
2359 for (unsigned iPage = 0; iPage < cPages; iPage++)
2360 {
2361 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
2362 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
2363 PGM_PAGE_INIT_ZERO_REAL(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
2364 }
2365
2366 /* update the page count stats */
2367 pVM->pgm.s.cZeroPages += cPages;
2368 pVM->pgm.s.cAllPages += cPages;
2369
2370 /*
2371 * Insert the ROM range, tell REM and return successfully.
2372 */
2373 pRomNew->pNextR3 = pRom;
2374 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
2375 pRomNew->pNextRC = pRom ? MMHyperCCToRC(pVM, pRom) : NIL_RTRCPTR;
2376
2377 if (pRomPrev)
2378 {
2379 pRomPrev->pNextR3 = pRomNew;
2380 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
2381 pRomPrev->pNextRC = MMHyperCCToRC(pVM, pRomNew);
2382 }
2383 else
2384 {
2385 pVM->pgm.s.pRomRangesR3 = pRomNew;
2386 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
2387 pVM->pgm.s.pRomRangesRC = MMHyperCCToRC(pVM, pRomNew);
2388 }
2389
2390 GMMR3AllocatePagesCleanup(pReq);
2391 pgmUnlock(pVM);
2392 return VINF_SUCCESS;
2393 }
2394
2395 /* bail out */
2396
2397 pgmUnlock(pVM);
2398 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
2399 AssertRC(rc2);
2400 pgmLock(pVM);
2401 }
2402
2403 if (!fRamExists)
2404 {
2405 pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
2406 MMHyperFree(pVM, pRamNew);
2407 }
2408 }
2409 MMHyperFree(pVM, pRomNew);
2410 }
2411
2412 /** @todo Purge the mapping cache or something... */
2413 GMMR3FreeAllocatedPages(pVM, pReq);
2414 GMMR3AllocatePagesCleanup(pReq);
2415 pgmUnlock(pVM);
2416 return rc;
2417}
2418
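/**
 * @par Example
 * A minimal sketch, not lifted from any real device, of registering a 64 KB
 * shadowed system BIOS image at the top of the first megabyte. The pvBios
 * buffer is an assumption; with PGMPHYS_ROM_FLAGS_PERMANENT_BINARY it must
 * stay valid for the lifetime of the VM.
 * @code
 *  int rc = PGMR3PhysRomRegister(pVM, pDevIns,
 *                                UINT32_C(0xf0000) /* GCPhys */, _64K /* cb */,
 *                                pvBios,
 *                                PGMPHYS_ROM_FLAGS_SHADOWED | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY,
 *                                "Example BIOS");
 *  AssertRCReturn(rc, rc);
 * @endcode
 */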
2419
2420/**
2421 * \#PF Handler callback for ROM write accesses.
2422 *
2423 * @returns VINF_SUCCESS if the handler has carried out the operation.
2424 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
2425 * @param pVM VM Handle.
2426 * @param GCPhys The physical address the guest is writing to.
2427 * @param pvPhys The HC mapping of that address.
2428 * @param pvBuf What the guest is reading/writing.
2429 * @param cbBuf How much it's reading/writing.
2430 * @param enmAccessType The access type.
2431 * @param pvUser User argument.
2432 */
2433static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
2434{
2435 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
2436 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
2437 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
2438 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2439 Log5(("pgmR3PhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
2440
2441 if (enmAccessType == PGMACCESSTYPE_READ)
2442 {
2443 switch (pRomPage->enmProt)
2444 {
2445 /*
2446 * Take the default action.
2447 */
2448 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
2449 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
2450 case PGMROMPROT_READ_ROM_WRITE_RAM:
2451 case PGMROMPROT_READ_RAM_WRITE_RAM:
2452 return VINF_PGM_HANDLER_DO_DEFAULT;
2453
2454 default:
2455 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
2456 pRom->aPages[iPage].enmProt, iPage, GCPhys),
2457 VERR_INTERNAL_ERROR);
2458 }
2459 }
2460 else
2461 {
2462 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
2463 switch (pRomPage->enmProt)
2464 {
2465 /*
2466 * Ignore writes.
2467 */
2468 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
2469 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
2470 return VINF_SUCCESS;
2471
2472 /*
2473 * Write to the ram page.
2474 */
2475 case PGMROMPROT_READ_ROM_WRITE_RAM:
2476 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
2477 {
2478 /* This should be impossible now, pvPhys no longer works across pages. */
2479 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
2480
2481 /*
2482 * Take the lock, do lazy allocation, map the page and copy the data.
2483 *
2484 * Note that we have to bypass the mapping TLB since it works on
2485 * guest physical addresses and entering the shadow page would
2486 * kind of screw things up...
2487 */
2488 int rc = pgmLock(pVM);
2489 AssertRC(rc);
2490 PPGMPAGE pShadowPage = &pRomPage->Shadow;
2491 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
2492 {
2493 pShadowPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
2494 AssertLogRelReturn(pShadowPage, VERR_INTERNAL_ERROR);
2495 }
2496
2497 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pShadowPage) != PGM_PAGE_STATE_ALLOCATED))
2498 {
2499 rc = pgmPhysPageMakeWritable(pVM, pShadowPage, GCPhys);
2500 if (RT_FAILURE(rc))
2501 {
2502 pgmUnlock(pVM);
2503 return rc;
2504 }
2505 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
2506 }
2507
2508 void *pvDstPage;
2509 PPGMPAGEMAP pMapIgnored;
2510 int rc2 = pgmPhysPageMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pMapIgnored, &pvDstPage);
2511 if (RT_SUCCESS(rc2))
2512 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
2513 else
2514 rc = rc2;
2515
2516 pgmUnlock(pVM);
2517 return rc;
2518 }
2519
2520 default:
2521 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
2522 pRom->aPages[iPage].enmProt, iPage, GCPhys),
2523 VERR_INTERNAL_ERROR);
2524 }
2525 }
2526}
2527
2528
2529/**
2530 * Called by PGMR3Reset to reset the shadow, switch to the virgin,
2531 * and verify that the virgin part is untouched.
2532 *
2533 * This is done after the normal memory has been cleared.
2534 *
2535 * ASSUMES that the caller owns the PGM lock.
2536 *
2537 * @param pVM The VM handle.
2538 */
2539int pgmR3PhysRomReset(PVM pVM)
2540{
2541 Assert(PGMIsLockOwner(pVM));
2542 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2543 {
2544 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
2545
2546 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
2547 {
2548 /*
2549 * Reset the physical handler.
2550 */
2551 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
2552 AssertRCReturn(rc, rc);
2553
2554 /*
2555 * What we do with the shadow pages depends on the memory
2556 * preallocation option. If not enabled, we'll just throw
2557 * out all the dirty pages and replace them by the zero page.
2558 */
2559 if (!pVM->pgm.s.fRamPreAlloc)
2560 {
2561 /* Free the dirty pages. */
2562 uint32_t cPendingPages = 0;
2563 PGMMFREEPAGESREQ pReq;
2564 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2565 AssertRCReturn(rc, rc);
2566
2567 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2568 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
2569 {
2570 Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) == PGM_PAGE_STATE_ALLOCATED);
2571 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRom->aPages[iPage].Shadow, pRom->GCPhys + (iPage << PAGE_SHIFT));
2572 AssertLogRelRCReturn(rc, rc);
2573 }
2574
2575 if (cPendingPages)
2576 {
2577 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2578 AssertLogRelRCReturn(rc, rc);
2579 }
2580 GMMR3FreePagesCleanup(pReq);
2581 }
2582 else
2583 {
2584 /* clear all the shadow pages. */
2585 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2586 {
2587 Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO);
2588
2589 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
2590 rc = pgmPhysPageMakeWritable(pVM, &pRom->aPages[iPage].Shadow, GCPhys);
2591 if (RT_FAILURE(rc))
2592 break;
2593
2594 void *pvDstPage;
2595 PPGMPAGEMAP pMapIgnored;
2596 rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pMapIgnored, &pvDstPage);
2597 if (RT_FAILURE(rc))
2598 break;
2599 ASMMemZeroPage(pvDstPage);
2600 }
2601 AssertRCReturn(rc, rc);
2602 }
2603 }
2604
2605#ifdef VBOX_STRICT
2606 /*
2607 * Verify that the virgin page is unchanged if possible.
2608 */
2609 if (pRom->pvOriginal)
2610 {
2611 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
2612 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbSrcPage += PAGE_SIZE)
2613 {
2614 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
2615 PPGMPAGEMAP pMapIgnored;
2616 void *pvDstPage;
2617 int rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pMapIgnored, &pvDstPage);
2618 if (RT_FAILURE(rc))
2619 break;
2620 if (memcmp(pvDstPage, pbSrcPage, PAGE_SIZE))
2621 LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
2622 GCPhys, pRom->pszDesc));
2623 }
2624 }
2625#endif
2626 }
2627
2628 return VINF_SUCCESS;
2629}
2630
2631
2632/**
2633 * Change the shadowing of a range of ROM pages.
2634 *
2635 * This is intended for implementing chipset specific memory registers
2636 * and will not be very strict about the input. It will silently ignore
2637 * any pages that are not part of a shadowed ROM.
2638 *
2639 * @returns VBox status code.
2640 * @retval VINF_PGM_SYNC_CR3
2641 *
2642 * @param pVM Pointer to the shared VM structure.
2643 * @param GCPhys Where to start. Page aligned.
2644 * @param cb How much to change. Page aligned.
2645 * @param enmProt The new ROM protection.
2646 */
2647VMMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
2648{
2649 /*
2650 * Check input
2651 */
2652 if (!cb)
2653 return VINF_SUCCESS;
2654 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2655 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2656 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2657 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2658 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
2659
2660 /*
2661 * Process the request.
2662 */
2663 pgmLock(pVM);
2664 int rc = VINF_SUCCESS;
2665 bool fFlushTLB = false;
2666 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2667 {
2668 if ( GCPhys <= pRom->GCPhysLast
2669 && GCPhysLast >= pRom->GCPhys
2670 && (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
2671 {
2672 /*
2673 * Iterate the relevant pages and make the necessary changes.
2674 */
2675 bool fChanges = false;
2676 uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
2677 ? pRom->cb >> PAGE_SHIFT
2678 : (GCPhysLast - pRom->GCPhys + 1) >> PAGE_SHIFT;
2679 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
2680 iPage < cPages;
2681 iPage++)
2682 {
2683 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2684 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
2685 {
2686 fChanges = true;
2687
2688 /* flush references to the page. */
2689 PPGMPAGE pRamPage = pgmPhysGetPage(&pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));
2690 int rc2 = pgmPoolTrackFlushGCPhys(pVM, pRamPage, &fFlushTLB);
2691 if (rc2 != VINF_SUCCESS && (rc == VINF_SUCCESS || RT_FAILURE(rc2)))
2692 rc = rc2;
2693
2694 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
2695 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
2696
2697 *pOld = *pRamPage;
2698 *pRamPage = *pNew;
2699 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
2700 }
2701 pRomPage->enmProt = enmProt;
2702 }
2703
2704 /*
2705 * Reset the access handler if we made changes, no need
2706 * to optimize this.
2707 */
2708 if (fChanges)
2709 {
2710 int rc = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
2711 if (RT_FAILURE(rc))
2712 {
2713 pgmUnlock(pVM);
2714 AssertRC(rc);
2715 return rc;
2716 }
2717 }
2718
2719 /* Advance - cb isn't updated. */
2720 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
2721 }
2722 }
2723 pgmUnlock(pVM);
2724 if (fFlushTLB)
2725 PGM_INVL_ALL_VCPU_TLBS(pVM);
2726
2727 return rc;
2728}
2729
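/**
 * @par Example
 * A rough sketch of how chipset emulation might use the API above to let the
 * guest patch a shadowed ROM and then lock in the RAM copy; the address range
 * is illustrative.
 * @code
 *  // 1. Reads still hit the ROM while writes land in the shadow RAM.
 *  int rc = PGMR3PhysRomProtect(pVM, UINT32_C(0xf0000), _64K, PGMROMPROT_READ_ROM_WRITE_RAM);
 *  AssertRCReturn(rc, rc);
 *  // ... the guest copies/patches the image ...
 *  // 2. Switch reads over to the RAM copy and ignore further writes.
 *  rc = PGMR3PhysRomProtect(pVM, UINT32_C(0xf0000), _64K, PGMROMPROT_READ_RAM_WRITE_IGNORE);
 *  // rc may be VINF_PGM_SYNC_CR3, see the status codes documented above.
 * @endcode
 */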
2730
2731/**
2732 * Sets the Address Gate 20 state.
2733 *
2734 * @param pVCpu The VCPU to operate on.
2735 * @param fEnable True if the gate should be enabled.
2736 * False if the gate should be disabled.
2737 */
2738VMMDECL(void) PGMR3PhysSetA20(PVMCPU pVCpu, bool fEnable)
2739{
2740 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVCpu->pgm.s.fA20Enabled));
2741 if (pVCpu->pgm.s.fA20Enabled != fEnable)
2742 {
2743 pVCpu->pgm.s.fA20Enabled = fEnable;
2744 pVCpu->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
2745 REMR3A20Set(pVCpu->pVMR3, pVCpu, fEnable);
2746 /** @todo we're not handling this correctly for VT-x / AMD-V. See #2911 */
2747 }
2748}
2749
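/**
 * @par Example
 * A hedged sketch of how port 92h emulation might drive the gate; obtaining
 * the VMCPU via VMMGetCpu and decoding bit 1 of the port value are
 * illustrative assumptions.
 * @code
 *  PVMCPU pVCpu = VMMGetCpu(pVM);   // calling EMT
 *  PGMR3PhysSetA20(pVCpu, RT_BOOL(u8PortVal & RT_BIT(1)));
 * @endcode
 */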
2750
2751/**
2752 * Tree enumeration callback for dealing with age rollover.
2753 * It will perform a simple compression of the current age.
2754 */
2755static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
2756{
2757 Assert(PGMIsLockOwner((PVM)pvUser));
2758 /* Age compression - ASSUMES iNow == 4. */
2759 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
2760 if (pChunk->iAge >= UINT32_C(0xffffff00))
2761 pChunk->iAge = 3;
2762 else if (pChunk->iAge >= UINT32_C(0xfffff000))
2763 pChunk->iAge = 2;
2764 else if (pChunk->iAge)
2765 pChunk->iAge = 1;
2766 else /* iAge = 0 */
2767 pChunk->iAge = 4;
2768
2769 /* reinsert */
2770 PVM pVM = (PVM)pvUser;
2771 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
2772 pChunk->AgeCore.Key = pChunk->iAge;
2773 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2774 return 0;
2775}
2776
2777
2778/**
2779 * Tree enumeration callback that updates the chunks that have
2780 * been used since the last ageing.
2781 */
2782static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
2783{
2784 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
2785 if (!pChunk->iAge)
2786 {
2787 PVM pVM = (PVM)pvUser;
2788 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
2789 pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
2790 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2791 }
2792
2793 return 0;
2794}
2795
2796
2797/**
2798 * Performs ageing of the ring-3 chunk mappings.
2799 *
2800 * @param pVM The VM handle.
2801 */
2802VMMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
2803{
2804 pgmLock(pVM);
2805 pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
2806 pVM->pgm.s.ChunkR3Map.iNow++;
2807 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
2808 {
2809 pVM->pgm.s.ChunkR3Map.iNow = 4;
2810 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
2811 }
2812 else
2813 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
2814 pgmUnlock(pVM);
2815}
2816
2817
2818/**
2819 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
2820 */
2821typedef struct PGMR3PHYSCHUNKUNMAPCB
2822{
2823 PVM pVM; /**< The VM handle. */
2824 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
2825} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
2826
2827
2828/**
2829 * Callback used to find the mapping that's been unused for
2830 * the longest time.
2831 */
2832static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
2833{
2834 do
2835 {
2836 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
2837 if ( pChunk->iAge
2838 && !pChunk->cRefs)
2839 {
2840 /*
2841 * Check that it's not in any of the TLBs.
2842 */
2843 PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
2844 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
2845 if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
2846 {
2847 pChunk = NULL;
2848 break;
2849 }
2850 if (pChunk)
2851 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
2852 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
2853 {
2854 pChunk = NULL;
2855 break;
2856 }
2857 if (pChunk)
2858 {
2859 ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
2860 return 1; /* done */
2861 }
2862 }
2863
2864 /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
2865 pNode = pNode->pList;
2866 } while (pNode);
2867 return 0;
2868}
2869
2870
2871/**
2872 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
2873 *
2874 * The candidate will not be part of any TLBs, so no need to flush
2875 * anything afterwards.
2876 *
2877 * @returns Chunk id.
2878 * @param pVM The VM handle.
2879 */
2880static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
2881{
2882 Assert(PGMIsLockOwner(pVM));
2883
2884 /*
2885 * Do tree ageing first?
2886 */
2887 if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
2888 PGMR3PhysChunkAgeing(pVM);
2889
2890 /*
2891 * Enumerate the age tree starting with the left most node.
2892 */
2893 PGMR3PHYSCHUNKUNMAPCB Args;
2894 Args.pVM = pVM;
2895 Args.pChunk = NULL;
2896 if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
2897 return Args.pChunk->Core.Key;
2898 return INT32_MAX;
2899}
2900
2901
2902/**
2903 * Maps the given chunk into the ring-3 mapping cache.
2904 *
2905 * This will call ring-0.
2906 *
2907 * @returns VBox status code.
2908 * @param pVM The VM handle.
2909 * @param idChunk The chunk in question.
2910 * @param ppChunk Where to store the chunk tracking structure.
2911 *
2912 * @remarks Called from within the PGM critical section.
2913 */
2914int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
2915{
2916 int rc;
2917
2918 Assert(PGMIsLockOwner(pVM));
2919 /*
2920 * Allocate a new tracking structure first.
2921 */
2922#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
2923 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
2924#else
2925 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3UkHeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk), NULL);
2926#endif
2927 AssertReturn(pChunk, VERR_NO_MEMORY);
2928 pChunk->Core.Key = idChunk;
2929 pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
2930 pChunk->iAge = 0;
2931 pChunk->cRefs = 0;
2932 pChunk->cPermRefs = 0;
2933 pChunk->pv = NULL;
2934
2935 /*
2936 * Request the ring-0 part to map the chunk in question and if
2937 * necessary unmap another one to make space in the mapping cache.
2938 */
2939 GMMMAPUNMAPCHUNKREQ Req;
2940 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
2941 Req.Hdr.cbReq = sizeof(Req);
2942 Req.pvR3 = NULL;
2943 Req.idChunkMap = idChunk;
2944 Req.idChunkUnmap = NIL_GMM_CHUNKID;
2945 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
2946 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
2947/** @todo This is wrong. Any thread in the VM process should be able to do this,
2948 * there are dependencies on this. What currently saves the day is that
2949 * we don't unmap anything and that all non-zero memory will therefore
2950 * be present when non-EMTs try to access it. */
2951 rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
2952 if (RT_SUCCESS(rc))
2953 {
2954 /*
2955 * Update the tree.
2956 */
2957 /* insert the new one. */
2958 AssertPtr(Req.pvR3);
2959 pChunk->pv = Req.pvR3;
2960 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
2961 AssertRelease(fRc);
2962 pVM->pgm.s.ChunkR3Map.c++;
2963
2964 fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2965 AssertRelease(fRc);
2966
2967 /* remove the unmapped one. */
2968 if (Req.idChunkUnmap != NIL_GMM_CHUNKID)
2969 {
2970 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
2971 AssertRelease(pUnmappedChunk);
2972 pUnmappedChunk->pv = NULL;
2973 pUnmappedChunk->Core.Key = UINT32_MAX;
2974#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
2975 MMR3HeapFree(pUnmappedChunk);
2976#else
2977 MMR3UkHeapFree(pVM, pUnmappedChunk, MM_TAG_PGM_CHUNK_MAPPING);
2978#endif
2979 pVM->pgm.s.ChunkR3Map.c--;
2980 }
2981 }
2982 else
2983 {
2984 AssertRC(rc);
2985#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
2986 MMR3HeapFree(pChunk);
2987#else
2988 MMR3UkHeapFree(pVM, pChunk, MM_TAG_PGM_CHUNK_MAPPING);
2989#endif
2990 pChunk = NULL;
2991 }
2992
2993 *ppChunk = pChunk;
2994 return rc;
2995}
2996
2997
2998/**
2999 * For VMMCALLRING3_PGM_MAP_CHUNK, considered internal.
3000 *
3001 * @returns see pgmR3PhysChunkMap.
3002 * @param pVM The VM handle.
3003 * @param idChunk The chunk to map.
3004 */
3005VMMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
3006{
3007 PPGMCHUNKR3MAP pChunk;
3008 int rc;
3009
3010 pgmLock(pVM);
3011 rc = pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
3012 pgmUnlock(pVM);
3013 return rc;
3014}
3015
3016
3017/**
3018 * Invalidates the TLB for the ring-3 mapping cache.
3019 *
3020 * @param pVM The VM handle.
3021 */
3022VMMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
3023{
3024 pgmLock(pVM);
3025 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
3026 {
3027 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
3028 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
3029 }
3030 pgmUnlock(pVM);
3031}
3032
3033
3034/**
3035 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES.
3036 *
3037 * This function will also work the VM_FF_PGM_NO_MEMORY force action flag, to
3038 * signal and clear the out of memory condition. When contracted, this API is
3039 * used to try clear the condition when the user wants to resume.
3040 * used to try to clear the condition when the user wants to resume.
3041 * @returns The following VBox status codes.
3042 * @retval VINF_SUCCESS on success. FFs cleared.
3043 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in
3044 * this case and it gets accompanied by VM_FF_PGM_NO_MEMORY.
3045 *
3046 * @param pVM The VM handle.
3047 *
3048 * @remarks The VINF_EM_NO_MEMORY status is for the benefit of the FF processing
3049 * in EM.cpp and shouldn't be propagated outside TRPM, HWACCM, EM and
3050 * pgmPhysEnsureHandyPage. There is one exception to this in the \#PF
3051 * handler.
3052 */
3053VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
3054{
3055 pgmLock(pVM);
3056
3057 /*
3058 * Allocate more pages, noting down the index of the first new page.
3059 */
3060 uint32_t iClear = pVM->pgm.s.cHandyPages;
3061 AssertMsgReturn(iClear <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d", iClear), VERR_INTERNAL_ERROR);
3062 Log(("PGMR3PhysAllocateHandyPages: %d -> %d\n", iClear, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
3063 int rcAlloc = VINF_SUCCESS;
3064 int rcSeed = VINF_SUCCESS;
3065 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
3066 while (rc == VERR_GMM_SEED_ME)
3067 {
3068 void *pvChunk;
3069 rcAlloc = rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
3070 if (RT_SUCCESS(rc))
3071 {
3072 rcSeed = rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
3073 if (RT_FAILURE(rc))
3074 SUPR3PageFree(pvChunk, GMM_CHUNK_SIZE >> PAGE_SHIFT);
3075 }
3076 if (RT_SUCCESS(rc))
3077 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
3078 }
3079
3080 if (RT_SUCCESS(rc))
3081 {
3082 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
3083 Assert(pVM->pgm.s.cHandyPages > 0);
3084 VM_FF_CLEAR(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
3085 VM_FF_CLEAR(pVM, VM_FF_PGM_NO_MEMORY);
3086
3087 /*
3088 * Clear the pages.
3089 */
3090 while (iClear < pVM->pgm.s.cHandyPages)
3091 {
3092 PGMMPAGEDESC pPage = &pVM->pgm.s.aHandyPages[iClear];
3093 void *pv;
3094 rc = pgmPhysPageMapByPageID(pVM, pPage->idPage, pPage->HCPhysGCPhys, &pv);
3095 AssertLogRelMsgBreak(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc", pPage->idPage, pPage->HCPhysGCPhys, rc));
3096 ASMMemZeroPage(pv);
3097 iClear++;
3098 Log3(("PGMR3PhysAllocateHandyPages: idPage=%#x HCPhys=%RGp\n", pPage->idPage, pPage->HCPhysGCPhys));
3099 }
3100 }
3101 else
3102 {
3103 /*
3104 * We should never get here unless there is a genuine shortage of
3105 * memory (or some internal error). Flag the error so the VM can be
3106 * suspended ASAP and the user informed. If we're totally out of
3107 * handy pages we will return failure.
3108 */
3109 /* Report the failure. */
3110 LogRel(("PGM: Failed to procure handy pages; rc=%Rrc rcAlloc=%Rrc rcSeed=%Rrc cHandyPages=%#x\n"
3111 " cAllPages=%#x cPrivatePages=%#x cSharedPages=%#x cZeroPages=%#x\n",
3112 rc, rcAlloc, rcSeed,
3113 pVM->pgm.s.cHandyPages,
3114 pVM->pgm.s.cAllPages,
3115 pVM->pgm.s.cPrivatePages,
3116 pVM->pgm.s.cSharedPages,
3117 pVM->pgm.s.cZeroPages));
3118 if ( rc != VERR_NO_MEMORY
3119 && rc != VERR_LOCK_FAILED)
3120 {
3121 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
3122 {
3123 LogRel(("PGM: aHandyPages[#%#04x] = {.HCPhysGCPhys=%RHp, .idPage=%#08x, .idSharedPage=%#08x}\n",
3124 i, pVM->pgm.s.aHandyPages[i].HCPhysGCPhys, pVM->pgm.s.aHandyPages[i].idPage,
3125 pVM->pgm.s.aHandyPages[i].idSharedPage));
3126 uint32_t const idPage = pVM->pgm.s.aHandyPages[i].idPage;
3127 if (idPage != NIL_GMM_PAGEID)
3128 {
3129 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
3130 pRam;
3131 pRam = pRam->pNextR3)
3132 {
3133 uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
3134 for (uint32_t iPage = 0; iPage < cPages; iPage++)
3135 if (PGM_PAGE_GET_PAGEID(&pRam->aPages[iPage]) == idPage)
3136 LogRel(("PGM: Used by %RGp %R[pgmpage] (%s)\n",
3137 pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pRam->aPages[iPage], pRam->pszDesc));
3138 }
3139 }
3140 }
3141 }
3142
3143 /* Set the FFs and adjust rc. */
3144 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
3145 VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
3146 if ( rc == VERR_NO_MEMORY
3147 || rc == VERR_LOCK_FAILED)
3148 rc = VINF_EM_NO_MEMORY;
3149 }
3150
3151 pgmUnlock(pVM);
3152 return rc;
3153}
3154
3155
3156/**
3157 * Frees the specified RAM page and replaces it with the ZERO page.
3158 *
3159 * This is used by ballooning, remapping MMIO2 and RAM reset.
3160 *
3161 * @param pVM Pointer to the shared VM structure.
3162 * @param pReq Pointer to the request.
 * @param pcPendingPages Where the number of pages pending in \a pReq is kept.
3163 * @param pPage Pointer to the page structure.
3164 * @param GCPhys The guest physical address of the page, if applicable.
3165 *
3166 * @remarks The caller must own the PGM lock.
3167 */
3168static int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys)
3169{
3170 /*
3171 * Assert sanity.
3172 */
3173 Assert(PGMIsLockOwner(pVM));
3174 if (RT_UNLIKELY( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
3175 && PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW))
3176 {
3177 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
3178 return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
3179 }
3180
3181 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO)
3182 return VINF_SUCCESS;
3183
3184 const uint32_t idPage = PGM_PAGE_GET_PAGEID(pPage);
3185 Log3(("pgmPhysFreePage: idPage=%#x HCPhys=%RGp pPage=%R[pgmpage]\n", idPage, PGM_PAGE_GET_HCPHYS(pPage), pPage));
3186 if (RT_UNLIKELY( idPage == NIL_GMM_PAGEID
3187 || idPage > GMM_PAGEID_LAST
3188 || PGM_PAGE_GET_CHUNKID(pPage) == NIL_GMM_CHUNKID))
3189 {
3190 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
3191 return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, idPage);
3192 }
3193
3194 /* update page count stats. */
3195 if (PGM_PAGE_IS_SHARED(pPage))
3196 pVM->pgm.s.cSharedPages--;
3197 else
3198 pVM->pgm.s.cPrivatePages--;
3199 pVM->pgm.s.cZeroPages++;
3200
3201 /*
3202 * pPage = ZERO page.
3203 */
3204 PGM_PAGE_SET_HCPHYS(pPage, pVM->pgm.s.HCPhysZeroPg);
3205 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
3206 PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
3207
3208 /*
3209 * Make sure it's not in the handy page array.
3210 */
3211 for (uint32_t i = pVM->pgm.s.cHandyPages; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
3212 {
3213 if (pVM->pgm.s.aHandyPages[i].idPage == idPage)
3214 {
3215 pVM->pgm.s.aHandyPages[i].idPage = NIL_GMM_PAGEID;
3216 break;
3217 }
3218 if (pVM->pgm.s.aHandyPages[i].idSharedPage == idPage)
3219 {
3220 pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
3221 break;
3222 }
3223 }
3224
3225 /*
3226 * Push it onto the page array.
3227 */
3228 uint32_t iPage = *pcPendingPages;
3229 Assert(iPage < PGMPHYS_FREE_PAGE_BATCH_SIZE);
3230 *pcPendingPages += 1;
3231
3232 pReq->aPages[iPage].idPage = idPage;
3233
3234 if (iPage + 1 < PGMPHYS_FREE_PAGE_BATCH_SIZE)
3235 return VINF_SUCCESS;
3236
3237 /*
3238 * Flush the pages.
3239 */
3240 int rc = GMMR3FreePagesPerform(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE);
3241 if (RT_SUCCESS(rc))
3242 {
3243 GMMR3FreePagesRePrep(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
3244 *pcPendingPages = 0;
3245 }
3246 return rc;
3247}
3248
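/**
 * @par Calling pattern
 * A condensed sketch of the batching contract the function above expects from
 * its callers, mirroring the MMIO2 and ROM reset code earlier in this file;
 * pFirstPage, cPages and GCPhysFirst stand in for whatever range the caller
 * iterates while holding the PGM lock.
 * @code
 *  uint32_t         cPendingPages = 0;
 *  PGMMFREEPAGESREQ pReq;
 *  int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
 *  AssertLogRelRCReturn(rc, rc);
 *  for (uint32_t iPage = 0; iPage < cPages; iPage++)
 *  {
 *      rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pFirstPage[iPage],
 *                           GCPhysFirst + ((RTGCPHYS)iPage << PAGE_SHIFT));
 *      AssertLogRelRCReturn(rc, rc);
 *  }
 *  if (cPendingPages)      // flush the last partial batch
 *  {
 *      rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
 *      AssertLogRelRCReturn(rc, rc);
 *  }
 *  GMMR3FreePagesCleanup(pReq);
 * @endcode
 */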
3249
3250/**
3251 * Converts a GC physical address to a HC ring-3 pointer, with some
3252 * additional checks.
3253 *
3254 * @returns VBox status code.
3255 * @retval VINF_SUCCESS on success.
3256 * @retval VINF_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3257 * access handler of some kind.
3258 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3259 * accesses or is odd in any way.
3260 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3261 *
3262 * @param pVM The VM handle.
3263 * @param GCPhys The GC physical address to convert.
3264 * @param fWritable Whether write access is required.
3265 * @param ppv Where to store the pointer corresponding to GCPhys on
3266 * success.
3267 */
3268VMMR3DECL(int) PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
3269{
3270 pgmLock(pVM);
3271
3272 PPGMRAMRANGE pRam;
3273 PPGMPAGE pPage;
3274 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
3275 if (RT_SUCCESS(rc))
3276 {
3277 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
3278 rc = VINF_SUCCESS;
3279 else
3280 {
3281 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3282 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3283 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
3284 {
3285 /** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
3286 * in -norawr0 mode. */
3287 if (fWritable)
3288 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
3289 }
3290 else
3291 {
3292 /* Temporarily disabled physical handler(s): since the recompiler
3293 doesn't get notified when a handler is reset, we'll have to pretend it's
3294 operating normally. */
3295 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
3296 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3297 else
3298 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
3299 }
3300 }
3301 if (RT_SUCCESS(rc))
3302 {
3303 int rc2;
3304
3305 /* Make sure what we return is writable. */
3306 if (fWritable && rc != VINF_PGM_PHYS_TLB_CATCH_WRITE)
3307 switch (PGM_PAGE_GET_STATE(pPage))
3308 {
3309 case PGM_PAGE_STATE_ALLOCATED:
3310 break;
3311 case PGM_PAGE_STATE_ZERO:
3312 case PGM_PAGE_STATE_SHARED:
3313 case PGM_PAGE_STATE_WRITE_MONITORED:
3314 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
3315 AssertLogRelRCReturn(rc2, rc2);
3316 break;
3317 }
3318
3319 /* Get a ring-3 mapping of the address. */
3320 PPGMPAGER3MAPTLBE pTlbe;
3321 rc2 = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
3322 AssertLogRelRCReturn(rc2, rc2);
3323 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
3324 /** @todo mapping/locking hell; this isn't horribly efficient since
3325 * pgmPhysPageLoadIntoTlb will repeat the lookup we've done here. */
3326
3327 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
3328 }
3329 else
3330 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
3331
3332 /* else: handler catching all access, no pointer returned. */
3333 }
3334 else
3335 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
3336
3337 pgmUnlock(pVM);
3338 return rc;
3339}
3340
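/**
 * @par Example
 * A hedged sketch of consuming the function above for a read, where
 * VINF_PGM_PHYS_TLB_CATCH_WRITE still permits reading through the pointer and
 * anything else falls back to PGMPhysRead so access handlers keep firing;
 * pvDst and cbToRead are placeholders (cbToRead must not cross the page boundary).
 * @code
 *  void *pv;
 *  int rc = PGMR3PhysTlbGCPhys2Ptr(pVM, GCPhys, false /* fWritable */, &pv);
 *  if (RT_SUCCESS(rc))     // VINF_SUCCESS or VINF_PGM_PHYS_TLB_CATCH_WRITE
 *      memcpy(pvDst, pv, cbToRead);
 *  else                    // unassigned page or catch-all handler
 *      PGMPhysRead(pVM, GCPhys, pvDst, cbToRead);
 * @endcode
 */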
3341
3342