VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp @ 81792

Last change on this file since 81792 was 81705, checked in by vboxsync, 5 years ago

PGM: Fixed getting MMIO2 mapping address mess. bugref:9218

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 206.1 KB
1/* $Id: PGMPhys.cpp 81705 2019-11-06 11:58:44Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM_PHYS
23#include <VBox/vmm/pgm.h>
24#include <VBox/vmm/iem.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/mm.h>
27#include <VBox/vmm/nem.h>
28#include <VBox/vmm/stam.h>
29#include <VBox/vmm/pdmdev.h>
30#include "PGMInternal.h"
31#include <VBox/vmm/vmcc.h>
32
33#include "PGMInline.h"
34
35#include <VBox/sup.h>
36#include <VBox/param.h>
37#include <VBox/err.h>
38#include <VBox/log.h>
39#include <iprt/assert.h>
40#include <iprt/alloc.h>
41#include <iprt/asm.h>
42#ifdef VBOX_STRICT
43# include <iprt/crc.h>
44#endif
45#include <iprt/thread.h>
46#include <iprt/string.h>
47#include <iprt/system.h>
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** The number of pages to free in one batch. */
54#define PGMPHYS_FREE_PAGE_BATCH_SIZE 128
55
56
57/*
58 * PGMR3PhysReadU8-64
59 * PGMR3PhysWriteU8-64
60 */
61#define PGMPHYSFN_READNAME PGMR3PhysReadU8
62#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU8
63#define PGMPHYS_DATASIZE 1
64#define PGMPHYS_DATATYPE uint8_t
65#include "PGMPhysRWTmpl.h"
66
67#define PGMPHYSFN_READNAME PGMR3PhysReadU16
68#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU16
69#define PGMPHYS_DATASIZE 2
70#define PGMPHYS_DATATYPE uint16_t
71#include "PGMPhysRWTmpl.h"
72
73#define PGMPHYSFN_READNAME PGMR3PhysReadU32
74#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU32
75#define PGMPHYS_DATASIZE 4
76#define PGMPHYS_DATATYPE uint32_t
77#include "PGMPhysRWTmpl.h"
78
79#define PGMPHYSFN_READNAME PGMR3PhysReadU64
80#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU64
81#define PGMPHYS_DATASIZE 8
82#define PGMPHYS_DATATYPE uint64_t
83#include "PGMPhysRWTmpl.h"
84
85
86/**
87 * EMT worker for PGMR3PhysReadExternal.
88 */
89static DECLCALLBACK(int) pgmR3PhysReadExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, void *pvBuf, size_t cbRead,
90 PGMACCESSORIGIN enmOrigin)
91{
92 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, *pGCPhys, pvBuf, cbRead, enmOrigin);
93 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
94 return VINF_SUCCESS;
95}
96
97
98/**
99 * Read from physical memory, external users.
100 *
101 * @returns VBox status code.
102 * @retval VINF_SUCCESS.
103 *
104 * @param pVM The cross context VM structure.
105 * @param GCPhys Physical address to read from.
106 * @param pvBuf Where to read into.
107 * @param cbRead How many bytes to read.
108 * @param enmOrigin Who is calling.
109 *
110 * @thread Any but EMTs.
111 */
112VMMR3DECL(int) PGMR3PhysReadExternal(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
113{
114 VM_ASSERT_OTHER_THREAD(pVM);
115
116 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
117 LogFlow(("PGMR3PhysReadExternal: %RGp %d\n", GCPhys, cbRead));
118
119 pgmLock(pVM);
120
121 /*
122 * Copy loop on ram ranges.
123 */
124 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
125 for (;;)
126 {
127 /* Inside range or not? */
128 if (pRam && GCPhys >= pRam->GCPhys)
129 {
130 /*
131 * Must work our way through this range page by page.
132 */
133 RTGCPHYS off = GCPhys - pRam->GCPhys;
134 while (off < pRam->cb)
135 {
136 unsigned iPage = off >> PAGE_SHIFT;
137 PPGMPAGE pPage = &pRam->aPages[iPage];
138
139 /*
140 * If the page has an ALL access handler, we'll have to
141 * delegate the job to EMT.
142 */
143 if ( PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
144 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
145 {
146 pgmUnlock(pVM);
147
148 return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysReadExternalEMT, 5,
149 pVM, &GCPhys, pvBuf, cbRead, enmOrigin);
150 }
151 Assert(!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage));
152
153 /*
154 * Simple stuff, go ahead.
155 */
156 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
157 if (cb > cbRead)
158 cb = cbRead;
159 PGMPAGEMAPLOCK PgMpLck;
160 const void *pvSrc;
161 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
162 if (RT_SUCCESS(rc))
163 {
164 memcpy(pvBuf, pvSrc, cb);
165 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
166 }
167 else
168 {
169 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
170 pRam->GCPhys + off, pPage, rc));
171 memset(pvBuf, 0xff, cb);
172 }
173
174 /* next page */
175 if (cb >= cbRead)
176 {
177 pgmUnlock(pVM);
178 return VINF_SUCCESS;
179 }
180 cbRead -= cb;
181 off += cb;
182 GCPhys += cb;
183 pvBuf = (char *)pvBuf + cb;
184 } /* walk pages in ram range. */
185 }
186 else
187 {
188 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
189
190 /*
191 * Unassigned address space.
192 */
193 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
194 if (cb >= cbRead)
195 {
196 memset(pvBuf, 0xff, cbRead);
197 break;
198 }
199 memset(pvBuf, 0xff, cb);
200
201 cbRead -= cb;
202 pvBuf = (char *)pvBuf + cb;
203 GCPhys += cb;
204 }
205
206 /* Advance range if necessary. */
207 while (pRam && GCPhys > pRam->GCPhysLast)
208 pRam = pRam->CTX_SUFF(pNext);
209 } /* Ram range walk */
210
211 pgmUnlock(pVM);
212
213 return VINF_SUCCESS;
214}
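/*
 * Illustrative usage sketch: a non-EMT device thread reading a guest buffer
 * through the API above.  The function name is made up for the example and
 * PGMACCESSORIGIN_DEVICE is assumed to be the appropriate origin value.
 */
static int exampleDevReadGuestBuffer(PVM pVM, RTGCPHYS GCPhysSrc, void *pvDst, size_t cbDst)
{
    /* Safe from any thread except an EMT; pages with ALL access handlers or
       special MMIO aliases are delegated to an EMT internally (see above). */
    return PGMR3PhysReadExternal(pVM, GCPhysSrc, pvDst, cbDst, PGMACCESSORIGIN_DEVICE);
}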
215
216
217/**
218 * EMT worker for PGMR3PhysWriteExternal.
219 */
220static DECLCALLBACK(int) pgmR3PhysWriteExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, const void *pvBuf, size_t cbWrite,
221 PGMACCESSORIGIN enmOrigin)
222{
223 /** @todo VERR_EM_NO_MEMORY */
224 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, *pGCPhys, pvBuf, cbWrite, enmOrigin);
225 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
226 return VINF_SUCCESS;
227}
228
229
230/**
231 * Write to physical memory, external users.
232 *
233 * @returns VBox status code.
234 * @retval VINF_SUCCESS.
235 * @retval VERR_EM_NO_MEMORY.
236 *
237 * @param pVM The cross context VM structure.
238 * @param GCPhys Physical address to write to.
239 * @param pvBuf What to write.
240 * @param cbWrite How many bytes to write.
241 * @param enmOrigin Who is calling.
242 *
243 * @thread Any but EMTs.
244 */
245VMMDECL(int) PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
246{
247 VM_ASSERT_OTHER_THREAD(pVM);
248
249 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites,
250 ("Calling PGMR3PhysWriteExternal after pgmR3Save()! GCPhys=%RGp cbWrite=%#x enmOrigin=%d\n",
251 GCPhys, cbWrite, enmOrigin));
252 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
253 LogFlow(("PGMR3PhysWriteExternal: %RGp %d\n", GCPhys, cbWrite));
254
255 pgmLock(pVM);
256
257 /*
258 * Copy loop on ram ranges, stop when we hit something difficult.
259 */
260 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
261 for (;;)
262 {
263 /* Inside range or not? */
264 if (pRam && GCPhys >= pRam->GCPhys)
265 {
266 /*
267 * Must work our way through this range page by page.
268 */
269 RTGCPTR off = GCPhys - pRam->GCPhys;
270 while (off < pRam->cb)
271 {
272 RTGCPTR iPage = off >> PAGE_SHIFT;
273 PPGMPAGE pPage = &pRam->aPages[iPage];
274
275 /*
276 * If the page is problematic, we have to do the work on the EMT.
277 *
278 * Allocating writable pages and access handlers are
279 * problematic; write monitored pages are simple and can be
280 * dealt with here.
281 */
282 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
283 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
284 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
285 {
286 if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
287 && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
288 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
289 else
290 {
291 pgmUnlock(pVM);
292
293 return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysWriteExternalEMT, 5,
294 pVM, &GCPhys, pvBuf, cbWrite, enmOrigin);
295 }
296 }
297 Assert(!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage));
298
299 /*
300 * Simple stuff, go ahead.
301 */
302 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
303 if (cb > cbWrite)
304 cb = cbWrite;
305 PGMPAGEMAPLOCK PgMpLck;
306 void *pvDst;
307 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
308 if (RT_SUCCESS(rc))
309 {
310 memcpy(pvDst, pvBuf, cb);
311 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
312 }
313 else
314 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
315 pRam->GCPhys + off, pPage, rc));
316
317 /* next page */
318 if (cb >= cbWrite)
319 {
320 pgmUnlock(pVM);
321 return VINF_SUCCESS;
322 }
323
324 cbWrite -= cb;
325 off += cb;
326 GCPhys += cb;
327 pvBuf = (const char *)pvBuf + cb;
328 } /* walk pages in ram range */
329 }
330 else
331 {
332 /*
333 * Unassigned address space, skip it.
334 */
335 if (!pRam)
336 break;
337 size_t cb = pRam->GCPhys - GCPhys;
338 if (cb >= cbWrite)
339 break;
340 cbWrite -= cb;
341 pvBuf = (const char *)pvBuf + cb;
342 GCPhys += cb;
343 }
344
345 /* Advance range if necessary. */
346 while (pRam && GCPhys > pRam->GCPhysLast)
347 pRam = pRam->CTX_SUFF(pNext);
348 } /* Ram range walk */
349
350 pgmUnlock(pVM);
351 return VINF_SUCCESS;
352}
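/*
 * Illustrative usage sketch: the write-side counterpart of the read example
 * further up.  The function name is made up and PGMACCESSORIGIN_DEVICE is
 * assumed to be the appropriate origin value.
 */
static int exampleDevWriteGuestBuffer(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cbSrc)
{
    /* Write monitored pages are made writable in place; handler pages and
       pages not in the allocated state are delegated to an EMT internally. */
    return PGMR3PhysWriteExternal(pVM, GCPhysDst, pvSrc, cbSrc, PGMACCESSORIGIN_DEVICE);
}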
353
354
355/**
356 * VMR3ReqCall worker for PGMR3PhysGCPhys2CCPtrExternal to make pages writable.
357 *
358 * @returns see PGMR3PhysGCPhys2CCPtrExternal
359 * @param pVM The cross context VM structure.
360 * @param pGCPhys Pointer to the guest physical address.
361 * @param ppv Where to store the mapping address.
362 * @param pLock Where to store the lock.
363 */
364static DECLCALLBACK(int) pgmR3PhysGCPhys2CCPtrDelegated(PVM pVM, PRTGCPHYS pGCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
365{
366 /*
367 * Just hand it to PGMPhysGCPhys2CCPtr and check that it's not a page with
368 * an access handler after it succeeds.
369 */
370 int rc = pgmLock(pVM);
371 AssertRCReturn(rc, rc);
372
373 rc = PGMPhysGCPhys2CCPtr(pVM, *pGCPhys, ppv, pLock);
374 if (RT_SUCCESS(rc))
375 {
376 PPGMPAGEMAPTLBE pTlbe;
377 int rc2 = pgmPhysPageQueryTlbe(pVM, *pGCPhys, &pTlbe);
378 AssertFatalRC(rc2);
379 PPGMPAGE pPage = pTlbe->pPage;
380 if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
381 {
382 PGMPhysReleasePageMappingLock(pVM, pLock);
383 rc = VERR_PGM_PHYS_PAGE_RESERVED;
384 }
385 else if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
386#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
387 || pgmPoolIsDirtyPage(pVM, *pGCPhys)
388#endif
389 )
390 {
391 /* We *must* flush any corresponding pgm pool page here, otherwise we'll
392 * not be informed about writes and keep bogus gst->shw mappings around.
393 */
394 pgmPoolFlushPageByGCPhys(pVM, *pGCPhys);
395 Assert(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage));
396 /** @todo r=bird: return VERR_PGM_PHYS_PAGE_RESERVED here if it still has
397 * active handlers, see the PGMR3PhysGCPhys2CCPtrExternal docs. */
398 }
399 }
400
401 pgmUnlock(pVM);
402 return rc;
403}
404
405
406/**
407 * Requests the mapping of a guest page into ring-3, external threads.
408 *
409 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
410 * release it.
411 *
412 * This API will assume your intention is to write to the page, and will
413 * therefore replace shared and zero pages. If you do not intend to modify the
414 * page, use the PGMR3PhysGCPhys2CCPtrReadOnlyExternal() API.
415 *
416 * @returns VBox status code.
417 * @retval VINF_SUCCESS on success.
418 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
419 * backing or if the page has any active access handlers. The caller
420 * must fall back on using PGMR3PhysWriteExternal.
421 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
422 *
423 * @param pVM The cross context VM structure.
424 * @param GCPhys The guest physical address of the page that should be mapped.
425 * @param ppv Where to store the address corresponding to GCPhys.
426 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
427 *
428 * @remark Avoid calling this API from within critical sections (other than the
429 * PGM one) because of the deadlock risk when we have to delegate the
430 * task to an EMT.
431 * @thread Any.
432 */
433VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrExternal(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
434{
435 AssertPtr(ppv);
436 AssertPtr(pLock);
437
438 Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
439
440 int rc = pgmLock(pVM);
441 AssertRCReturn(rc, rc);
442
443 /*
444 * Query the Physical TLB entry for the page (may fail).
445 */
446 PPGMPAGEMAPTLBE pTlbe;
447 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
448 if (RT_SUCCESS(rc))
449 {
450 PPGMPAGE pPage = pTlbe->pPage;
451 if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
452 rc = VERR_PGM_PHYS_PAGE_RESERVED;
453 else
454 {
455 /*
456 * If the page is shared, the zero page, or being write monitored
457 * it must be converted to a page that's writable if possible.
458 * We can only deal with write monitored pages here; the rest have
459 * to be done on an EMT.
460 */
461 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
462 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
463#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
464 || pgmPoolIsDirtyPage(pVM, GCPhys)
465#endif
466 )
467 {
468 if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
469 && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
470#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
471 && !pgmPoolIsDirtyPage(pVM, GCPhys) /** @todo we're very likely doing this twice. */
472#endif
473 )
474 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
475 else
476 {
477 pgmUnlock(pVM);
478
479 return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
480 pVM, &GCPhys, ppv, pLock);
481 }
482 }
483
484 /*
485 * Now, just perform the locking and calculate the return address.
486 */
487 PPGMPAGEMAP pMap = pTlbe->pMap;
488 if (pMap)
489 pMap->cRefs++;
490
491 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
492 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
493 {
494 if (cLocks == 0)
495 pVM->pgm.s.cWriteLockedPages++;
496 PGM_PAGE_INC_WRITE_LOCKS(pPage);
497 }
498 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
499 {
500 PGM_PAGE_INC_WRITE_LOCKS(pPage);
501 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
502 if (pMap)
503 pMap->cRefs++; /* Extra ref to prevent it from going away. */
504 }
505
506 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
507 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
508 pLock->pvMap = pMap;
509 }
510 }
511
512 pgmUnlock(pVM);
513 return rc;
514}
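/*
 * Illustrative usage sketch: mapping a single guest page for a short direct
 * write and releasing the lock again right away, as the documentation above
 * requests.  The function name is example-only and PGMACCESSORIGIN_DEVICE is
 * assumed for the documented fallback path.
 */
static int exampleDevPokeGuestByte(PVM pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    void          *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMR3PhysGCPhys2CCPtrExternal(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;                    /* direct ring-3 access to the guest page */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release ASAP */
    }
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        rc = PGMR3PhysWriteExternal(pVM, GCPhys, &bValue, sizeof(bValue), PGMACCESSORIGIN_DEVICE); /* documented fallback */
    return rc;
}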
515
516
517/**
518 * Requests the mapping of a guest page into ring-3, external threads.
519 *
520 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
521 * release it.
522 *
523 * @returns VBox status code.
524 * @retval VINF_SUCCESS on success.
525 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
526 * backing or if the page has an active ALL access handler. The caller
527 * must fall back on using PGMPhysRead.
528 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
529 *
530 * @param pVM The cross context VM structure.
531 * @param GCPhys The guest physical address of the page that should be mapped.
532 * @param ppv Where to store the address corresponding to GCPhys.
533 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
534 *
535 * @remark Avoid calling this API from within critical sections (other than
536 * the PGM one) because of the deadlock risk.
537 * @thread Any.
538 */
539VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
540{
541 int rc = pgmLock(pVM);
542 AssertRCReturn(rc, rc);
543
544 /*
545 * Query the Physical TLB entry for the page (may fail).
546 */
547 PPGMPAGEMAPTLBE pTlbe;
548 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
549 if (RT_SUCCESS(rc))
550 {
551 PPGMPAGE pPage = pTlbe->pPage;
552#if 1
553 /* MMIO pages don't have any readable backing. */
554 if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
555 rc = VERR_PGM_PHYS_PAGE_RESERVED;
556#else
557 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
558 rc = VERR_PGM_PHYS_PAGE_RESERVED;
559#endif
560 else
561 {
562 /*
563 * Now, just perform the locking and calculate the return address.
564 */
565 PPGMPAGEMAP pMap = pTlbe->pMap;
566 if (pMap)
567 pMap->cRefs++;
568
569 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
570 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
571 {
572 if (cLocks == 0)
573 pVM->pgm.s.cReadLockedPages++;
574 PGM_PAGE_INC_READ_LOCKS(pPage);
575 }
576 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
577 {
578 PGM_PAGE_INC_READ_LOCKS(pPage);
579 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
580 if (pMap)
581 pMap->cRefs++; /* Extra ref to prevent it from going away. */
582 }
583
584 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
585 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
586 pLock->pvMap = pMap;
587 }
588 }
589
590 pgmUnlock(pVM);
591 return rc;
592}
593
594
595/**
596 * Requests the mapping of multiple guest pages into ring-3, external threads.
597 *
598 * When you're done with the pages, call PGMPhysBulkReleasePageMappingLock()
599 * ASAP to release them.
600 *
601 * This API will assume your intention is to write to the pages, and will
602 * therefore replace shared and zero pages. If you do not intend to modify the
603 * pages, use the PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal() API.
604 *
605 * @returns VBox status code.
606 * @retval VINF_SUCCESS on success.
607 * @retval VERR_PGM_PHYS_PAGE_RESERVED if any of the pages has no physical
608 * backing or if any of the pages has any active access
609 * handlers. The caller must fall back on using PGMR3PhysWriteExternal.
610 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if @a paGCPhysPages contains
611 * an invalid physical address.
612 *
613 * @param pVM The cross context VM structure.
614 * @param cPages Number of pages to lock.
615 * @param paGCPhysPages The guest physical address of the pages that
616 * should be mapped (@a cPages entries).
617 * @param papvPages Where to store the ring-3 mapping addresses
618 * corresponding to @a paGCPhysPages.
619 * @param paLocks Where to store the locking information that
620 * pfnPhysBulkReleasePageMappingLock needs (@a cPages
621 * in length).
622 *
623 * @remark Avoid calling this API from within critical sections (other than the
624 * PGM one) because of the deadlock risk when we have to delegate the
625 * task to an EMT.
626 * @thread Any.
627 */
628VMMR3DECL(int) PGMR3PhysBulkGCPhys2CCPtrExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
629 void **papvPages, PPGMPAGEMAPLOCK paLocks)
630{
631 Assert(cPages > 0);
632 AssertPtr(papvPages);
633 AssertPtr(paLocks);
634
635 Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
636
637 int rc = pgmLock(pVM);
638 AssertRCReturn(rc, rc);
639
640 /*
641 * Lock the pages one by one.
642 * The loop body is similar to PGMR3PhysGCPhys2CCPtrExternal.
643 */
644 int32_t cNextYield = 128;
645 uint32_t iPage;
646 for (iPage = 0; iPage < cPages; iPage++)
647 {
648 if (--cNextYield > 0)
649 { /* likely */ }
650 else
651 {
652 pgmUnlock(pVM);
653 ASMNopPause();
654 pgmLock(pVM);
655 cNextYield = 128;
656 }
657
658 /*
659 * Query the Physical TLB entry for the page (may fail).
660 */
661 PPGMPAGEMAPTLBE pTlbe;
662 rc = pgmPhysPageQueryTlbe(pVM, paGCPhysPages[iPage], &pTlbe);
663 if (RT_SUCCESS(rc))
664 { }
665 else
666 break;
667 PPGMPAGE pPage = pTlbe->pPage;
668
669 /*
670 * No MMIO or active access handlers.
671 */
672 if ( !PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)
673 && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
674 { }
675 else
676 {
677 rc = VERR_PGM_PHYS_PAGE_RESERVED;
678 break;
679 }
680
681 /*
682 * The page must be in the allocated state and not be a dirty pool page.
683 * We can handle converting a write monitored page to an allocated one, but
684 * anything more complicated must be delegated to an EMT.
685 */
686 bool fDelegateToEmt = false;
687 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
688#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
689 fDelegateToEmt = pgmPoolIsDirtyPage(pVM, paGCPhysPages[iPage]);
690#else
691 fDelegateToEmt = false;
692#endif
693 else if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
694 {
695#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
696 if (!pgmPoolIsDirtyPage(pVM, paGCPhysPages[iPage]))
697 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, paGCPhysPages[iPage]);
698 else
699 fDelegateToEmt = true;
700#endif
701 }
702 else
703 fDelegateToEmt = true;
704 if (!fDelegateToEmt)
705 { }
706 else
707 {
708 /* We could do this delegation in bulk, but considered too much work vs gain. */
709 pgmUnlock(pVM);
710 rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
711 pVM, &paGCPhysPages[iPage], &papvPages[iPage], &paLocks[iPage]);
712 pgmLock(pVM);
713 if (RT_FAILURE(rc))
714 break;
715 cNextYield = 128;
716 }
717
718 /*
719 * Now, just perform the locking and address calculation.
720 */
721 PPGMPAGEMAP pMap = pTlbe->pMap;
722 if (pMap)
723 pMap->cRefs++;
724
725 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
726 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
727 {
728 if (cLocks == 0)
729 pVM->pgm.s.cWriteLockedPages++;
730 PGM_PAGE_INC_WRITE_LOCKS(pPage);
731 }
732 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
733 {
734 PGM_PAGE_INC_WRITE_LOCKS(pPage);
735 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", paGCPhysPages[iPage], pPage));
736 if (pMap)
737 pMap->cRefs++; /* Extra ref to prevent it from going away. */
738 }
739
740 papvPages[iPage] = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(paGCPhysPages[iPage] & PAGE_OFFSET_MASK));
741 paLocks[iPage].uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
742 paLocks[iPage].pvMap = pMap;
743 }
744
745 pgmUnlock(pVM);
746
747 /*
748 * On failure we must unlock any pages we managed to get already.
749 */
750 if (RT_FAILURE(rc) && iPage > 0)
751 PGMPhysBulkReleasePageMappingLocks(pVM, iPage, paLocks);
752
753 return rc;
754}
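/*
 * Illustrative usage sketch: locking two (assumed page aligned) guest pages
 * in one call and releasing them with the bulk release API referenced in the
 * documentation above.  Names and addresses are example-only.
 */
static int exampleZeroTwoGuestPages(PVM pVM, RTGCPHYS GCPhysFirst, RTGCPHYS GCPhysSecond)
{
    RTGCPHYS       aGCPhysPages[2] = { GCPhysFirst, GCPhysSecond };
    void          *apvPages[2];
    PGMPAGEMAPLOCK aLocks[2];
    int rc = PGMR3PhysBulkGCPhys2CCPtrExternal(pVM, 2, aGCPhysPages, apvPages, aLocks);
    if (RT_SUCCESS(rc))
    {
        memset(apvPages[0], 0, PAGE_SIZE);
        memset(apvPages[1], 0, PAGE_SIZE);
        PGMPhysBulkReleasePageMappingLocks(pVM, 2, aLocks);
    }
    return rc;
}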
755
756
757/**
758 * Requests the mapping of multiple guest pages into ring-3, for reading only,
759 * external threads.
760 *
761 * When you're done with the pages, call PGMPhysReleasePageMappingLock() ASAP
762 * to release them.
763 *
764 * @returns VBox status code.
765 * @retval VINF_SUCCESS on success.
766 * @retval VERR_PGM_PHYS_PAGE_RESERVED if any of the pages has no physical
767 * backing or if any of the pages has an active ALL access
768 * handler. The caller must fall back on using PGMR3PhysReadExternal.
769 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if @a paGCPhysPages contains
770 * an invalid physical address.
771 *
772 * @param pVM The cross context VM structure.
773 * @param cPages Number of pages to lock.
774 * @param paGCPhysPages The guest physical address of the pages that
775 * should be mapped (@a cPages entries).
776 * @param papvPages Where to store the ring-3 mapping addresses
777 * corresponding to @a paGCPhysPages.
778 * @param paLocks Where to store the lock information that
779 * pfnPhysReleasePageMappingLock needs (@a cPages
780 * in length).
781 *
782 * @remark Avoid calling this API from within critical sections (other than
783 * the PGM one) because of the deadlock risk.
784 * @thread Any.
785 */
786VMMR3DECL(int) PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
787 void const **papvPages, PPGMPAGEMAPLOCK paLocks)
788{
789 Assert(cPages > 0);
790 AssertPtr(papvPages);
791 AssertPtr(paLocks);
792
793 Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
794
795 int rc = pgmLock(pVM);
796 AssertRCReturn(rc, rc);
797
798 /*
799 * Lock the pages one by one.
800 * The loop body is similar to PGMR3PhysGCPhys2CCPtrReadOnlyExternal.
801 */
802 int32_t cNextYield = 256;
803 uint32_t iPage;
804 for (iPage = 0; iPage < cPages; iPage++)
805 {
806 if (--cNextYield > 0)
807 { /* likely */ }
808 else
809 {
810 pgmUnlock(pVM);
811 ASMNopPause();
812 pgmLock(pVM);
813 cNextYield = 256;
814 }
815
816 /*
817 * Query the Physical TLB entry for the page (may fail).
818 */
819 PPGMPAGEMAPTLBE pTlbe;
820 rc = pgmPhysPageQueryTlbe(pVM, paGCPhysPages[iPage], &pTlbe);
821 if (RT_SUCCESS(rc))
822 { }
823 else
824 break;
825 PPGMPAGE pPage = pTlbe->pPage;
826
827 /*
828 * No MMIO or active all access handlers, everything else can be accessed.
829 */
830 if ( !PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)
831 && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
832 { }
833 else
834 {
835 rc = VERR_PGM_PHYS_PAGE_RESERVED;
836 break;
837 }
838
839 /*
840 * Now, just perform the locking and address calculation.
841 */
842 PPGMPAGEMAP pMap = pTlbe->pMap;
843 if (pMap)
844 pMap->cRefs++;
845
846 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
847 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
848 {
849 if (cLocks == 0)
850 pVM->pgm.s.cReadLockedPages++;
851 PGM_PAGE_INC_READ_LOCKS(pPage);
852 }
853 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
854 {
855 PGM_PAGE_INC_READ_LOCKS(pPage);
856 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", paGCPhysPages[iPage], pPage));
857 if (pMap)
858 pMap->cRefs++; /* Extra ref to prevent it from going away. */
859 }
860
861 papvPages[iPage] = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(paGCPhysPages[iPage] & PAGE_OFFSET_MASK));
862 paLocks[iPage].uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
863 paLocks[iPage].pvMap = pMap;
864 }
865
866 pgmUnlock(pVM);
867
868 /*
869 * On failure we must unlock any pages we managed to get already.
870 */
871 if (RT_FAILURE(rc) && iPage > 0)
872 PGMPhysBulkReleasePageMappingLocks(pVM, iPage, paLocks);
873
874 return rc;
875}
876
877
878#define MAKE_LEAF(a_pNode) \
879 do { \
880 (a_pNode)->pLeftR3 = NIL_RTR3PTR; \
881 (a_pNode)->pRightR3 = NIL_RTR3PTR; \
882 (a_pNode)->pLeftR0 = NIL_RTR0PTR; \
883 (a_pNode)->pRightR0 = NIL_RTR0PTR; \
884 } while (0)
885
886#define INSERT_LEFT(a_pParent, a_pNode) \
887 do { \
888 (a_pParent)->pLeftR3 = (a_pNode); \
889 (a_pParent)->pLeftR0 = (a_pNode)->pSelfR0; \
890 } while (0)
891#define INSERT_RIGHT(a_pParent, a_pNode) \
892 do { \
893 (a_pParent)->pRightR3 = (a_pNode); \
894 (a_pParent)->pRightR0 = (a_pNode)->pSelfR0; \
895 } while (0)
896
897
898/**
899 * Recursive tree builder.
900 *
901 * @param ppRam Pointer to the iterator variable.
902 * @param iDepth The current depth. Inserts a leaf node if 0.
903 */
904static PPGMRAMRANGE pgmR3PhysRebuildRamRangeSearchTreesRecursively(PPGMRAMRANGE *ppRam, int iDepth)
905{
906 PPGMRAMRANGE pRam;
907 if (iDepth <= 0)
908 {
909 /*
910 * Leaf node.
911 */
912 pRam = *ppRam;
913 if (pRam)
914 {
915 *ppRam = pRam->pNextR3;
916 MAKE_LEAF(pRam);
917 }
918 }
919 else
920 {
921
922 /*
923 * Intermediate node.
924 */
925 PPGMRAMRANGE pLeft = pgmR3PhysRebuildRamRangeSearchTreesRecursively(ppRam, iDepth - 1);
926
927 pRam = *ppRam;
928 if (!pRam)
929 return pLeft;
930 *ppRam = pRam->pNextR3;
931 MAKE_LEAF(pRam);
932 INSERT_LEFT(pRam, pLeft);
933
934 PPGMRAMRANGE pRight = pgmR3PhysRebuildRamRangeSearchTreesRecursively(ppRam, iDepth - 1);
935 if (pRight)
936 INSERT_RIGHT(pRam, pRight);
937 }
938 return pRam;
939}
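/*
 * Shape illustration: with five ascending ranges A..E the builder below first
 * takes A as a leaf root, and each loop iteration then pushes the previous
 * root down to the left while building a right subtree of the same depth.
 * The result is D at the root, B (with leaves A and C) on the left and the
 * leaf E on the right; lookups descend by comparing GCPhys, exactly as the
 * VBOX_STRICT verification loop further down does.
 */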
940
941
942/**
943 * Rebuilds the RAM range search trees.
944 *
945 * @param pVM The cross context VM structure.
946 */
947static void pgmR3PhysRebuildRamRangeSearchTrees(PVM pVM)
948{
949
950 /*
951 * Create the reasonably balanced tree in a sequential fashion.
952 * For simplicity (laziness) we use standard recursion here.
953 */
954 int iDepth = 0;
955 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
956 PPGMRAMRANGE pRoot = pgmR3PhysRebuildRamRangeSearchTreesRecursively(&pRam, 0);
957 while (pRam)
958 {
959 PPGMRAMRANGE pLeft = pRoot;
960
961 pRoot = pRam;
962 pRam = pRam->pNextR3;
963 MAKE_LEAF(pRoot);
964 INSERT_LEFT(pRoot, pLeft);
965
966 PPGMRAMRANGE pRight = pgmR3PhysRebuildRamRangeSearchTreesRecursively(&pRam, iDepth);
967 if (pRight)
968 INSERT_RIGHT(pRoot, pRight);
969 /** @todo else: rotate the tree. */
970
971 iDepth++;
972 }
973
974 pVM->pgm.s.pRamRangeTreeR3 = pRoot;
975 pVM->pgm.s.pRamRangeTreeR0 = pRoot ? pRoot->pSelfR0 : NIL_RTR0PTR;
976
977#ifdef VBOX_STRICT
978 /*
979 * Verify that the above code works.
980 */
981 unsigned cRanges = 0;
982 for (pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
983 cRanges++;
984 Assert(cRanges > 0);
985
986 unsigned cMaxDepth = ASMBitLastSetU32(cRanges);
987 if ((1U << cMaxDepth) < cRanges)
988 cMaxDepth++;
989
990 for (pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
991 {
992 unsigned cDepth = 0;
993 PPGMRAMRANGE pRam2 = pVM->pgm.s.pRamRangeTreeR3;
994 for (;;)
995 {
996 if (pRam == pRam2)
997 break;
998 Assert(pRam2);
999 if (pRam->GCPhys < pRam2->GCPhys)
1000 pRam2 = pRam2->pLeftR3;
1001 else
1002 pRam2 = pRam2->pRightR3;
 cDepth++; /* count the step down so the cMaxDepth assertion below actually checks something */
1003 }
1004 AssertMsg(cDepth <= cMaxDepth, ("cDepth=%d cMaxDepth=%d\n", cDepth, cMaxDepth));
1005 }
1006#endif /* VBOX_STRICT */
1007}
1008
1009#undef MAKE_LEAF
1010#undef INSERT_LEFT
1011#undef INSERT_RIGHT
1012
1013/**
1014 * Relinks the RAM ranges using the pSelfRC and pSelfR0 pointers.
1015 *
1016 * Called when anything was relocated.
1017 *
1018 * @param pVM The cross context VM structure.
1019 */
1020void pgmR3PhysRelinkRamRanges(PVM pVM)
1021{
1022 PPGMRAMRANGE pCur;
1023
1024#ifdef VBOX_STRICT
1025 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1026 {
1027 Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfR0 == MMHyperCCToR0(pVM, pCur));
1028 Assert((pCur->GCPhys & PAGE_OFFSET_MASK) == 0);
1029 Assert((pCur->GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1030 Assert((pCur->cb & PAGE_OFFSET_MASK) == 0);
1031 Assert(pCur->cb == pCur->GCPhysLast - pCur->GCPhys + 1);
1032 for (PPGMRAMRANGE pCur2 = pVM->pgm.s.pRamRangesXR3; pCur2; pCur2 = pCur2->pNextR3)
1033 Assert( pCur2 == pCur
1034 || strcmp(pCur2->pszDesc, pCur->pszDesc)); /** @todo fix MMIO ranges!! */
1035 }
1036#endif
1037
1038 pCur = pVM->pgm.s.pRamRangesXR3;
1039 if (pCur)
1040 {
1041 pVM->pgm.s.pRamRangesXR0 = pCur->pSelfR0;
1042
1043 for (; pCur->pNextR3; pCur = pCur->pNextR3)
1044 pCur->pNextR0 = pCur->pNextR3->pSelfR0;
1045
1046 Assert(pCur->pNextR0 == NIL_RTR0PTR);
1047 }
1048 else
1049 {
1050 Assert(pVM->pgm.s.pRamRangesXR0 == NIL_RTR0PTR);
1051 }
1052 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
1053
1054 pgmR3PhysRebuildRamRangeSearchTrees(pVM);
1055}
1056
1057
1058/**
1059 * Links a new RAM range into the list.
1060 *
1061 * @param pVM The cross context VM structure.
1062 * @param pNew Pointer to the new list entry.
1063 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
1064 */
1065static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
1066{
1067 AssertMsg(pNew->pszDesc, ("%RGp-%RGp\n", pNew->GCPhys, pNew->GCPhysLast));
1068 Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfR0 == MMHyperCCToR0(pVM, pNew));
1069
1070 pgmLock(pVM);
1071
1072 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesXR3;
1073 pNew->pNextR3 = pRam;
1074 pNew->pNextR0 = pRam ? pRam->pSelfR0 : NIL_RTR0PTR;
1075
1076 if (pPrev)
1077 {
1078 pPrev->pNextR3 = pNew;
1079 pPrev->pNextR0 = pNew->pSelfR0;
1080 }
1081 else
1082 {
1083 pVM->pgm.s.pRamRangesXR3 = pNew;
1084 pVM->pgm.s.pRamRangesXR0 = pNew->pSelfR0;
1085 }
1086 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
1087
1088 pgmR3PhysRebuildRamRangeSearchTrees(pVM);
1089 pgmUnlock(pVM);
1090}
1091
1092
1093/**
1094 * Unlink an existing RAM range from the list.
1095 *
1096 * @param pVM The cross context VM structure.
1097 * @param pRam Pointer to the RAM range to unlink.
1098 * @param pPrev Pointer to the previous list entry. NULL if pRam is the list head.
1099 */
1100static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
1101{
1102 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesXR3 == pRam);
1103 Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfR0 == MMHyperCCToR0(pVM, pRam));
1104
1105 pgmLock(pVM);
1106
1107 PPGMRAMRANGE pNext = pRam->pNextR3;
1108 if (pPrev)
1109 {
1110 pPrev->pNextR3 = pNext;
1111 pPrev->pNextR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
1112 }
1113 else
1114 {
1115 Assert(pVM->pgm.s.pRamRangesXR3 == pRam);
1116 pVM->pgm.s.pRamRangesXR3 = pNext;
1117 pVM->pgm.s.pRamRangesXR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
1118 }
1119 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
1120
1121 pgmR3PhysRebuildRamRangeSearchTrees(pVM);
1122 pgmUnlock(pVM);
1123}
1124
1125
1126/**
1127 * Unlink an existing RAM range from the list.
1128 *
1129 * @param pVM The cross context VM structure.
1130 * @param pRam Pointer to the RAM range to unlink.
1131 */
1132static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
1133{
1134 pgmLock(pVM);
1135
1136 /* find prev. */
1137 PPGMRAMRANGE pPrev = NULL;
1138 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesXR3;
1139 while (pCur != pRam)
1140 {
1141 pPrev = pCur;
1142 pCur = pCur->pNextR3;
1143 }
1144 AssertFatal(pCur);
1145
1146 pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
1147 pgmUnlock(pVM);
1148}
1149
1150
1151/**
1152 * Frees a range of pages, replacing them with ZERO pages of the specified type.
1153 *
1154 * @returns VBox status code.
1155 * @param pVM The cross context VM structure.
1156 * @param pRam The RAM range in which the pages reside.
1157 * @param GCPhys The address of the first page.
1158 * @param GCPhysLast The address of the last page.
1159 * @param enmType The page type to replace them with.
1160 */
1161static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, PGMPAGETYPE enmType)
1162{
1163 PGM_LOCK_ASSERT_OWNER(pVM);
1164 uint32_t cPendingPages = 0;
1165 PGMMFREEPAGESREQ pReq;
1166 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1167 AssertLogRelRCReturn(rc, rc);
1168
1169 /* Iterate the pages. */
1170 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1171 uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> PAGE_SHIFT) + 1;
1172 while (cPagesLeft-- > 0)
1173 {
1174 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys, enmType);
1175 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
1176
1177 PGM_PAGE_SET_TYPE(pVM, pPageDst, enmType);
1178
1179 GCPhys += PAGE_SIZE;
1180 pPageDst++;
1181 }
1182
1183 if (cPendingPages)
1184 {
1185 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
1186 AssertLogRelRCReturn(rc, rc);
1187 }
1188 GMMR3FreePagesCleanup(pReq);
1189
1190 return rc;
1191}
1192
1193#if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))
1194
1195/**
1196 * Rendezvous callback used by PGMR3PhysChangeMemBalloon that changes the memory balloon size
1197 *
1198 * This is only called on one of the EMTs while the other ones are waiting for
1199 * it to complete this function.
1200 *
1201 * @returns VINF_SUCCESS (VBox strict status code).
1202 * @param pVM The cross context VM structure.
1203 * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
1204 * @param pvUser User parameter
1205 */
1206static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysChangeMemBalloonRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
1207{
1208 uintptr_t *paUser = (uintptr_t *)pvUser;
1209 bool fInflate = !!paUser[0];
1210 unsigned cPages = paUser[1];
1211 RTGCPHYS *paPhysPage = (RTGCPHYS *)paUser[2];
1212 uint32_t cPendingPages = 0;
1213 PGMMFREEPAGESREQ pReq;
1214 int rc;
1215
1216 Log(("pgmR3PhysChangeMemBalloonRendezvous: %s %x pages\n", (fInflate) ? "inflate" : "deflate", cPages));
1217 pgmLock(pVM);
1218
1219 if (fInflate)
1220 {
1221 /* Flush the PGM pool cache as we might have stale references to pages that we just freed. */
1222 pgmR3PoolClearAllRendezvous(pVM, pVCpu, NULL);
1223
1224 /* Replace pages with ZERO pages. */
1225 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1226 if (RT_FAILURE(rc))
1227 {
1228 pgmUnlock(pVM);
1229 AssertLogRelRC(rc);
1230 return rc;
1231 }
1232
1233 /* Iterate the pages. */
1234 for (unsigned i = 0; i < cPages; i++)
1235 {
1236 PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
1237 if ( pPage == NULL
1238 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM)
1239 {
1240 Log(("pgmR3PhysChangeMemBalloonRendezvous: invalid physical page %RGp pPage->u3Type=%d\n", paPhysPage[i], pPage ? PGM_PAGE_GET_TYPE(pPage) : 0));
1241 break;
1242 }
1243
1244 LogFlow(("balloon page: %RGp\n", paPhysPage[i]));
1245
1246 /* Flush the shadow PT if this page was previously used as a guest page table. */
1247 pgmPoolFlushPageByGCPhys(pVM, paPhysPage[i]);
1248
1249 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, paPhysPage[i], (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
1250 if (RT_FAILURE(rc))
1251 {
1252 pgmUnlock(pVM);
1253 AssertLogRelRC(rc);
1254 return rc;
1255 }
1256 Assert(PGM_PAGE_IS_ZERO(pPage));
1257 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
1258 }
1259
1260 if (cPendingPages)
1261 {
1262 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
1263 if (RT_FAILURE(rc))
1264 {
1265 pgmUnlock(pVM);
1266 AssertLogRelRC(rc);
1267 return rc;
1268 }
1269 }
1270 GMMR3FreePagesCleanup(pReq);
1271 }
1272 else
1273 {
1274 /* Iterate the pages. */
1275 for (unsigned i = 0; i < cPages; i++)
1276 {
1277 PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
1278 AssertBreak(pPage && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
1279
1280 LogFlow(("Free ballooned page: %RGp\n", paPhysPage[i]));
1281
1282 Assert(PGM_PAGE_IS_BALLOONED(pPage));
1283
1284 /* Change back to zero page. (NEM does not need to be informed.) */
1285 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
1286 }
1287
1288 /* Note that we currently do not map any ballooned pages in our shadow page tables, so no need to flush the pgm pool. */
1289 }
1290
1291 /* Notify GMM about the balloon change. */
1292 rc = GMMR3BalloonedPages(pVM, (fInflate) ? GMMBALLOONACTION_INFLATE : GMMBALLOONACTION_DEFLATE, cPages);
1293 if (RT_SUCCESS(rc))
1294 {
1295 if (!fInflate)
1296 {
1297 Assert(pVM->pgm.s.cBalloonedPages >= cPages);
1298 pVM->pgm.s.cBalloonedPages -= cPages;
1299 }
1300 else
1301 pVM->pgm.s.cBalloonedPages += cPages;
1302 }
1303
1304 pgmUnlock(pVM);
1305
1306 /* Flush the recompiler's TLB as well. */
1307 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1308 CPUMSetChangedFlags(pVM->apCpusR3[i], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
1309
1310 AssertLogRelRC(rc);
1311 return rc;
1312}
1313
1314
1315/**
1316 * Frees a range of ram pages, replacing them with ZERO pages; helper for PGMR3PhysChangeMemBalloon
1317 *
1318 * @returns VBox status code.
1319 * @param pVM The cross context VM structure.
1320 * @param fInflate Inflate or deflate memory balloon
1321 * @param cPages Number of pages to free
1322 * @param paPhysPage Array of guest physical addresses
1323 */
1324static DECLCALLBACK(void) pgmR3PhysChangeMemBalloonHelper(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
1325{
1326 uintptr_t paUser[3];
1327
1328 paUser[0] = fInflate;
1329 paUser[1] = cPages;
1330 paUser[2] = (uintptr_t)paPhysPage;
1331 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
1332 AssertRC(rc);
1333
1334 /* Made a copy in PGMR3PhysChangeMemBalloon; free it here. */
1335 RTMemFree(paPhysPage);
1336}
1337
1338#endif /* 64-bit host && (Windows || Solaris || Linux || FreeBSD) */
1339
1340/**
1341 * Inflate or deflate a memory balloon
1342 *
1343 * @returns VBox status code.
1344 * @param pVM The cross context VM structure.
1345 * @param fInflate Inflate or deflate memory balloon
1346 * @param cPages Number of pages to free
1347 * @param paPhysPage Array of guest physical addresses
1348 */
1349VMMR3DECL(int) PGMR3PhysChangeMemBalloon(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
1350{
1351 /* This must match GMMR0Init; currently we only support memory ballooning on all 64-bit hosts except Mac OS X */
1352#if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))
1353 int rc;
1354
1355 /* Older additions (ancient non-functioning balloon code) pass wrong physical addresses. */
1356 AssertReturn(!(paPhysPage[0] & 0xfff), VERR_INVALID_PARAMETER);
1357
1358 /* We own the IOM lock here and could cause a deadlock by waiting for another VCPU that is blocking on the IOM lock.
1359 * In the SMP case we post a request packet to postpone the job.
1360 */
1361 if (pVM->cCpus > 1)
1362 {
1363 unsigned cbPhysPage = cPages * sizeof(paPhysPage[0]);
1364 RTGCPHYS *paPhysPageCopy = (RTGCPHYS *)RTMemAlloc(cbPhysPage);
1365 AssertReturn(paPhysPageCopy, VERR_NO_MEMORY);
1366
1367 memcpy(paPhysPageCopy, paPhysPage, cbPhysPage);
1368
1369 rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysChangeMemBalloonHelper, 4, pVM, fInflate, cPages, paPhysPageCopy);
1370 AssertRC(rc);
1371 }
1372 else
1373 {
1374 uintptr_t paUser[3];
1375
1376 paUser[0] = fInflate;
1377 paUser[1] = cPages;
1378 paUser[2] = (uintptr_t)paPhysPage;
1379 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
1380 AssertRC(rc);
1381 }
1382 return rc;
1383
1384#else
1385 NOREF(pVM); NOREF(fInflate); NOREF(cPages); NOREF(paPhysPage);
1386 return VERR_NOT_IMPLEMENTED;
1387#endif
1388}
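/*
 * Illustrative usage sketch: inflating the balloon with two page aligned
 * guest addresses.  The addresses and the function name are example-only;
 * real callers obtain the page list elsewhere (e.g. from the guest).
 */
static int exampleInflateBalloonByTwoPages(PVM pVM)
{
    static RTGCPHYS s_aPages[2] = { 0x00100000, 0x00101000 }; /* hypothetical, page aligned as asserted above */
    return PGMR3PhysChangeMemBalloon(pVM, true /*fInflate*/, RT_ELEMENTS(s_aPages), s_aPages);
}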
1389
1390
1391/**
1392 * Rendezvous callback used by PGMR3PhysWriteProtectRAM that write protects all
1393 * physical RAM.
1394 *
1395 * This is only called on one of the EMTs while the other ones are waiting for
1396 * it to complete this function.
1397 *
1398 * @returns VINF_SUCCESS (VBox strict status code).
1399 * @param pVM The cross context VM structure.
1400 * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
1401 * @param pvUser User parameter, unused.
1402 */
1403static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysWriteProtectRAMRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
1404{
1405 int rc = VINF_SUCCESS;
1406 NOREF(pvUser); NOREF(pVCpu);
1407
1408 pgmLock(pVM);
1409#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1410 pgmPoolResetDirtyPages(pVM);
1411#endif
1412
1413 /** @todo pointless to write protect the physical page pointed to by RSP. */
1414
1415 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
1416 pRam;
1417 pRam = pRam->CTX_SUFF(pNext))
1418 {
1419 uint32_t cPages = pRam->cb >> PAGE_SHIFT;
1420 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1421 {
1422 PPGMPAGE pPage = &pRam->aPages[iPage];
1423 PGMPAGETYPE enmPageType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1424
1425 if ( RT_LIKELY(enmPageType == PGMPAGETYPE_RAM)
1426 || enmPageType == PGMPAGETYPE_MMIO2)
1427 {
1428 /*
1429 * A RAM page.
1430 */
1431 switch (PGM_PAGE_GET_STATE(pPage))
1432 {
1433 case PGM_PAGE_STATE_ALLOCATED:
1434 /** @todo Optimize this: Don't always re-enable write
1435 * monitoring if the page is known to be very busy. */
1436 if (PGM_PAGE_IS_WRITTEN_TO(pPage))
1437 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
1438
1439 pgmPhysPageWriteMonitor(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1440 break;
1441
1442 case PGM_PAGE_STATE_SHARED:
1443 AssertFailed();
1444 break;
1445
1446 case PGM_PAGE_STATE_WRITE_MONITORED: /* nothing to change. */
1447 default:
1448 break;
1449 }
1450 }
1451 }
1452 }
1453 pgmR3PoolWriteProtectPages(pVM);
1454 PGM_INVL_ALL_VCPU_TLBS(pVM);
1455 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1456 CPUMSetChangedFlags(pVM->apCpusR3[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
1457
1458 pgmUnlock(pVM);
1459 return rc;
1460}
1461
1462/**
1463 * Protect all physical RAM to monitor writes
1464 *
1465 * @returns VBox status code.
1466 * @param pVM The cross context VM structure.
1467 */
1468VMMR3DECL(int) PGMR3PhysWriteProtectRAM(PVM pVM)
1469{
1470 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1471
1472 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysWriteProtectRAMRendezvous, NULL);
1473 AssertRC(rc);
1474 return rc;
1475}
1476
1477
1478/**
1479 * Gets the number of ram ranges.
1480 *
1481 * @returns Number of ram ranges. Returns UINT32_MAX if @a pVM is invalid.
1482 * @param pVM The cross context VM structure.
1483 */
1484VMMR3DECL(uint32_t) PGMR3PhysGetRamRangeCount(PVM pVM)
1485{
1486 VM_ASSERT_VALID_EXT_RETURN(pVM, UINT32_MAX);
1487
1488 pgmLock(pVM);
1489 uint32_t cRamRanges = 0;
1490 for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRangesX); pCur; pCur = pCur->CTX_SUFF(pNext))
1491 cRamRanges++;
1492 pgmUnlock(pVM);
1493 return cRamRanges;
1494}
1495
1496
1497/**
1498 * Get information about a range.
1499 *
1500 * @returns VINF_SUCCESS or VERR_OUT_OF_RANGE.
1501 * @param pVM The cross context VM structure.
1502 * @param iRange The ordinal of the range.
1503 * @param pGCPhysStart Where to return the start of the range. Optional.
1504 * @param pGCPhysLast Where to return the address of the last byte in the
1505 * range. Optional.
1506 * @param ppszDesc Where to return the range description. Optional.
1507 * @param pfIsMmio Where to indicate that this is a pure MMIO range.
1508 * Optional.
1509 */
1510VMMR3DECL(int) PGMR3PhysGetRange(PVM pVM, uint32_t iRange, PRTGCPHYS pGCPhysStart, PRTGCPHYS pGCPhysLast,
1511 const char **ppszDesc, bool *pfIsMmio)
1512{
1513 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1514
1515 pgmLock(pVM);
1516 uint32_t iCurRange = 0;
1517 for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRangesX); pCur; pCur = pCur->CTX_SUFF(pNext), iCurRange++)
1518 if (iCurRange == iRange)
1519 {
1520 if (pGCPhysStart)
1521 *pGCPhysStart = pCur->GCPhys;
1522 if (pGCPhysLast)
1523 *pGCPhysLast = pCur->GCPhysLast;
1524 if (ppszDesc)
1525 *ppszDesc = pCur->pszDesc;
1526 if (pfIsMmio)
1527 *pfIsMmio = !!(pCur->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO);
1528
1529 pgmUnlock(pVM);
1530 return VINF_SUCCESS;
1531 }
1532 pgmUnlock(pVM);
1533 return VERR_OUT_OF_RANGE;
1534}
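/*
 * Illustrative usage sketch: enumerating the registered RAM ranges with the
 * two informational APIs above.  The function name is example-only.
 */
static void exampleLogRamRanges(PVM pVM)
{
    uint32_t const cRanges = PGMR3PhysGetRamRangeCount(pVM);
    for (uint32_t iRange = 0; iRange < cRanges; iRange++)
    {
        RTGCPHYS    GCPhysStart = 0;
        RTGCPHYS    GCPhysLast  = 0;
        const char *pszDesc     = NULL;
        bool        fIsMmio     = false;
        if (RT_SUCCESS(PGMR3PhysGetRange(pVM, iRange, &GCPhysStart, &GCPhysLast, &pszDesc, &fIsMmio)))
            LogRel(("RAM range #%u: %RGp-%RGp %s%s\n", iRange, GCPhysStart, GCPhysLast, pszDesc, fIsMmio ? " (MMIO)" : ""));
    }
}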
1535
1536
1537/**
1538 * Query the amount of free memory inside VMMR0
1539 *
1540 * @returns VBox status code.
1541 * @param pUVM The user mode VM handle.
1542 * @param pcbAllocMem Where to return the amount of memory allocated
1543 * by VMs.
1544 * @param pcbFreeMem Where to return the amount of memory that is
1545 * allocated from the host but not currently used
1546 * by any VMs.
1547 * @param pcbBallonedMem Where to return the sum of memory that is
1548 * currently ballooned by the VMs.
1549 * @param pcbSharedMem Where to return the amount of memory that is
1550 * currently shared.
1551 */
1552VMMR3DECL(int) PGMR3QueryGlobalMemoryStats(PUVM pUVM, uint64_t *pcbAllocMem, uint64_t *pcbFreeMem,
1553 uint64_t *pcbBallonedMem, uint64_t *pcbSharedMem)
1554{
1555 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1556 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
1557
1558 uint64_t cAllocPages = 0;
1559 uint64_t cFreePages = 0;
1560 uint64_t cBalloonPages = 0;
1561 uint64_t cSharedPages = 0;
1562 int rc = GMMR3QueryHypervisorMemoryStats(pUVM->pVM, &cAllocPages, &cFreePages, &cBalloonPages, &cSharedPages);
1563 AssertRCReturn(rc, rc);
1564
1565 if (pcbAllocMem)
1566 *pcbAllocMem = cAllocPages * _4K;
1567
1568 if (pcbFreeMem)
1569 *pcbFreeMem = cFreePages * _4K;
1570
1571 if (pcbBallonedMem)
1572 *pcbBallonedMem = cBalloonPages * _4K;
1573
1574 if (pcbSharedMem)
1575 *pcbSharedMem = cSharedPages * _4K;
1576
1577 Log(("PGMR3QueryGlobalMemoryStats: all=%llx free=%llx ballooned=%llx shared=%llx\n",
1578 cAllocPages, cFreePages, cBalloonPages, cSharedPages));
1579 return VINF_SUCCESS;
1580}
1581
1582
1583/**
1584 * Query memory stats for the VM.
1585 *
1586 * @returns VBox status code.
1587 * @param pUVM The user mode VM handle.
1588 * @param pcbTotalMem Where to return the total amount of memory the VM may
1589 * possibly use.
1590 * @param pcbPrivateMem Where to return the amount of private memory
1591 * currently allocated.
1592 * @param pcbSharedMem Where to return the amount of actually shared
1593 * memory currently used by the VM.
1594 * @param pcbZeroMem Where to return the amount of memory backed by
1595 * zero pages.
1596 *
1597 * @remarks The total memory is normally larger than the sum of the three
1598 * components. There are two reasons for this: first, the amount of
1599 * shared memory is what we're sure is shared, not what could
1600 * possibly be shared with someone; second, the total may
1601 * include some pure MMIO pages that don't go into any of the three
1602 * sub-counts.
1603 *
1604 * @todo Why do we return reused shared pages instead of anything that could
1605 * potentially be shared? Doesn't this mean the first VM gets a much
1606 * lower number of shared pages?
1607 */
1608VMMR3DECL(int) PGMR3QueryMemoryStats(PUVM pUVM, uint64_t *pcbTotalMem, uint64_t *pcbPrivateMem,
1609 uint64_t *pcbSharedMem, uint64_t *pcbZeroMem)
1610{
1611 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1612 PVM pVM = pUVM->pVM;
1613 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1614
1615 if (pcbTotalMem)
1616 *pcbTotalMem = (uint64_t)pVM->pgm.s.cAllPages * PAGE_SIZE;
1617
1618 if (pcbPrivateMem)
1619 *pcbPrivateMem = (uint64_t)pVM->pgm.s.cPrivatePages * PAGE_SIZE;
1620
1621 if (pcbSharedMem)
1622 *pcbSharedMem = (uint64_t)pVM->pgm.s.cReusedSharedPages * PAGE_SIZE;
1623
1624 if (pcbZeroMem)
1625 *pcbZeroMem = (uint64_t)pVM->pgm.s.cZeroPages * PAGE_SIZE;
1626
1627 Log(("PGMR3QueryMemoryStats: all=%x private=%x reused=%x zero=%x\n", pVM->pgm.s.cAllPages, pVM->pgm.s.cPrivatePages, pVM->pgm.s.cReusedSharedPages, pVM->pgm.s.cZeroPages));
1628 return VINF_SUCCESS;
1629}
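/*
 * Illustrative usage sketch: querying the per-VM statistics documented above
 * and logging them.  The function name is example-only.
 */
static int exampleLogVmMemoryStats(PUVM pUVM)
{
    uint64_t cbTotal = 0, cbPrivate = 0, cbShared = 0, cbZero = 0;
    int rc = PGMR3QueryMemoryStats(pUVM, &cbTotal, &cbPrivate, &cbShared, &cbZero);
    if (RT_SUCCESS(rc))
        LogRel(("mem stats: total=%RU64 private=%RU64 shared=%RU64 zero=%RU64 (bytes)\n",
                cbTotal, cbPrivate, cbShared, cbZero));
    return rc;
}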
1630
1631
1632/**
1633 * PGMR3PhysRegisterRam worker that initializes and links a RAM range.
1634 *
1635 * @param pVM The cross context VM structure.
1636 * @param pNew The new RAM range.
1637 * @param GCPhys The address of the RAM range.
1638 * @param GCPhysLast The last address of the RAM range.
1639 * @param RCPtrNew The RC address if the range is floating. NIL_RTRCPTR
1640 * if in HMA.
1641 * @param R0PtrNew Ditto for R0.
1642 * @param pszDesc The description.
1643 * @param pPrev The previous RAM range (for linking).
1644 */
1645static void pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
1646 RTRCPTR RCPtrNew, RTR0PTR R0PtrNew, const char *pszDesc, PPGMRAMRANGE pPrev)
1647{
1648 /*
1649 * Initialize the range.
1650 */
1651 pNew->pSelfR0 = R0PtrNew != NIL_RTR0PTR ? R0PtrNew : MMHyperCCToR0(pVM, pNew);
1652 pNew->GCPhys = GCPhys;
1653 pNew->GCPhysLast = GCPhysLast;
1654 pNew->cb = GCPhysLast - GCPhys + 1;
1655 pNew->pszDesc = pszDesc;
1656 pNew->fFlags = RCPtrNew != NIL_RTRCPTR ? PGM_RAM_RANGE_FLAGS_FLOATING : 0;
1657 pNew->pvR3 = NULL;
1658 pNew->paLSPages = NULL;
1659
1660 uint32_t const cPages = pNew->cb >> PAGE_SHIFT;
1661 RTGCPHYS iPage = cPages;
1662 while (iPage-- > 0)
1663 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
1664
1665 /* Update the page count stats. */
1666 pVM->pgm.s.cZeroPages += cPages;
1667 pVM->pgm.s.cAllPages += cPages;
1668
1669 /*
1670 * Link it.
1671 */
1672 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
1673}
1674
1675
1676#ifndef PGM_WITHOUT_MAPPINGS
1677/**
1678 * @callback_method_impl{FNPGMRELOCATE, Relocate a floating RAM range.}
1679 * @sa pgmR3PhysMMIO2ExRangeRelocate
1680 */
1681static DECLCALLBACK(bool) pgmR3PhysRamRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
1682 PGMRELOCATECALL enmMode, void *pvUser)
1683{
1684 PPGMRAMRANGE pRam = (PPGMRAMRANGE)pvUser;
1685 Assert(pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
1686 Assert(pRam->pSelfRC == GCPtrOld + PAGE_SIZE); RT_NOREF_PV(GCPtrOld);
1687
1688 switch (enmMode)
1689 {
1690 case PGMRELOCATECALL_SUGGEST:
1691 return true;
1692
1693 case PGMRELOCATECALL_RELOCATE:
1694 {
1695 /*
1696 * Update myself, then relink all the ranges and flush the RC TLB.
1697 */
1698 pgmLock(pVM);
1699
1700 pRam->pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE);
1701
1702 pgmR3PhysRelinkRamRanges(pVM);
1703 for (unsigned i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
1704 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
1705
1706 pgmUnlock(pVM);
1707 return true;
1708 }
1709
1710 default:
1711 AssertFailedReturn(false);
1712 }
1713}
1714#endif /* !PGM_WITHOUT_MAPPINGS */
1715
1716
1717/**
1718 * PGMR3PhysRegisterRam worker that registers a high chunk.
1719 *
1720 * @returns VBox status code.
1721 * @param pVM The cross context VM structure.
1722 * @param GCPhys The address of the RAM.
1723 * @param cRamPages The number of RAM pages to register.
1724 * @param cbChunk The size of the PGMRAMRANGE guest mapping.
1725 * @param iChunk The chunk number.
1726 * @param pszDesc The RAM range description.
1727 * @param ppPrev Previous RAM range pointer. In/Out.
1728 */
1729static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages,
1730 uint32_t cbChunk, uint32_t iChunk, const char *pszDesc,
1731 PPGMRAMRANGE *ppPrev)
1732{
1733 const char *pszDescChunk = iChunk == 0
1734 ? pszDesc
1735 : MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s (#%u)", pszDesc, iChunk + 1);
1736 AssertReturn(pszDescChunk, VERR_NO_MEMORY);
1737
1738 /*
1739 * Allocate memory for the new chunk.
1740 */
1741 size_t const cChunkPages = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cRamPages]), PAGE_SIZE) >> PAGE_SHIFT;
1742 PSUPPAGE paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages);
1743 AssertReturn(paChunkPages, VERR_NO_TMP_MEMORY);
1744 RTR0PTR R0PtrChunk = NIL_RTR0PTR;
1745 void *pvChunk = NULL;
1746 int rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk, &R0PtrChunk, paChunkPages);
1747 if (RT_SUCCESS(rc))
1748 {
1749 Assert(R0PtrChunk != NIL_RTR0PTR);
1750 memset(pvChunk, 0, cChunkPages << PAGE_SHIFT);
1751
1752 PPGMRAMRANGE pNew = (PPGMRAMRANGE)pvChunk;
1753
1754 /*
1755 * Create a mapping and map the pages into it.
1756 * We push these in below the HMA.
1757 */
1758 RTGCPTR GCPtrChunkMap = pVM->pgm.s.GCPtrPrevRamRangeMapping - cbChunk;
1759#ifndef PGM_WITHOUT_MAPPINGS
1760 rc = PGMR3MapPT(pVM, GCPtrChunkMap, cbChunk, 0 /*fFlags*/, pgmR3PhysRamRangeRelocate, pNew, pszDescChunk);
1761 if (RT_SUCCESS(rc))
1762#endif /* !PGM_WITHOUT_MAPPINGS */
1763 {
1764 pVM->pgm.s.GCPtrPrevRamRangeMapping = GCPtrChunkMap;
1765
1766 RTGCPTR const GCPtrChunk = GCPtrChunkMap + PAGE_SIZE;
1767#ifndef PGM_WITHOUT_MAPPINGS
1768 RTGCPTR GCPtrPage = GCPtrChunk;
1769 for (uint32_t iPage = 0; iPage < cChunkPages && RT_SUCCESS(rc); iPage++, GCPtrPage += PAGE_SIZE)
1770 rc = PGMMap(pVM, GCPtrPage, paChunkPages[iPage].Phys, PAGE_SIZE, 0);
1771 if (RT_SUCCESS(rc))
1772#endif /* !PGM_WITHOUT_MAPPINGS */
1773 {
1774 /*
1775 * Ok, init and link the range.
1776 */
1777 pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << PAGE_SHIFT) - 1,
1778 (RTRCPTR)GCPtrChunk, R0PtrChunk, pszDescChunk, *ppPrev);
1779 *ppPrev = pNew;
1780 }
1781 }
1782
1783 if (RT_FAILURE(rc))
1784 SUPR3PageFreeEx(pvChunk, cChunkPages);
1785 }
1786
1787 RTMemTmpFree(paChunkPages);
1788 return rc;
1789}
1790
1791
1792/**
1793 * Sets up a RAM range.
1794 *
1795 * This will check for conflicting registrations, make a resource
1796 * reservation for the memory (with GMM), and set up the per-page
1797 * tracking structures (PGMPAGE).
1798 *
1799 * @returns VBox status code.
1800 * @param pVM The cross context VM structure.
1801 * @param GCPhys The physical address of the RAM.
1802 * @param cb The size of the RAM.
1803 * @param pszDesc The description - not copied, so, don't free or change it.
1804 */
1805VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
1806{
1807 /*
1808 * Validate input.
1809 */
1810 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
1811 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
1812 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
1813 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
1814 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1815 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
1816 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1817 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1818
1819 pgmLock(pVM);
1820
1821 /*
1822 * Find range location and check for conflicts.
1823 * (The PGM lock is already held here; see the pgmLock call above.)
1824 */
1825 PPGMRAMRANGE pPrev = NULL;
1826 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
1827 while (pRam && GCPhysLast >= pRam->GCPhys)
1828 {
1829 if ( GCPhysLast >= pRam->GCPhys
1830 && GCPhys <= pRam->GCPhysLast)
1831 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
1832 GCPhys, GCPhysLast, pszDesc,
1833 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1834 VERR_PGM_RAM_CONFLICT);
1835
1836 /* next */
1837 pPrev = pRam;
1838 pRam = pRam->pNextR3;
1839 }
1840
1841 /*
1842 * Register it with GMM (the API bitches).
1843 */
1844 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
1845 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
1846 if (RT_FAILURE(rc))
1847 {
1848 pgmUnlock(pVM);
1849 return rc;
1850 }
1851
1852 if ( GCPhys >= _4G
1853 && cPages > 256)
1854 {
1855 /*
1856 * The PGMRAMRANGE structures for the high memory can get very big.
1857 * In order to avoid SUPR3PageAllocEx allocation failures due to the
1858 * allocation size limit there and also to avoid being unable to find
1859 * guest mapping space for them, we split this memory up into 4MB chunks in
1860 * (potential) raw-mode configs and 16MB chunks in forced AMD-V/VT-x
1861 * mode.
1862 *
1863 * The first and last page of each mapping are guard pages and marked
1864 * not-present. So, we've got 4186112 and 16769024 bytes available for
1865 * the PGMRAMRANGE structure.
1866 *
1867 * Note! The sizes used here will influence the saved state.
1868 */
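 /* Worked example of the sizes above (assuming 4 KiB pages and a 16 byte PGMPAGE):
    16 MiB minus the two guard pages leaves 16769024 bytes; subtracting the
    PGMRAMRANGE header and dividing by sizeof(PGMPAGE) gives roughly 1048059
    PGMPAGE entries, of which 1048048 are actually used per chunk below. */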
1869 uint32_t cbChunk = 16U*_1M;
1870 uint32_t cPagesPerChunk = 1048048; /* max ~1048059 */
1871 AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 1048048 < 16U*_1M - PAGE_SIZE * 2);
1872 AssertRelease(RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPagesPerChunk]) + PAGE_SIZE * 2 <= cbChunk);
1873
1874 RTGCPHYS cPagesLeft = cPages;
1875 RTGCPHYS GCPhysChunk = GCPhys;
1876 uint32_t iChunk = 0;
1877 while (cPagesLeft > 0)
1878 {
1879 uint32_t cPagesInChunk = cPagesLeft;
1880 if (cPagesInChunk > cPagesPerChunk)
1881 cPagesInChunk = cPagesPerChunk;
1882
1883 rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, cbChunk, iChunk, pszDesc, &pPrev);
1884 AssertRCReturn(rc, rc);
1885
1886 /* advance */
1887 GCPhysChunk += (RTGCPHYS)cPagesInChunk << PAGE_SHIFT;
1888 cPagesLeft -= cPagesInChunk;
1889 iChunk++;
1890 }
1891 }
1892 else
1893 {
1894 /*
1895 * Allocate, initialize and link the new RAM range.
1896 */
1897 const size_t cbRamRange = RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]);
1898 PPGMRAMRANGE pNew;
1899 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
1900 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
1901
1902 pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, NIL_RTRCPTR, NIL_RTR0PTR, pszDesc, pPrev);
1903 }
1904 pgmPhysInvalidatePageMapTLB(pVM);
1905
1906 /*
1907 * Notify NEM while holding the lock (experimental).
1908 */
1909 rc = NEMR3NotifyPhysRamRegister(pVM, GCPhys, cb);
1910 pgmUnlock(pVM);
1911 return rc;
1912}
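
#if 0 /* Illustrative example, disabled: not called from anywhere. */
/* A minimal sketch of how an init path could call PGMR3PhysRegisterRam for two
 * page-aligned ranges.  The addresses, sizes and description strings are
 * made-up example values. */
static int pgmR3PhysExampleRegisterRam(PVM pVM)
{
    /* 640 KiB of base memory at guest-physical address zero. */
    int rc = PGMR3PhysRegisterRam(pVM, 0, 640U * _1K, "Example Base RAM");
    if (RT_SUCCESS(rc))
        /* 127 MiB of main memory starting at 1 MiB. */
        rc = PGMR3PhysRegisterRam(pVM, _1M, 127U * _1M, "Example Main RAM");
    return rc;
}
#endif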
1913
1914
1915/**
1916 * Worker called by PGMR3InitFinalize if we're configured to pre-allocate RAM.
1917 *
1918 * We do this late in the init process so that all the ROM and MMIO ranges have
1919 * been registered already and we don't go wasting memory on them.
1920 *
1921 * @returns VBox status code.
1922 *
1923 * @param pVM The cross context VM structure.
1924 */
1925int pgmR3PhysRamPreAllocate(PVM pVM)
1926{
1927 Assert(pVM->pgm.s.fRamPreAlloc);
1928 Log(("pgmR3PhysRamPreAllocate: enter\n"));
1929
1930 /*
1931 * Walk the RAM ranges and allocate all RAM pages, halt at
1932 * the first allocation error.
1933 */
1934 uint64_t cPages = 0;
1935 uint64_t NanoTS = RTTimeNanoTS();
1936 pgmLock(pVM);
1937 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
1938 {
1939 PPGMPAGE pPage = &pRam->aPages[0];
1940 RTGCPHYS GCPhys = pRam->GCPhys;
1941 uint32_t cLeft = pRam->cb >> PAGE_SHIFT;
1942 while (cLeft-- > 0)
1943 {
1944 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
1945 {
1946 switch (PGM_PAGE_GET_STATE(pPage))
1947 {
1948 case PGM_PAGE_STATE_ZERO:
1949 {
1950 int rc = pgmPhysAllocPage(pVM, pPage, GCPhys);
1951 if (RT_FAILURE(rc))
1952 {
1953 LogRel(("PGM: RAM Pre-allocation failed at %RGp (in %s) with rc=%Rrc\n", GCPhys, pRam->pszDesc, rc));
1954 pgmUnlock(pVM);
1955 return rc;
1956 }
1957 cPages++;
1958 break;
1959 }
1960
1961 case PGM_PAGE_STATE_BALLOONED:
1962 case PGM_PAGE_STATE_ALLOCATED:
1963 case PGM_PAGE_STATE_WRITE_MONITORED:
1964 case PGM_PAGE_STATE_SHARED:
1965 /* nothing to do here. */
1966 break;
1967 }
1968 }
1969
1970 /* next */
1971 pPage++;
1972 GCPhys += PAGE_SIZE;
1973 }
1974 }
1975 pgmUnlock(pVM);
1976 NanoTS = RTTimeNanoTS() - NanoTS;
1977
1978 LogRel(("PGM: Pre-allocated %llu pages in %llu ms\n", cPages, NanoTS / 1000000));
1979 Log(("pgmR3PhysRamPreAllocate: returns VINF_SUCCESS\n"));
1980 return VINF_SUCCESS;
1981}
1982
1983
1984/**
1985 * Checks shared page checksums.
1986 *
1987 * @param pVM The cross context VM structure.
1988 */
1989void pgmR3PhysAssertSharedPageChecksums(PVM pVM)
1990{
1991#ifdef VBOX_STRICT
1992 pgmLock(pVM);
1993
1994 if (pVM->pgm.s.cSharedPages > 0)
1995 {
1996 /*
1997 * Walk the ram ranges.
1998 */
1999 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
2000 {
2001 uint32_t iPage = pRam->cb >> PAGE_SHIFT;
2002 AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
2003
2004 while (iPage-- > 0)
2005 {
2006 PPGMPAGE pPage = &pRam->aPages[iPage];
2007 if (PGM_PAGE_IS_SHARED(pPage))
2008 {
2009 uint32_t u32Checksum = pPage->s.u2Unused0/* | ((uint32_t)pPage->s.u2Unused1 << 8)*/;
2010 if (!u32Checksum)
2011 {
2012 RTGCPHYS GCPhysPage = pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
2013 void const *pvPage;
2014 int rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhysPage, &pvPage);
2015 if (RT_SUCCESS(rc))
2016 {
2017 uint32_t u32Checksum2 = RTCrc32(pvPage, PAGE_SIZE);
2018# if 0
2019 AssertMsg((u32Checksum2 & /*UINT32_C(0x00000303)*/ 0x3) == u32Checksum, ("GCPhysPage=%RGp\n", GCPhysPage));
2020# else
2021 if ((u32Checksum2 & /*UINT32_C(0x00000303)*/ 0x3) == u32Checksum)
2022 LogFlow(("shpg %#x @ %RGp %#x [OK]\n", PGM_PAGE_GET_PAGEID(pPage), GCPhysPage, u32Checksum2));
2023 else
2024 AssertMsgFailed(("shpg %#x @ %RGp %#x\n", PGM_PAGE_GET_PAGEID(pPage), GCPhysPage, u32Checksum2));
2025# endif
2026 }
2027 else
2028 AssertRC(rc);
2029 }
2030 }
2031
2032 } /* for each page */
2033
2034 } /* for each ram range */
2035 }
2036
2037 pgmUnlock(pVM);
2038#endif /* VBOX_STRICT */
2039 NOREF(pVM);
2040}
2041
2042
2043/**
2044 * Resets the physical memory state.
2045 *
2046 * ASSUMES that the caller owns the PGM lock.
2047 *
2048 * @returns VBox status code.
2049 * @param pVM The cross context VM structure.
2050 */
2051int pgmR3PhysRamReset(PVM pVM)
2052{
2053 PGM_LOCK_ASSERT_OWNER(pVM);
2054
2055 /* Reset the memory balloon. */
2056 int rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_RESET, 0);
2057 AssertRC(rc);
2058
2059#ifdef VBOX_WITH_PAGE_SHARING
2060 /* Clear all registered shared modules. */
2061 pgmR3PhysAssertSharedPageChecksums(pVM);
2062 rc = GMMR3ResetSharedModules(pVM);
2063 AssertRC(rc);
2064#endif
2065 /* Reset counters. */
2066 pVM->pgm.s.cReusedSharedPages = 0;
2067 pVM->pgm.s.cBalloonedPages = 0;
2068
2069 return VINF_SUCCESS;
2070}
2071
2072
2073/**
2074 * Resets (zeros) the RAM after all devices and components have been reset.
2075 *
2076 * ASSUMES that the caller owns the PGM lock.
2077 *
2078 * @returns VBox status code.
2079 * @param pVM The cross context VM structure.
2080 */
2081int pgmR3PhysRamZeroAll(PVM pVM)
2082{
2083 PGM_LOCK_ASSERT_OWNER(pVM);
2084
2085 /*
2086 * We batch up pages that should be freed instead of calling GMM for
2087 * each and every one of them.
2088 */
2089 uint32_t cPendingPages = 0;
2090 PGMMFREEPAGESREQ pReq;
2091 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2092 AssertLogRelRCReturn(rc, rc);
2093
2094 /*
2095 * Walk the ram ranges.
2096 */
2097 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
2098 {
2099 uint32_t iPage = pRam->cb >> PAGE_SHIFT;
2100 AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
2101
2102 if ( !pVM->pgm.s.fRamPreAlloc
2103 && pVM->pgm.s.fZeroRamPagesOnReset)
2104 {
2105 /* Replace all RAM pages by ZERO pages. */
2106 while (iPage-- > 0)
2107 {
2108 PPGMPAGE pPage = &pRam->aPages[iPage];
2109 switch (PGM_PAGE_GET_TYPE(pPage))
2110 {
2111 case PGMPAGETYPE_RAM:
2112 /* Do not replace pages part of a 2 MB contiguous range
2113 with zero pages, but zero them instead. */
2114 if ( PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE
2115 || PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED)
2116 {
2117 void *pvPage;
2118 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pvPage);
2119 AssertLogRelRCReturn(rc, rc);
2120 ASMMemZeroPage(pvPage);
2121 }
2122 else if (PGM_PAGE_IS_BALLOONED(pPage))
2123 {
2124 /* Turn into a zero page; the balloon status is lost when the VM reboots. */
2125 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
2126 }
2127 else if (!PGM_PAGE_IS_ZERO(pPage))
2128 {
2129 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT),
2130 PGMPAGETYPE_RAM);
2131 AssertLogRelRCReturn(rc, rc);
2132 }
2133 break;
2134
2135 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
2136 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: /** @todo perhaps leave the special page alone? I don't think VT-x copes with this code. */
2137 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT),
2138 true /*fDoAccounting*/);
2139 break;
2140
2141 case PGMPAGETYPE_MMIO2:
2142 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
2143 case PGMPAGETYPE_ROM:
2144 case PGMPAGETYPE_MMIO:
2145 break;
2146 default:
2147 AssertFailed();
2148 }
2149 } /* for each page */
2150 }
2151 else
2152 {
2153 /* Zero the memory. */
2154 while (iPage-- > 0)
2155 {
2156 PPGMPAGE pPage = &pRam->aPages[iPage];
2157 switch (PGM_PAGE_GET_TYPE(pPage))
2158 {
2159 case PGMPAGETYPE_RAM:
2160 switch (PGM_PAGE_GET_STATE(pPage))
2161 {
2162 case PGM_PAGE_STATE_ZERO:
2163 break;
2164
2165 case PGM_PAGE_STATE_BALLOONED:
2166 /* Turn into a zero page; the balloon status is lost when the VM reboots. */
2167 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
2168 break;
2169
2170 case PGM_PAGE_STATE_SHARED:
2171 case PGM_PAGE_STATE_WRITE_MONITORED:
2172 rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
2173 AssertLogRelRCReturn(rc, rc);
2174 RT_FALL_THRU();
2175
2176 case PGM_PAGE_STATE_ALLOCATED:
2177 if (pVM->pgm.s.fZeroRamPagesOnReset)
2178 {
2179 void *pvPage;
2180 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pvPage);
2181 AssertLogRelRCReturn(rc, rc);
2182 ASMMemZeroPage(pvPage);
2183 }
2184 break;
2185 }
2186 break;
2187
2188 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
2189 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: /** @todo perhaps leave the special page alone? I don't think VT-x copes with this code. */
2190 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT),
2191 true /*fDoAccounting*/);
2192 break;
2193
2194 case PGMPAGETYPE_MMIO2:
2195 case PGMPAGETYPE_ROM_SHADOW:
2196 case PGMPAGETYPE_ROM:
2197 case PGMPAGETYPE_MMIO:
2198 break;
2199 default:
2200 AssertFailed();
2201
2202 }
2203 } /* for each page */
2204 }
2205
2206 }
2207
2208 /*
2209 * Finish off any pages pending freeing.
2210 */
2211 if (cPendingPages)
2212 {
2213 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2214 AssertLogRelRCReturn(rc, rc);
2215 }
2216 GMMR3FreePagesCleanup(pReq);
2217 return VINF_SUCCESS;
2218}
2219
2220
2221/**
2222 * Frees all RAM during VM termination.
2223 *
2224 * ASSUMES that the caller owns the PGM lock.
2225 *
2226 * @returns VBox status code.
2227 * @param pVM The cross context VM structure.
2228 */
2229int pgmR3PhysRamTerm(PVM pVM)
2230{
2231 PGM_LOCK_ASSERT_OWNER(pVM);
2232
2233 /* Reset the memory balloon. */
2234 int rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_RESET, 0);
2235 AssertRC(rc);
2236
2237#ifdef VBOX_WITH_PAGE_SHARING
2238 /*
2239 * Clear all registered shared modules.
2240 */
2241 pgmR3PhysAssertSharedPageChecksums(pVM);
2242 rc = GMMR3ResetSharedModules(pVM);
2243 AssertRC(rc);
2244
2245 /*
2246 * Flush the handy pages updates to make sure no shared pages are hiding
2247 * in there. (Not unlikely if the VM shuts down, apparently.)
2248 */
2249 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_FLUSH_HANDY_PAGES, 0, NULL);
2250#endif
2251
2252 /*
2253 * We batch up pages that should be freed instead of calling GMM for
2254 * each and every one of them.
2255 */
2256 uint32_t cPendingPages = 0;
2257 PGMMFREEPAGESREQ pReq;
2258 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2259 AssertLogRelRCReturn(rc, rc);
2260
2261 /*
2262 * Walk the ram ranges.
2263 */
2264 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
2265 {
2266 uint32_t iPage = pRam->cb >> PAGE_SHIFT;
2267 AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
2268
2269 while (iPage-- > 0)
2270 {
2271 PPGMPAGE pPage = &pRam->aPages[iPage];
2272 switch (PGM_PAGE_GET_TYPE(pPage))
2273 {
2274 case PGMPAGETYPE_RAM:
2275 /* Free all shared pages. Private pages are automatically freed during GMM VM cleanup. */
2276 /** @todo change this to explicitly free private pages here. */
2277 if (PGM_PAGE_IS_SHARED(pPage))
2278 {
2279 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT),
2280 PGMPAGETYPE_RAM);
2281 AssertLogRelRCReturn(rc, rc);
2282 }
2283 break;
2284
2285 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
2286 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
2287 case PGMPAGETYPE_MMIO2:
2288 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
2289 case PGMPAGETYPE_ROM:
2290 case PGMPAGETYPE_MMIO:
2291 break;
2292 default:
2293 AssertFailed();
2294 }
2295 } /* for each page */
2296 }
2297
2298 /*
2299 * Finish off any pages pending freeing.
2300 */
2301 if (cPendingPages)
2302 {
2303 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2304 AssertLogRelRCReturn(rc, rc);
2305 }
2306 GMMR3FreePagesCleanup(pReq);
2307 return VINF_SUCCESS;
2308}
2309
2310
2311/**
2312 * This is the interface IOM is using to register an MMIO region.
2313 *
2314 * It will check for conflicts and ensure that a RAM range structure
2315 * is present before calling the PGMR3HandlerPhysicalRegister API to
2316 * register the callbacks.
2317 *
2318 * @returns VBox status code.
2319 *
2320 * @param pVM The cross context VM structure.
2321 * @param GCPhys The start of the MMIO region.
2322 * @param cb The size of the MMIO region.
2323 * @param hType The physical access handler type registration.
2324 * @param pvUserR3 The user argument for R3.
2325 * @param pvUserR0 The user argument for R0.
2326 * @param pvUserRC The user argument for RC.
2327 * @param pszDesc The description of the MMIO region.
2328 */
2329VMMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMPHYSHANDLERTYPE hType,
2330 RTR3PTR pvUserR3, RTR0PTR pvUserR0, RTRCPTR pvUserRC, const char *pszDesc)
2331{
2332 /*
2333 * Assert on some assumption.
2334 */
2335 VM_ASSERT_EMT(pVM);
2336 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2337 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2338 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
2339 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
2340 Assert(((PPGMPHYSHANDLERTYPEINT)MMHyperHeapOffsetToPtr(pVM, hType))->enmKind == PGMPHYSHANDLERKIND_MMIO);
2341
2342 int rc = pgmLock(pVM);
2343 AssertRCReturn(rc, rc);
2344
2345 /*
2346 * Make sure there's a RAM range structure for the region.
2347 */
2348 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2349 bool fRamExists = false;
2350 PPGMRAMRANGE pRamPrev = NULL;
2351 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
2352 while (pRam && GCPhysLast >= pRam->GCPhys)
2353 {
2354 if ( GCPhysLast >= pRam->GCPhys
2355 && GCPhys <= pRam->GCPhysLast)
2356 {
2357 /* Simplification: all within the same range. */
2358 AssertLogRelMsgReturnStmt( GCPhys >= pRam->GCPhys
2359 && GCPhysLast <= pRam->GCPhysLast,
2360 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
2361 GCPhys, GCPhysLast, pszDesc,
2362 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
2363 pgmUnlock(pVM),
2364 VERR_PGM_RAM_CONFLICT);
2365
2366 /* Check that it's all RAM or MMIO pages. */
2367 PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2368 uint32_t cLeft = cb >> PAGE_SHIFT;
2369 while (cLeft-- > 0)
2370 {
2371 AssertLogRelMsgReturnStmt( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
2372 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
2373 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
2374 GCPhys, GCPhysLast, pszDesc, pRam->GCPhys, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
2375 pgmUnlock(pVM),
2376 VERR_PGM_RAM_CONFLICT);
2377 pPage++;
2378 }
2379
2380 /* Looks good. */
2381 fRamExists = true;
2382 break;
2383 }
2384
2385 /* next */
2386 pRamPrev = pRam;
2387 pRam = pRam->pNextR3;
2388 }
2389 PPGMRAMRANGE pNew;
2390 if (fRamExists)
2391 {
2392 pNew = NULL;
2393
2394 /*
2395 * Make all the pages in the range MMIO/ZERO pages, freeing any
2396 * RAM pages currently mapped here. This might not be 100% correct
2397 * for PCI memory, but we're doing the same thing for MMIO2 pages.
2398 */
2399 rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, PGMPAGETYPE_MMIO);
2400 AssertRCReturnStmt(rc, pgmUnlock(pVM), rc);
2401
2402 /* Force a PGM pool flush as guest ram references have been changed. */
2403 /** @todo not entirely SMP safe; assuming for now the guest takes
2404 * care of this internally (not touch mapped mmio while changing the
2405 * mapping). */
2406 PVMCPU pVCpu = VMMGetCpu(pVM);
2407 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2408 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2409 }
2410 else
2411 {
2412
2413 /*
2414 * No RAM range, insert an ad hoc one.
2415 *
2416 * Note that we don't have to tell REM about this range because
2417 * PGMHandlerPhysicalRegisterEx will do that for us.
2418 */
2419 Log(("PGMR3PhysMMIORegister: Adding ad hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));
2420
2421 const uint32_t cPages = cb >> PAGE_SHIFT;
2422 const size_t cbRamRange = RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]);
2423 rc = MMHyperAlloc(pVM, cbRamRange, 16, MM_TAG_PGM_PHYS, (void **)&pNew);
2424 AssertLogRelMsgRCReturnStmt(rc, ("cbRamRange=%zu\n", cbRamRange), pgmUnlock(pVM), rc);
2425
2426 /* Initialize the range. */
2427 pNew->pSelfR0 = MMHyperCCToR0(pVM, pNew);
2428 pNew->GCPhys = GCPhys;
2429 pNew->GCPhysLast = GCPhysLast;
2430 pNew->cb = cb;
2431 pNew->pszDesc = pszDesc;
2432 pNew->fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO;
2433 pNew->pvR3 = NULL;
2434 pNew->paLSPages = NULL;
2435
2436 uint32_t iPage = cPages;
2437 while (iPage-- > 0)
2438 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
2439 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);
2440
2441 /* update the page count stats. */
2442 pVM->pgm.s.cPureMmioPages += cPages;
2443 pVM->pgm.s.cAllPages += cPages;
2444
2445 /* link it */
2446 pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
2447 }
2448
2449 /*
2450 * Register the access handler.
2451 */
2452 rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, hType, pvUserR3, pvUserR0, pvUserRC, pszDesc);
2453 if ( RT_FAILURE(rc)
2454 && !fRamExists)
2455 {
2456 pVM->pgm.s.cPureMmioPages -= cb >> PAGE_SHIFT;
2457 pVM->pgm.s.cAllPages -= cb >> PAGE_SHIFT;
2458
2459 /* remove the ad hoc range. */
2460 pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
2461 pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
2462 MMHyperFree(pVM, pNew);
2463 }
2464 pgmPhysInvalidatePageMapTLB(pVM);
2465
2466 pgmUnlock(pVM);
2467 return rc;
2468}
2469
2470
2471/**
2472 * This is the interface IOM is using to deregister an MMIO region.
2473 *
2474 * It will take care of calling PGMHandlerPhysicalDeregister and clean up
2475 * any ad hoc PGMRAMRANGE left behind.
2476 *
2477 * @returns VBox status code.
2478 * @param pVM The cross context VM structure.
2479 * @param GCPhys The start of the MMIO region.
2480 * @param cb The size of the MMIO region.
2481 */
2482VMMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
2483{
2484 VM_ASSERT_EMT(pVM);
2485
2486 int rc = pgmLock(pVM);
2487 AssertRCReturn(rc, rc);
2488
2489 /*
2490 * First deregister the handler, then check if we should remove the ram range.
2491 */
2492 rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
2493 if (RT_SUCCESS(rc))
2494 {
2495 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2496 PPGMRAMRANGE pRamPrev = NULL;
2497 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
2498 while (pRam && GCPhysLast >= pRam->GCPhys)
2499 {
2500 /** @todo We're being a bit too careful here. rewrite. */
2501 if ( GCPhysLast == pRam->GCPhysLast
2502 && GCPhys == pRam->GCPhys)
2503 {
2504 Assert(pRam->cb == cb);
2505
2506 /*
2507 * See if all the pages are dead MMIO pages.
2508 */
2509 uint32_t const cPages = cb >> PAGE_SHIFT;
2510 bool fAllMMIO = true;
2511 uint32_t iPage = 0;
2512 uint32_t cLeft = cPages;
2513 while (cLeft-- > 0)
2514 {
2515 PPGMPAGE pPage = &pRam->aPages[iPage];
2516 if ( !PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)
2517 /*|| not-out-of-action later */)
2518 {
2519 fAllMMIO = false;
2520 AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
2521 break;
2522 }
2523 Assert( PGM_PAGE_IS_ZERO(pPage)
2524 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
2525 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
2526 pPage++;
2527 }
2528 if (fAllMMIO)
2529 {
2530 /*
2531 * Ad-hoc range, unlink and free it.
2532 */
2533 Log(("PGMR3PhysMMIODeregister: Freeing ad hoc MMIO range for %RGp-%RGp %s\n",
2534 GCPhys, GCPhysLast, pRam->pszDesc));
2535
2536 pVM->pgm.s.cAllPages -= cPages;
2537 pVM->pgm.s.cPureMmioPages -= cPages;
2538
2539 pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
2540 pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
2541 MMHyperFree(pVM, pRam);
2542 break;
2543 }
2544 }
2545
2546 /*
2547 * Range match? It will all be within one range (see PGMAllHandler.cpp).
2548 */
2549 if ( GCPhysLast >= pRam->GCPhys
2550 && GCPhys <= pRam->GCPhysLast)
2551 {
2552 Assert(GCPhys >= pRam->GCPhys);
2553 Assert(GCPhysLast <= pRam->GCPhysLast);
2554
2555 /*
2556 * Turn the pages back into RAM pages.
2557 */
2558 uint32_t iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2559 uint32_t cLeft = cb >> PAGE_SHIFT;
2560 while (cLeft--)
2561 {
2562 PPGMPAGE pPage = &pRam->aPages[iPage];
2563 AssertMsg( (PGM_PAGE_IS_MMIO(pPage) && PGM_PAGE_IS_ZERO(pPage))
2564 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
2565 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
2566 ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
2567 if (PGM_PAGE_IS_MMIO_OR_ALIAS(pPage))
2568 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_RAM);
2569 }
2570 break;
2571 }
2572
2573 /* next */
2574 pRamPrev = pRam;
2575 pRam = pRam->pNextR3;
2576 }
2577 }
2578
2579 /* Force a PGM pool flush as guest ram references have been changed. */
2580 /** @todo Not entirely SMP safe; assuming for now the guest takes care of
2581 * this internally (not touch mapped mmio while changing the mapping). */
2582 PVMCPU pVCpu = VMMGetCpu(pVM);
2583 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2584 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2585
2586 pgmPhysInvalidatePageMapTLB(pVM);
2587 pgmPhysInvalidRamRangeTlbs(pVM);
2588 pgmUnlock(pVM);
2589 return rc;
2590}
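
#if 0 /* Illustrative example, disabled: not called from anywhere. */
/* A minimal sketch of registering and later deregistering a single-page MMIO
 * range.  The guest-physical address is a made-up example value, and hType is
 * assumed to be a handler type of kind PGMPHYSHANDLERKIND_MMIO created
 * elsewhere by the caller. */
static int pgmR3PhysExampleMmio(PVM pVM, PGMPHYSHANDLERTYPE hType, RTR3PTR pvUser)
{
    RTGCPHYS const GCPhysMmio = UINT64_C(0x00000000fe000000); /* example address */
    int rc = PGMR3PhysMMIORegister(pVM, GCPhysMmio, PAGE_SIZE, hType,
                                   pvUser, NIL_RTR0PTR, NIL_RTRCPTR, "Example MMIO");
    if (RT_SUCCESS(rc))
        rc = PGMR3PhysMMIODeregister(pVM, GCPhysMmio, PAGE_SIZE);
    return rc;
}
#endif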
2591
2592
2593/**
2594 * Locate an MMIO2 range.
2595 *
2596 * @returns Pointer to the MMIO2 range.
2597 * @param pVM The cross context VM structure.
2598 * @param pDevIns The device instance owning the region.
2599 * @param iSubDev The sub-device number.
2600 * @param iRegion The region.
2601 * @param hMmio2 Handle to look up. If NIL, use the @a iSubDev and
2602 * @a iRegion.
2603 */
2604DECLINLINE(PPGMREGMMIO2RANGE) pgmR3PhysMMIOExFind(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev,
2605 uint32_t iRegion, PGMMMIO2HANDLE hMmio2)
2606{
2607 if (hMmio2 != NIL_PGMMMIO2HANDLE)
2608 {
2609 if (hMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3) && hMmio2 != 0)
2610 {
2611 PPGMREGMMIO2RANGE pCur = pVM->pgm.s.apMmio2RangesR3[hMmio2 - 1];
2612 if (pCur && pCur->pDevInsR3 == pDevIns)
2613 {
2614 Assert(pCur->idMmio2 == hMmio2);
2615 AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_MMIO2, NULL);
2616 AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);
2617 return pCur;
2618 }
2619 Assert(!pCur);
2620 }
2621 for (PPGMREGMMIO2RANGE pCur = pVM->pgm.s.pRegMmioRangesR3; pCur; pCur = pCur->pNextR3)
2622 if (pCur->idMmio2 == hMmio2)
2623 {
2624 AssertBreak(pCur->pDevInsR3 == pDevIns);
2625 AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_MMIO2, NULL);
2626 AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);
2627 return pCur;
2628 }
2629 }
2630 else
2631 {
2632 /*
2633 * Search the list. There shouldn't be many entries.
2634 */
2635 /** @todo Optimize this lookup! There may now be many entries and it'll
2636 * become really slow when doing MMR3HyperMapMMIO2 and similar. */
2637 for (PPGMREGMMIO2RANGE pCur = pVM->pgm.s.pRegMmioRangesR3; pCur; pCur = pCur->pNextR3)
2638 if ( pCur->pDevInsR3 == pDevIns
2639 && pCur->iRegion == iRegion
2640 && pCur->iSubDev == iSubDev)
2641 return pCur;
2642 }
2643 return NULL;
2644}
2645
2646
2647#ifndef PGM_WITHOUT_MAPPINGS
2648/**
2649 * @callback_method_impl{FNPGMRELOCATE, Relocate a floating MMIO/MMIO2 range.}
2650 * @sa pgmR3PhysRamRangeRelocate
2651 */
2652static DECLCALLBACK(bool) pgmR3PhysMMIOExRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
2653 PGMRELOCATECALL enmMode, void *pvUser)
2654{
2655 PPGMREGMMIO2RANGE pMmio = (PPGMREGMMIO2RANGE)pvUser;
2656 Assert(pMmio->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
2657 Assert(pMmio->RamRange.pSelfRC == GCPtrOld + PAGE_SIZE + RT_UOFFSETOF(PGMREGMMIO2RANGE, RamRange)); RT_NOREF_PV(GCPtrOld);
2658
2659 switch (enmMode)
2660 {
2661 case PGMRELOCATECALL_SUGGEST:
2662 return true;
2663
2664 case PGMRELOCATECALL_RELOCATE:
2665 {
2666 /*
2667 * Update myself, then relink all the ranges and flush the RC TLB.
2668 */
2669 pgmLock(pVM);
2670
2671 pMmio->RamRange.pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE + RT_UOFFSETOF(PGMREGMMIO2RANGE, RamRange));
2672
2673 pgmR3PhysRelinkRamRanges(pVM);
2674 for (unsigned i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
2675 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
2676
2677 pgmUnlock(pVM);
2678 return true;
2679 }
2680
2681 default:
2682 AssertFailedReturn(false);
2683 }
2684}
2685#endif /* !PGM_WITHOUT_MAPPINGS */
2686
2687
2688/**
2689 * Calculates the number of chunks.
2690 *
2691 * @returns Number of registration chunks needed.
2692 * @param pVM The cross context VM structure.
2693 * @param cb The size of the MMIO/MMIO2 range.
2694 * @param pcPagesPerChunk Where to return the number of pages tracked by each
2695 * chunk. Optional.
2696 * @param pcbChunk Where to return the guest mapping size for a chunk.
2697 */
2698static uint16_t pgmR3PhysMMIOExCalcChunkCount(PVM pVM, RTGCPHYS cb, uint32_t *pcPagesPerChunk, uint32_t *pcbChunk)
2699{
2700 RT_NOREF_PV(pVM); /* without raw mode */
2701
2702 /*
2703 * This is the same calculation as PGMR3PhysRegisterRam does, except we'll be
2704 * needing a few extra bytes for the PGMREGMMIO2RANGE structure.
2705 *
2706 * Note! In addition, we've got a 24 bit sub-page range for MMIO2 ranges, leaving
2707 * us with an absolute maximum of 16777215 pages per chunk (close to 64 GB).
2708 */
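 /* For example: a 256 MiB MMIO2 region is 65536 pages and fits in a single
    chunk, while anything above 1048048 pages (just under 4 GiB) gets split
    over several chunks. */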
2709 uint32_t cbChunk = 16U*_1M;
2710 uint32_t cPagesPerChunk = 1048048; /* max ~1048059 */
2711 AssertCompile(sizeof(PGMREGMMIO2RANGE) + sizeof(PGMPAGE) * 1048048 < 16U*_1M - PAGE_SIZE * 2);
2712 AssertRelease(cPagesPerChunk <= PGM_MMIO2_MAX_PAGE_COUNT); /* See above note. */
2713 AssertRelease(RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, RamRange.aPages[cPagesPerChunk]) + PAGE_SIZE * 2 <= cbChunk);
2714 if (pcbChunk)
2715 *pcbChunk = cbChunk;
2716 if (pcPagesPerChunk)
2717 *pcPagesPerChunk = cPagesPerChunk;
2718
2719 /* Calc the number of chunks we need. */
2720 RTGCPHYS const cPages = cb >> X86_PAGE_SHIFT;
2721 uint16_t cChunks = (uint16_t)((cPages + cPagesPerChunk - 1) / cPagesPerChunk);
2722 AssertRelease((RTGCPHYS)cChunks * cPagesPerChunk >= cPages);
2723 return cChunks;
2724}
2725
2726
2727/**
2728 * Worker for PGMR3PhysMMIO2Register that allocates the PGMREGMMIO2RANGE
2729 * structures and does basic initialization.
2730 *
2731 * Caller must set type specific members and initialize the PGMPAGE structures.
2732 *
2733 * This was previously also used by PGMR3PhysMMIOExPreRegister, a function for
2734 * pre-registering MMIO that was later (6.1) replaced by a new handle based IOM
2735 * interface. The reference to caller and type above is purely historical.
2736 *
2737 * @returns VBox status code.
2738 * @param pVM The cross context VM structure.
2739 * @param pDevIns The device instance owning the region.
2740 * @param iSubDev The sub-device number (internal PCI config number).
2741 * @param iRegion The region number. If the MMIO2 memory is a PCI
2742 * I/O region this number has to be the number of that
2743 * region. Otherwise it can be any number save
2744 * UINT8_MAX.
2745 * @param cb The size of the region. Must be page aligned.
2746 * @param pszDesc The description.
2747 * @param ppHeadRet Where to return the pointer to the first
2748 * registration chunk.
2749 *
2750 * @thread EMT
2751 */
2752static int pgmR3PhysMMIOExCreate(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb,
2753 const char *pszDesc, PPGMREGMMIO2RANGE *ppHeadRet)
2754{
2755 /*
2756 * Figure out how many chunks we need and of which size.
2757 */
2758 uint32_t cPagesPerChunk;
2759 uint16_t cChunks = pgmR3PhysMMIOExCalcChunkCount(pVM, cb, &cPagesPerChunk, NULL);
2760 AssertReturn(cChunks, VERR_PGM_PHYS_MMIO_EX_IPE);
2761
2762 /*
2763 * Allocate the chunks.
2764 */
2765 PPGMREGMMIO2RANGE *ppNext = ppHeadRet;
2766 *ppNext = NULL;
2767
2768 int rc = VINF_SUCCESS;
2769 uint32_t cPagesLeft = cb >> X86_PAGE_SHIFT;
2770 for (uint16_t iChunk = 0; iChunk < cChunks && RT_SUCCESS(rc); iChunk++)
2771 {
2772 /*
2773 * We currently do a single RAM range for the whole thing. This will
2774 * probably have to change once someone needs really large MMIO regions,
2775 * as we will be running into SUPR3PageAllocEx limitations and such.
2776 */
2777 const uint32_t cPagesTrackedByChunk = RT_MIN(cPagesLeft, cPagesPerChunk);
2778 const size_t cbRange = RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, RamRange.aPages[cPagesTrackedByChunk]);
2779 PPGMREGMMIO2RANGE pNew = NULL;
2780 if ( iChunk + 1 < cChunks
2781 || cbRange >= _1M)
2782 {
2783 /*
2784 * Allocate memory for the registration structure.
2785 */
2786 size_t const cChunkPages = RT_ALIGN_Z(cbRange, PAGE_SIZE) >> PAGE_SHIFT;
2787 size_t const cbChunk = (1 + cChunkPages + 1) << PAGE_SHIFT;
2788 AssertLogRelBreakStmt(cbChunk == (uint32_t)cbChunk, rc = VERR_OUT_OF_RANGE);
2789 PSUPPAGE paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages);
2790 AssertBreakStmt(paChunkPages, rc = VERR_NO_TMP_MEMORY);
2791 RTR0PTR R0PtrChunk = NIL_RTR0PTR;
2792 void *pvChunk = NULL;
2793 rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk, &R0PtrChunk, paChunkPages);
2794 AssertLogRelMsgRCBreakStmt(rc, ("rc=%Rrc, cChunkPages=%#zx\n", rc, cChunkPages), RTMemTmpFree(paChunkPages));
2795
2796 Assert(R0PtrChunk != NIL_RTR0PTR);
2797 memset(pvChunk, 0, cChunkPages << PAGE_SHIFT);
2798
2799 pNew = (PPGMREGMMIO2RANGE)pvChunk;
2800 pNew->RamRange.fFlags = PGM_RAM_RANGE_FLAGS_FLOATING;
2801 pNew->RamRange.pSelfR0 = R0PtrChunk + RT_UOFFSETOF(PGMREGMMIO2RANGE, RamRange);
2802
2803 RTMemTmpFree(paChunkPages);
2804 }
2805 /*
2806 * Not so big, do a one time hyper allocation.
2807 */
2808 else
2809 {
2810 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
2811 AssertLogRelMsgRCBreak(rc, ("cbRange=%zu\n", cbRange));
2812
2813 /*
2814 * Initialize allocation specific items.
2815 */
2816 //pNew->RamRange.fFlags = 0;
2817 pNew->RamRange.pSelfR0 = MMHyperCCToR0(pVM, &pNew->RamRange);
2818 }
2819
2820 /*
2821 * Initialize the registration structure (caller does specific bits).
2822 */
2823 pNew->pDevInsR3 = pDevIns;
2824 //pNew->pvR3 = NULL;
2825 //pNew->pNext = NULL;
2826 //pNew->fFlags = 0;
2827 if (iChunk == 0)
2828 pNew->fFlags |= PGMREGMMIO2RANGE_F_FIRST_CHUNK;
2829 if (iChunk + 1 == cChunks)
2830 pNew->fFlags |= PGMREGMMIO2RANGE_F_LAST_CHUNK;
2831 pNew->iSubDev = iSubDev;
2832 pNew->iRegion = iRegion;
2833 pNew->idSavedState = UINT8_MAX;
2834 pNew->idMmio2 = UINT8_MAX;
2835 //pNew->pPhysHandlerR3 = NULL;
2836 //pNew->paLSPages = NULL;
2837 pNew->RamRange.GCPhys = NIL_RTGCPHYS;
2838 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
2839 pNew->RamRange.pszDesc = pszDesc;
2840 pNew->RamRange.cb = pNew->cbReal = (RTGCPHYS)cPagesTrackedByChunk << X86_PAGE_SHIFT;
2841 pNew->RamRange.fFlags |= PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX;
2842 //pNew->RamRange.pvR3 = NULL;
2843 //pNew->RamRange.paLSPages = NULL;
2844
2845 *ppNext = pNew;
2846 ASMCompilerBarrier();
2847 cPagesLeft -= cPagesTrackedByChunk;
2848 ppNext = &pNew->pNextR3;
2849 }
2850 Assert(cPagesLeft == 0);
2851
2852 if (RT_SUCCESS(rc))
2853 {
2854 Assert((*ppHeadRet)->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK);
2855 return VINF_SUCCESS;
2856 }
2857
2858 /*
2859 * Free floating ranges.
2860 */
2861 while (*ppHeadRet)
2862 {
2863 PPGMREGMMIO2RANGE pFree = *ppHeadRet;
2864 *ppHeadRet = pFree->pNextR3;
2865
2866 if (pFree->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING)
2867 {
2868 const size_t cbRange = RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, RamRange.aPages[pFree->RamRange.cb >> X86_PAGE_SHIFT]);
2869 size_t const cChunkPages = RT_ALIGN_Z(cbRange, PAGE_SIZE) >> PAGE_SHIFT;
2870 SUPR3PageFreeEx(pFree, cChunkPages);
2871 }
2872 }
2873
2874 return rc;
2875}
2876
2877
2878/**
2879 * Common worker for PGMR3PhysMMIOExPreRegister & PGMR3PhysMMIO2Register that links
2880 * a complete registration entry into the lists and lookup tables.
2881 *
2882 * @param pVM The cross context VM structure.
2883 * @param pNew The new MMIO / MMIO2 registration to link.
2884 */
2885static void pgmR3PhysMMIOExLink(PVM pVM, PPGMREGMMIO2RANGE pNew)
2886{
2887 /*
2888 * Link it into the list (order doesn't matter, so insert it at the head).
2889 *
2890 * Note! The range we're linking may consist of multiple chunks, so we have to
2891 * find the last one.
2892 */
2893 PPGMREGMMIO2RANGE pLast = pNew;
2894 for (pLast = pNew; ; pLast = pLast->pNextR3)
2895 {
2896 if (pLast->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
2897 break;
2898 Assert(pLast->pNextR3);
2899 Assert(pLast->pNextR3->pDevInsR3 == pNew->pDevInsR3);
2900 Assert(pLast->pNextR3->iSubDev == pNew->iSubDev);
2901 Assert(pLast->pNextR3->iRegion == pNew->iRegion);
2902 Assert((pLast->pNextR3->fFlags & PGMREGMMIO2RANGE_F_MMIO2) == (pNew->fFlags & PGMREGMMIO2RANGE_F_MMIO2));
2903 Assert(pLast->pNextR3->idMmio2 == (pLast->fFlags & PGMREGMMIO2RANGE_F_MMIO2 ? pNew->idMmio2 + 1 : UINT8_MAX));
2904 }
2905
2906 pgmLock(pVM);
2907
2908 /* Link in the chain of ranges at the head of the list. */
2909 pLast->pNextR3 = pVM->pgm.s.pRegMmioRangesR3;
2910 pVM->pgm.s.pRegMmioRangesR3 = pNew;
2911
2912 /* If MMIO2, insert the MMIO2 range/page IDs. */
2913 uint8_t idMmio2 = pNew->idMmio2;
2914 if (idMmio2 != UINT8_MAX)
2915 {
2916 for (;;)
2917 {
2918 Assert(pNew->fFlags & PGMREGMMIO2RANGE_F_MMIO2);
2919 Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == NULL);
2920 Assert(pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] == NIL_RTR0PTR);
2921 pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = pNew;
2922 pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = pNew->RamRange.pSelfR0 - RT_UOFFSETOF(PGMREGMMIO2RANGE, RamRange);
2923 if (pNew->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
2924 break;
2925 pNew = pNew->pNextR3;
2926 }
2927 }
2928 else
2929 Assert(!(pNew->fFlags & PGMREGMMIO2RANGE_F_MMIO2));
2930
2931 pgmPhysInvalidatePageMapTLB(pVM);
2932 pgmUnlock(pVM);
2933}
2934
2935
2936/**
2937 * Allocate and register an MMIO2 region.
2938 *
2939 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's RAM
2940 * associated with a device. It is also non-shared memory with a permanent
2941 * ring-3 mapping and page backing (presently).
2942 *
2943 * An MMIO2 range may overlap with base memory if a lot of RAM is configured for
2944 * the VM, in which case we'll drop the base memory pages. Presently we will
2945 * make no attempt to preserve anything that happens to be present in the base
2946 * memory that is replaced, this is of course incorrect but it's too much
2947 * effort.
2948 *
2949 * @returns VBox status code.
2950 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the
2951 * memory.
2952 * @retval VERR_ALREADY_EXISTS if the region already exists.
2953 *
2954 * @param pVM The cross context VM structure.
2955 * @param pDevIns The device instance owning the region.
2956 * @param iSubDev The sub-device number.
2957 * @param iRegion The region number. If the MMIO2 memory is a PCI
2958 * I/O region this number has to be the number of that
2959 * region. Otherwise it can be any number save
2960 * UINT8_MAX.
2961 * @param cb The size of the region. Must be page aligned.
2962 * @param fFlags Reserved for future use, must be zero.
2963 * @param pszDesc The description.
2964 * @param ppv Where to store the pointer to the ring-3 mapping of
2965 * the memory.
2966 * @param phRegion Where to return the MMIO2 region handle. Optional.
2967 * @thread EMT
2968 */
2969VMMR3_INT_DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb,
2970 uint32_t fFlags, const char *pszDesc, void **ppv, PGMMMIO2HANDLE *phRegion)
2971{
2972 /*
2973 * Validate input.
2974 */
2975 AssertPtrReturn(ppv, VERR_INVALID_POINTER);
2976 *ppv = NULL;
2977 if (phRegion)
2978 {
2979 AssertPtrReturn(phRegion, VERR_INVALID_POINTER);
2980 *phRegion = NIL_PGMMMIO2HANDLE;
2981 }
2982 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2983 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2984 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
2985 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2986 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
2987 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
2988 AssertReturn(pgmR3PhysMMIOExFind(pVM, pDevIns, iSubDev, iRegion, NIL_PGMMMIO2HANDLE) == NULL, VERR_ALREADY_EXISTS);
2989 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2990 AssertReturn(cb, VERR_INVALID_PARAMETER);
2991 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2992
2993 const uint32_t cPages = cb >> PAGE_SHIFT;
2994 AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
2995 AssertLogRelReturn(cPages <= (MM_MMIO_64_MAX >> X86_PAGE_SHIFT), VERR_OUT_OF_RANGE);
2996
2997 /*
2998 * For the 2nd+ instance, mangle the description string so it's unique.
2999 */
3000 if (pDevIns->iInstance > 0) /** @todo Move to PDMDevHlp.cpp and use a real string cache. */
3001 {
3002 pszDesc = MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s [%u]", pszDesc, pDevIns->iInstance);
3003 if (!pszDesc)
3004 return VERR_NO_MEMORY;
3005 }
3006
3007 /*
3008 * Allocate an MMIO2 range ID (not freed on failure).
3009 *
3010 * The zero ID is not used as it could be confused with NIL_GMM_PAGEID, so
3011 * the IDs go from 1 thru PGM_MMIO2_MAX_RANGES.
3012 */
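 /* For instance, if three MMIO2 regions are registered already and this one
    needs two chunks, the chunks get IDs 4 and 5 and will end up in
    apMmio2RangesR3[3] and apMmio2RangesR3[4] when the registration is linked. */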
3013 unsigned cChunks = pgmR3PhysMMIOExCalcChunkCount(pVM, cb, NULL, NULL);
3014 pgmLock(pVM);
3015 uint8_t idMmio2 = pVM->pgm.s.cMmio2Regions + 1;
3016 unsigned cNewMmio2Regions = pVM->pgm.s.cMmio2Regions + cChunks;
3017 if (cNewMmio2Regions > PGM_MMIO2_MAX_RANGES)
3018 {
3019 pgmUnlock(pVM);
3020 AssertLogRelFailedReturn(VERR_PGM_TOO_MANY_MMIO2_RANGES);
3021 }
3022 pVM->pgm.s.cMmio2Regions = cNewMmio2Regions;
3023 pgmUnlock(pVM);
3024
3025 /*
3026 * Try to reserve and allocate the backing memory first as this is what is
3027 * most likely to fail.
3028 */
3029 int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc);
3030 if (RT_SUCCESS(rc))
3031 {
3032 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE));
AssertStmt(paPages, rc = VERR_NO_TMP_MEMORY);
3033 if (RT_SUCCESS(rc))
3034 {
3035 void *pvPages;
3036 rc = SUPR3PageAllocEx(cPages, 0 /*fFlags*/, &pvPages, NULL /*pR0Ptr*/, paPages);
3037 if (RT_SUCCESS(rc))
3038 {
3039 memset(pvPages, 0, cPages * PAGE_SIZE);
3040
3041 /*
3042 * Create the registered MMIO range record for it.
3043 */
3044 PPGMREGMMIO2RANGE pNew;
3045 rc = pgmR3PhysMMIOExCreate(pVM, pDevIns, iSubDev, iRegion, cb, pszDesc, &pNew);
3046 if (RT_SUCCESS(rc))
3047 {
3048 if (phRegion)
3049 *phRegion = idMmio2; /* The ID of the first chunk. */
3050
3051 uint32_t iSrcPage = 0;
3052 uint8_t *pbCurPages = (uint8_t *)pvPages;
3053 for (PPGMREGMMIO2RANGE pCur = pNew; pCur; pCur = pCur->pNextR3)
3054 {
3055 pCur->pvR3 = pbCurPages;
3056 pCur->RamRange.pvR3 = pbCurPages;
3057 pCur->idMmio2 = idMmio2;
3058 pCur->fFlags |= PGMREGMMIO2RANGE_F_MMIO2;
3059
3060 uint32_t iDstPage = pCur->RamRange.cb >> X86_PAGE_SHIFT;
3061 while (iDstPage-- > 0)
3062 {
3063 PGM_PAGE_INIT(&pNew->RamRange.aPages[iDstPage],
3064 paPages[iDstPage + iSrcPage].Phys,
3065 PGM_MMIO2_PAGEID_MAKE(idMmio2, iDstPage),
3066 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
3067 }
3068
3069 /* advance. */
3070 iSrcPage += pCur->RamRange.cb >> X86_PAGE_SHIFT;
3071 pbCurPages += pCur->RamRange.cb;
3072 idMmio2++;
3073 }
3074
3075 RTMemTmpFree(paPages);
3076
3077 /*
3078 * Update the page count stats, link the registration and we're done.
3079 */
3080 pVM->pgm.s.cAllPages += cPages;
3081 pVM->pgm.s.cPrivatePages += cPages;
3082
3083 pgmR3PhysMMIOExLink(pVM, pNew);
3084
3085 *ppv = pvPages;
3086 return VINF_SUCCESS;
3087 }
3088
3089 SUPR3PageFreeEx(pvPages, cPages);
3090 }
3091 }
3092 RTMemTmpFree(paPages);
3093 MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pszDesc);
3094 }
3095 if (pDevIns->iInstance > 0)
3096 MMR3HeapFree((void *)pszDesc);
3097 return rc;
3098}
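
#if 0 /* Illustrative example, disabled: not called from anywhere. */
/* A minimal sketch of a device allocating a 2 MiB MMIO2 region and mapping it
 * at a BAR address decided later by the guest/BIOS.  The device instance,
 * sub-device/region numbers, description and GCPhysBar are example values
 * supplied by a hypothetical caller (GCPhysBar must be page aligned and
 * non-zero, see PGMR3PhysMMIOExMap below). */
static int pgmR3PhysExampleMmio2(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysBar)
{
    void          *pvPages = NULL;
    PGMMMIO2HANDLE hRegion = NIL_PGMMMIO2HANDLE;
    int rc = PGMR3PhysMMIO2Register(pVM, pDevIns, 0 /*iSubDev*/, 0 /*iRegion*/, 2U * _1M,
                                    0 /*fFlags*/, "Example MMIO2", &pvPages, &hRegion);
    if (RT_SUCCESS(rc))
    {
        memset(pvPages, 0xff, 2U * _1M); /* the ring-3 mapping is directly accessible */
        rc = PGMR3PhysMMIOExMap(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hRegion, GCPhysBar);
    }
    return rc;
}
#endif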
3099
3100
3101/**
3102 * Deregisters and frees an MMIO2 region or a pre-registered MMIO region.
3103 *
3104 * Any physical (and virtual) access handlers registered for the region must
3105 * be deregistered before calling this function.
3106 *
3107 * @returns VBox status code.
3108 * @param pVM The cross context VM structure.
3109 * @param pDevIns The device instance owning the region.
3110 * @param iSubDev The sub-device number. Pass UINT32_MAX for wildcard
3111 * matching or using @a hMmio2.
3112 * @param iRegion The region. Pass UINT32_MAX for wildcard matching
3113 * or using @a hMmio2.
3114 * @param hMmio2 The MMIO2 handle to use instead of iSubDev and
3115 * iRegion, pass NIL to use those two.
3116 */
3117VMMR3_INT_DECL(int) PGMR3PhysMMIOExDeregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev,
3118 uint32_t iRegion, PGMMMIO2HANDLE hMmio2)
3119{
3120 /*
3121 * Validate input.
3122 */
3123 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3124 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3125 if (hMmio2 != NIL_PGMMMIO2HANDLE)
3126 {
3127 AssertReturn(iSubDev == UINT32_MAX, VERR_INVALID_PARAMETER);
3128 AssertReturn(iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
3129 }
3130 else
3131 {
3132 AssertReturn(iSubDev <= UINT8_MAX || iSubDev == UINT32_MAX, VERR_INVALID_PARAMETER);
3133 AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
3134 }
3135
3136 /*
3137 * The loop here scanning all registrations will make sure that multi-chunk ranges
3138 * get properly deregistered, though its original purpose was the wildcard iRegion.
3139 */
3140 pgmLock(pVM);
3141 int rc = VINF_SUCCESS;
3142 unsigned cFound = 0;
3143 PPGMREGMMIO2RANGE pPrev = NULL;
3144 PPGMREGMMIO2RANGE pCur = pVM->pgm.s.pRegMmioRangesR3;
3145 while (pCur)
3146 {
3147 uint32_t const fFlags = pCur->fFlags;
3148 if ( pCur->pDevInsR3 == pDevIns
3149 && ( hMmio2 == NIL_PGMMMIO2HANDLE
3150 ? ( iRegion == UINT32_MAX
3151 || pCur->iRegion == iRegion)
3152 && ( iSubDev == UINT32_MAX
3153 || pCur->iSubDev == iSubDev)
3154 : pCur->idMmio2 == hMmio2
3155 && (fFlags & PGMREGMMIO2RANGE_F_MMIO2)
3156 )
3157 )
3158 {
3159 cFound++;
3160
3161 /*
3162 * Unmap it if it's mapped.
3163 */
3164 if (fFlags & PGMREGMMIO2RANGE_F_MAPPED)
3165 {
3166 int rc2 = PGMR3PhysMMIOExUnmap(pVM, pCur->pDevInsR3, pCur->iSubDev, pCur->iRegion, hMmio2, pCur->RamRange.GCPhys);
3167 AssertRC(rc2);
3168 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
3169 rc = rc2;
3170 }
3171
3172 /*
3173 * Must tell IOM about MMIO (first one only).
3174 */
3175 if ((fFlags & (PGMREGMMIO2RANGE_F_MMIO2 | PGMREGMMIO2RANGE_F_FIRST_CHUNK)) == PGMREGMMIO2RANGE_F_FIRST_CHUNK)
3176 IOMR3MmioExNotifyDeregistered(pVM, pCur->pPhysHandlerR3->pvUserR3);
3177
3178 /*
3179 * Unlink it
3180 */
3181 PPGMREGMMIO2RANGE pNext = pCur->pNextR3;
3182 if (pPrev)
3183 pPrev->pNextR3 = pNext;
3184 else
3185 pVM->pgm.s.pRegMmioRangesR3 = pNext;
3186 pCur->pNextR3 = NULL;
3187
3188 uint8_t idMmio2 = pCur->idMmio2;
3189 if (idMmio2 != UINT8_MAX)
3190 {
3191 Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == pCur);
3192 pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = NULL;
3193 pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = NIL_RTR0PTR;
3194 }
3195
3196 /*
3197 * Free the memory.
3198 */
3199 const bool fIsMmio2 = RT_BOOL(fFlags & PGMREGMMIO2RANGE_F_MMIO2);
3200 uint32_t const cPages = pCur->cbReal >> PAGE_SHIFT;
3201 if (fIsMmio2)
3202 {
3203 int rc2 = SUPR3PageFreeEx(pCur->pvR3, cPages);
3204 AssertRC(rc2);
3205 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
3206 rc = rc2;
3207
3208 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pCur->RamRange.pszDesc);
3209 AssertRC(rc2);
3210 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
3211 rc = rc2;
3212 }
3213
3214 /* we're leaking hyper memory here if done at runtime. */
3215#ifdef VBOX_STRICT
3216 VMSTATE const enmState = VMR3GetState(pVM);
3217 AssertMsg( enmState == VMSTATE_POWERING_OFF
3218 || enmState == VMSTATE_POWERING_OFF_LS
3219 || enmState == VMSTATE_OFF
3220 || enmState == VMSTATE_OFF_LS
3221 || enmState == VMSTATE_DESTROYING
3222 || enmState == VMSTATE_TERMINATED
3223 || enmState == VMSTATE_CREATING
3224 , ("%s\n", VMR3GetStateName(enmState)));
3225#endif
3226
3227 if (pCur->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING)
3228 {
3229 const size_t cbRange = RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, RamRange.aPages[cPages]);
3230 size_t const cChunkPages = RT_ALIGN_Z(cbRange, PAGE_SIZE) >> PAGE_SHIFT;
3231 SUPR3PageFreeEx(pCur, cChunkPages);
3232 }
3233 /*else
3234 {
3235 rc = MMHyperFree(pVM, pCur); - does not work, see the alloc call.
3236 AssertRCReturn(rc, rc);
3237 } */
3238
3239
3240 /* update page count stats */
3241 pVM->pgm.s.cAllPages -= cPages;
3242 if (fIsMmio2)
3243 pVM->pgm.s.cPrivatePages -= cPages;
3244 else
3245 pVM->pgm.s.cPureMmioPages -= cPages;
3246
3247 /* next */
3248 pCur = pNext;
3249 if (hMmio2 != NIL_PGMMMIO2HANDLE)
3250 {
3251 if (fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
3252 break;
3253 hMmio2++;
3254 Assert(pCur->idMmio2 == hMmio2);
3255 Assert(pCur->pDevInsR3 == pDevIns);
3256 Assert(!(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK));
3257 }
3258 }
3259 else
3260 {
3261 pPrev = pCur;
3262 pCur = pCur->pNextR3;
3263 }
3264 }
3265 pgmPhysInvalidatePageMapTLB(pVM);
3266 pgmUnlock(pVM);
3267 return !cFound && (hMmio2 != NIL_PGMMMIO2HANDLE || (iRegion != UINT32_MAX && iSubDev != UINT32_MAX)) ? VERR_NOT_FOUND : rc;
3268}
3269
3270
3271/**
3272 * Maps an MMIO2 region or a pre-registered MMIO region.
3273 *
3274 * This is done when a guest / the bios / state loading changes the
3275 * PCI config. The replacing of base memory has the same restrictions
3276 * as during registration, of course.
3277 *
3278 * @returns VBox status code.
3279 *
3280 * @param pVM The cross context VM structure.
3281 * @param pDevIns The device instance owning the region.
3282 * @param iSubDev The sub-device number of the registered region.
3283 * Pass UINT32_MAX if @a hMmio2 is given.
3284 * @param iRegion The index of the registered region. Pass UINT32_MAX
3285 * if @a hMmio2 is given.
3286 * @param hMmio2 The handle of the region to map as an alternative to
3287 * @a iSubDev and @a iRegion, pass NIL to use the
3288 * other two.
3289 * @param GCPhys The guest-physical address to be remapped.
3290 */
3291VMMR3_INT_DECL(int) PGMR3PhysMMIOExMap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion,
3292 PGMMMIO2HANDLE hMmio2, RTGCPHYS GCPhys)
3293{
3294 /*
3295 * Validate input.
3296 *
3297 * Note! It's safe to walk the MMIO/MMIO2 list since registrations only
3298 * happens during VM construction.
3299 */
3300 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3301 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3302 if (hMmio2 != NIL_PGMMMIO2HANDLE)
3303 {
3304 AssertReturn(iSubDev == UINT32_MAX, VERR_INVALID_PARAMETER);
3305 AssertReturn(iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
3306 }
3307 else
3308 {
3309 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
3310 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
3311 }
3312 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
3313 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
3314 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3315
3316 PPGMREGMMIO2RANGE pFirstMmio = pgmR3PhysMMIOExFind(pVM, pDevIns, iSubDev, iRegion, hMmio2);
3317 AssertReturn(pFirstMmio, VERR_NOT_FOUND);
3318 Assert(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK);
3319
3320 PPGMREGMMIO2RANGE pLastMmio = pFirstMmio;
3321 RTGCPHYS cbRange = 0;
3322 for (;;)
3323 {
3324 AssertReturn(!(pLastMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED), VERR_WRONG_ORDER);
3325 Assert(pLastMmio->RamRange.GCPhys == NIL_RTGCPHYS);
3326 Assert(pLastMmio->RamRange.GCPhysLast == NIL_RTGCPHYS);
3327 Assert(pLastMmio->pDevInsR3 == pFirstMmio->pDevInsR3);
3328 Assert(pLastMmio->iSubDev == pFirstMmio->iSubDev);
3329 Assert(pLastMmio->iRegion == pFirstMmio->iRegion);
3330 cbRange += pLastMmio->RamRange.cb;
3331 if (pLastMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
3332 break;
3333 pLastMmio = pLastMmio->pNextR3;
3334 }
3335
3336 RTGCPHYS GCPhysLast = GCPhys + cbRange - 1;
3337 AssertLogRelReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
3338
3339 /*
3340 * Find our location in the ram range list, checking for restrictions
3341 * we don't bother implementing yet (partially overlapping, multiple
3342 * ram ranges).
3343 */
3344 pgmLock(pVM);
3345
3346 AssertReturnStmt(!(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED), pgmUnlock(pVM), VERR_WRONG_ORDER);
3347
3348 bool fRamExists = false;
3349 PPGMRAMRANGE pRamPrev = NULL;
3350 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
3351 while (pRam && GCPhysLast >= pRam->GCPhys)
3352 {
3353 if ( GCPhys <= pRam->GCPhysLast
3354 && GCPhysLast >= pRam->GCPhys)
3355 {
3356 /* Completely within? */
3357 AssertLogRelMsgReturnStmt( GCPhys >= pRam->GCPhys
3358 && GCPhysLast <= pRam->GCPhysLast,
3359 ("%RGp-%RGp (MMIOEx/%s) falls partly outside %RGp-%RGp (%s)\n",
3360 GCPhys, GCPhysLast, pFirstMmio->RamRange.pszDesc,
3361 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
3362 pgmUnlock(pVM),
3363 VERR_PGM_RAM_CONFLICT);
3364
3365 /* Check that all the pages are RAM pages. */
3366 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
3367 uint32_t cPagesLeft = cbRange >> PAGE_SHIFT;
3368 while (cPagesLeft-- > 0)
3369 {
3370 AssertLogRelMsgReturnStmt(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
3371 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
3372 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pFirstMmio->RamRange.pszDesc),
3373 pgmUnlock(pVM),
3374 VERR_PGM_RAM_CONFLICT);
3375 pPage++;
3376 }
3377
3378 /* There can only be one MMIO/MMIO2 chunk matching here! */
3379 AssertLogRelMsgReturnStmt(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK,
3380 ("%RGp-%RGp (MMIOEx/%s, flags %#X) consists of multiple chunks whereas the RAM somehow doesn't!\n",
3381 GCPhys, GCPhysLast, pFirstMmio->RamRange.pszDesc, pFirstMmio->fFlags),
3382 pgmUnlock(pVM),
3383 VERR_PGM_PHYS_MMIO_EX_IPE);
3384
3385 fRamExists = true;
3386 break;
3387 }
3388
3389 /* next */
3390 pRamPrev = pRam;
3391 pRam = pRam->pNextR3;
3392 }
3393 Log(("PGMR3PhysMMIOExMap: %RGp-%RGp fRamExists=%RTbool %s\n", GCPhys, GCPhysLast, fRamExists, pFirstMmio->RamRange.pszDesc));
3394
3395
3396 /*
3397 * Make the changes.
3398 */
3399 RTGCPHYS GCPhysCur = GCPhys;
3400 for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
3401 {
3402 pCurMmio->RamRange.GCPhys = GCPhysCur;
3403 pCurMmio->RamRange.GCPhysLast = GCPhysCur + pCurMmio->RamRange.cb - 1;
3404 if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
3405 {
3406 Assert(pCurMmio->RamRange.GCPhysLast == GCPhysLast);
3407 break;
3408 }
3409 GCPhysCur += pCurMmio->RamRange.cb;
3410 }
3411
3412 if (fRamExists)
3413 {
3414 /*
3415 * Make all the pages in the range MMIO/ZERO pages, freeing any
3416 * RAM pages currently mapped here. This might not be 100% correct
3417 * for PCI memory, but we're doing the same thing for MMIO2 pages.
3418 *
3419 * We replace these MMIO/ZERO pages with real pages in the MMIO2 case.
3420 */
3421 Assert(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK); /* Only one chunk */
3422
3423 int rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, PGMPAGETYPE_MMIO);
3424 AssertRCReturnStmt(rc, pgmUnlock(pVM), rc);
3425
3426 if (pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_MMIO2)
3427 {
3428 /* replace the pages, freeing all present RAM pages. */
3429 PPGMPAGE pPageSrc = &pFirstMmio->RamRange.aPages[0];
3430 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
3431 uint32_t cPagesLeft = pFirstMmio->RamRange.cb >> PAGE_SHIFT;
3432 while (cPagesLeft-- > 0)
3433 {
3434 Assert(PGM_PAGE_IS_MMIO(pPageDst));
3435
3436 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
3437 uint32_t const idPage = PGM_PAGE_GET_PAGEID(pPageSrc);
3438 PGM_PAGE_SET_PAGEID(pVM, pPageDst, idPage);
3439 PGM_PAGE_SET_HCPHYS(pVM, pPageDst, HCPhys);
3440 PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO2);
3441 PGM_PAGE_SET_STATE(pVM, pPageDst, PGM_PAGE_STATE_ALLOCATED);
3442 PGM_PAGE_SET_PDE_TYPE(pVM, pPageDst, PGM_PAGE_PDE_TYPE_DONTCARE);
3443 PGM_PAGE_SET_PTE_INDEX(pVM, pPageDst, 0);
3444 PGM_PAGE_SET_TRACKING(pVM, pPageDst, 0);
3445 /* (We tell NEM at the end of the function.) */
3446
3447 pVM->pgm.s.cZeroPages--;
3448 GCPhys += PAGE_SIZE;
3449 pPageSrc++;
3450 pPageDst++;
3451 }
3452 }
3453
3454 /* Flush physical page map TLB. */
3455 pgmPhysInvalidatePageMapTLB(pVM);
3456
3457 /* Force a PGM pool flush as guest ram references have been changed. */
3458 /** @todo not entirely SMP safe; assuming for now the guest takes care of
3459 * this internally (not touch mapped mmio while changing the mapping). */
3460 PVMCPU pVCpu = VMMGetCpu(pVM);
3461 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3462 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3463 }
3464 else
3465 {
3466 /*
3467 * No RAM range, insert the ones prepared during registration.
3468 */
3469 for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
3470 {
3471 /* Clear the tracking data of pages we're going to reactivate. */
3472 PPGMPAGE pPageSrc = &pCurMmio->RamRange.aPages[0];
3473 uint32_t cPagesLeft = pCurMmio->RamRange.cb >> PAGE_SHIFT;
3474 while (cPagesLeft-- > 0)
3475 {
3476 PGM_PAGE_SET_TRACKING(pVM, pPageSrc, 0);
3477 PGM_PAGE_SET_PTE_INDEX(pVM, pPageSrc, 0);
3478 pPageSrc++;
3479 }
3480
3481 /* link in the ram range */
3482 pgmR3PhysLinkRamRange(pVM, &pCurMmio->RamRange, pRamPrev);
3483
3484 if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
3485 {
3486 Assert(pCurMmio->RamRange.GCPhysLast == GCPhysLast);
3487 break;
3488 }
3489 pRamPrev = &pCurMmio->RamRange;
3490 }
3491 }
3492
3493 /*
3494 * Register the access handler if plain MMIO.
3495 *
3496 * We must register an access handler for each range since the access handler
3497 * code refuses to deal with multiple ranges (whereas we easily can here).
3498 */
3499 if (!(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_MMIO2))
3500 {
3501 int rc = VINF_SUCCESS;
3502 for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
3503 {
3504 Assert(!(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED));
3505 rc = pgmHandlerPhysicalExRegister(pVM, pCurMmio->pPhysHandlerR3, pCurMmio->RamRange.GCPhys,
3506 pCurMmio->RamRange.GCPhysLast);
3507 if (RT_FAILURE(rc))
3508 break;
3509 pCurMmio->fFlags |= PGMREGMMIO2RANGE_F_MAPPED; /* Use this to mark that the handler is registered. */
3510 if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
3511 {
3512 rc = IOMR3MmioExNotifyMapped(pVM, pFirstMmio->pPhysHandlerR3->pvUserR3, GCPhys);
3513 break;
3514 }
3515 }
3516 if (RT_FAILURE(rc))
3517 {
3518 /* Almost impossible, but try clean up properly and get out of here. */
3519 for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
3520 {
3521 if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED)
3522 {
3523 pCurMmio->fFlags &= ~PGMREGMMIO2RANGE_F_MAPPED;
3524 pgmHandlerPhysicalExDeregister(pVM, pCurMmio->pPhysHandlerR3, fRamExists);
3525 }
3526
3527 if (!fRamExists)
3528 pgmR3PhysUnlinkRamRange(pVM, &pCurMmio->RamRange);
3529 else
3530 {
3531 Assert(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK); /* Only one chunk */
3532
3533 uint32_t cPagesLeft = pCurMmio->RamRange.cb >> PAGE_SHIFT;
3534 PPGMPAGE pPageDst = &pRam->aPages[(pCurMmio->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
3535 while (cPagesLeft-- > 0)
3536 {
3537 PGM_PAGE_INIT_ZERO(pPageDst, pVM, PGMPAGETYPE_RAM);
3538 pPageDst++;
3539 }
3540 }
3541
3542 pCurMmio->RamRange.GCPhys = NIL_RTGCPHYS;
3543 pCurMmio->RamRange.GCPhysLast = NIL_RTGCPHYS;
3544 if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
3545 break;
3546 }
3547
3548 pgmUnlock(pVM);
3549 return rc;
3550 }
3551 }
3552
3553 /*
3554 * We're good, set the flags and invalidate the mapping TLB.
3555 */
3556 for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
3557 {
3558 pCurMmio->fFlags |= PGMREGMMIO2RANGE_F_MAPPED;
3559 if (fRamExists)
3560 pCurMmio->fFlags |= PGMREGMMIO2RANGE_F_OVERLAPPING;
3561 else
3562 pCurMmio->fFlags &= ~PGMREGMMIO2RANGE_F_OVERLAPPING;
3563 if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
3564 break;
3565 }
3566 pgmPhysInvalidatePageMapTLB(pVM);
3567
3568 /*
3569 * Notify NEM while holding the lock (experimental) and REM without (like always).
3570 */
3571 uint32_t const fNemNotify = (pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_MMIO2 ? NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 : 0)
3572 | (pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_OVERLAPPING ? NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE : 0);
3573 int rc = NEMR3NotifyPhysMmioExMap(pVM, GCPhys, cbRange, fNemNotify, pFirstMmio->pvR3);
3574
3575 pgmUnlock(pVM);
3576
3577 return rc;
3578}
3579
3580
3581/**
3582 * Unmaps an MMIO2 or a pre-registered MMIO region.
3583 *
3584 * This is done when the guest, the BIOS or state loading changes the
3585 * PCI config.  Replacing base memory has the same restrictions
3586 * as during registration, of course.
3587 */
3588VMMR3_INT_DECL(int) PGMR3PhysMMIOExUnmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion,
3589 PGMMMIO2HANDLE hMmio2, RTGCPHYS GCPhys)
3590{
3591 /*
3592 * Validate input
3593 */
3594 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3595 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3596 if (hMmio2 != NIL_PGMMMIO2HANDLE)
3597 {
3598 AssertReturn(iSubDev == UINT32_MAX, VERR_INVALID_PARAMETER);
3599 AssertReturn(iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
3600 }
3601 else
3602 {
3603 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
3604 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
3605 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
3606 }
3607 if (GCPhys != NIL_RTGCPHYS)
3608 {
3609 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
3610 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3611 }
3612
3613 PPGMREGMMIO2RANGE pFirstMmio = pgmR3PhysMMIOExFind(pVM, pDevIns, iSubDev, iRegion, hMmio2);
3614 AssertReturn(pFirstMmio, VERR_NOT_FOUND);
3615 Assert(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK);
3616
3617 PPGMREGMMIO2RANGE pLastMmio = pFirstMmio;
3618 RTGCPHYS cbRange = 0;
3619 for (;;)
3620 {
3621 AssertReturn(pLastMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED, VERR_WRONG_ORDER);
3622 AssertReturn(pLastMmio->RamRange.GCPhys == GCPhys + cbRange || GCPhys == NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
3623 Assert(pLastMmio->pDevInsR3 == pFirstMmio->pDevInsR3);
3624 Assert(pLastMmio->iSubDev == pFirstMmio->iSubDev);
3625 Assert(pLastMmio->iRegion == pFirstMmio->iRegion);
3626 cbRange += pLastMmio->RamRange.cb;
3627 if (pLastMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
3628 break;
3629 pLastMmio = pLastMmio->pNextR3;
3630 }
3631
3632 Log(("PGMR3PhysMMIOExUnmap: %RGp-%RGp %s\n",
3633 pFirstMmio->RamRange.GCPhys, pLastMmio->RamRange.GCPhysLast, pFirstMmio->RamRange.pszDesc));
3634
3635 int rc = pgmLock(pVM);
3636 AssertRCReturn(rc, rc);
3637 uint16_t const fOldFlags = pFirstMmio->fFlags;
3638 AssertReturnStmt(fOldFlags & PGMREGMMIO2RANGE_F_MAPPED, pgmUnlock(pVM), VERR_WRONG_ORDER);
3639
3640 /*
3641 * If plain MMIO, we must deregister the handlers first.
3642 */
3643 if (!(fOldFlags & PGMREGMMIO2RANGE_F_MMIO2))
3644 {
3645 PPGMREGMMIO2RANGE pCurMmio = pFirstMmio;
3646 rc = pgmHandlerPhysicalExDeregister(pVM, pFirstMmio->pPhysHandlerR3, RT_BOOL(fOldFlags & PGMREGMMIO2RANGE_F_OVERLAPPING));
3647 AssertRCReturnStmt(rc, pgmUnlock(pVM), rc);
3648 while (!(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK))
3649 {
3650 pCurMmio = pCurMmio->pNextR3;
3651 rc = pgmHandlerPhysicalExDeregister(pVM, pCurMmio->pPhysHandlerR3, RT_BOOL(fOldFlags & PGMREGMMIO2RANGE_F_OVERLAPPING));
3652 AssertRCReturnStmt(rc, pgmUnlock(pVM), VERR_PGM_PHYS_MMIO_EX_IPE);
3653 }
3654
3655 IOMR3MmioExNotifyUnmapped(pVM, pFirstMmio->pPhysHandlerR3->pvUserR3, pFirstMmio->RamRange.GCPhys);
3656 }
3657
3658 /*
3659 * Unmap it.
3660 */
3661 RTGCPHYS const GCPhysRangeNotify = pFirstMmio->RamRange.GCPhys;
3662 if (fOldFlags & PGMREGMMIO2RANGE_F_OVERLAPPING)
3663 {
3664 /*
3665 * We've replaced RAM, replace with zero pages.
3666 *
3667 * Note! This is where we might differ a little from a real system, because
3668 * it's likely to just show the RAM pages as they were before the
3669 * MMIO/MMIO2 region was mapped here.
3670 */
3671 /* Only one chunk allowed when overlapping! */
3672 Assert(fOldFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK);
3673
3674 /* Restore the RAM pages we've replaced. */
3675 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
3676 while (pRam->GCPhys > pFirstMmio->RamRange.GCPhysLast)
3677 pRam = pRam->pNextR3;
3678
3679 uint32_t cPagesLeft = pFirstMmio->RamRange.cb >> PAGE_SHIFT;
3680 if (fOldFlags & PGMREGMMIO2RANGE_F_MMIO2)
3681 pVM->pgm.s.cZeroPages += cPagesLeft;
3682
3683 PPGMPAGE pPageDst = &pRam->aPages[(pFirstMmio->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
3684 while (cPagesLeft-- > 0)
3685 {
3686 PGM_PAGE_INIT_ZERO(pPageDst, pVM, PGMPAGETYPE_RAM);
3687 pPageDst++;
3688 }
3689
3690 /* Flush physical page map TLB. */
3691 pgmPhysInvalidatePageMapTLB(pVM);
3692
3693 /* Update range state. */
3694 pFirstMmio->RamRange.GCPhys = NIL_RTGCPHYS;
3695 pFirstMmio->RamRange.GCPhysLast = NIL_RTGCPHYS;
3696 pFirstMmio->fFlags &= ~(PGMREGMMIO2RANGE_F_OVERLAPPING | PGMREGMMIO2RANGE_F_MAPPED);
3697 }
3698 else
3699 {
3700 /*
3701 * Unlink the chunks related to the MMIO/MMIO2 region.
3702 */
3703 for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
3704 {
3705 pgmR3PhysUnlinkRamRange(pVM, &pCurMmio->RamRange);
3706 pCurMmio->RamRange.GCPhys = NIL_RTGCPHYS;
3707 pCurMmio->RamRange.GCPhysLast = NIL_RTGCPHYS;
3708 pCurMmio->fFlags &= ~(PGMREGMMIO2RANGE_F_OVERLAPPING | PGMREGMMIO2RANGE_F_MAPPED);
3709 if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
3710 break;
3711 }
3712 }
3713
3714 /* Force a PGM pool flush as guest ram references have been changed. */
3715 /** @todo not entirely SMP safe; assuming for now the guest takes care
3716 * of this internally (not touch mapped mmio while changing the
3717 * mapping). */
3718 PVMCPU pVCpu = VMMGetCpu(pVM);
3719 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3720 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3721
3722 pgmPhysInvalidatePageMapTLB(pVM);
3723 pgmPhysInvalidRamRangeTlbs(pVM);
3724
3725 /*
3726 * Notify NEM while holding the lock (experimental) and REM without (like always).
3727 */
3728 uint32_t const fNemFlags = (fOldFlags & PGMREGMMIO2RANGE_F_MMIO2 ? NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 : 0)
3729 | (fOldFlags & PGMREGMMIO2RANGE_F_OVERLAPPING ? NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE : 0);
3730 rc = NEMR3NotifyPhysMmioExUnmap(pVM, GCPhysRangeNotify, cbRange, fNemFlags);
3731 pgmUnlock(pVM);
3732 return rc;
3733}
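
/*
 * Hypothetical usage sketch (not part of the original source): a device's
 * BAR-change path could call the unmap API above like this when it identifies
 * the region by handle.  The helper name and parameters are invented for
 * illustration; only PGMR3PhysMMIOExUnmap itself comes from this file.
 *
 *     static int devFooUnmapFrameBuffer(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS GCPhysOld)
 *     {
 *         // With a valid handle, iSubDev and iRegion must be passed as UINT32_MAX.
 *         int rc = PGMR3PhysMMIOExUnmap(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2, GCPhysOld);
 *         AssertRC(rc);
 *         return rc;
 *     }
 */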
3734
3735
3736/**
3737 * Reduces the mapping size of an MMIO2 or pre-registered MMIO region.
3738 *
3739 * This is mainly for dealing with old saved states after changing the default
3740 * size of a mapping region. See PGMDevHlpMMIOExReduce and
3741 * PDMPCIDEV::pfnRegionLoadChangeHookR3.
3742 *
3743 * The region must not currently be mapped when making this call.  The VM state
3744 * must be VM construction or saved state loading (restore).
3745 *
3746 * @returns VBox status code.
3747 * @param pVM The cross context VM structure.
3748 * @param pDevIns The device instance owning the region.
3749 * @param iSubDev The sub-device number of the registered region.
3750 * Pass UINT32_MAX if @a hMmio2 is given.
3751 * @param iRegion The index of the registered region. Pass UINT32_MAX
3752 * if @a hMmio2 is given.
3753 * @param hMmio2 The handle of the region to reduce as an alternative
3754 * to @a iSubDev and @a iRegion, pass NIL to use the
3755 * other two.
3756 * @param cbRegion The new mapping size.
3757 */
3758VMMR3_INT_DECL(int) PGMR3PhysMMIOExReduce(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion,
3759 PGMMMIO2HANDLE hMmio2, RTGCPHYS cbRegion)
3760{
3761 /*
3762 * Validate input
3763 */
3764 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3765 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3766 if (hMmio2 != NIL_PGMMMIO2HANDLE)
3767 {
3768 AssertReturn(iSubDev == UINT32_MAX, VERR_INVALID_PARAMETER);
3769 AssertReturn(iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
3770 }
3771 else
3772 {
3773 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
3774 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
3775 }
3776 AssertReturn(cbRegion >= X86_PAGE_SIZE, VERR_INVALID_PARAMETER);
3777 AssertReturn(!(cbRegion & X86_PAGE_OFFSET_MASK), VERR_UNSUPPORTED_ALIGNMENT);
3778 VMSTATE enmVmState = VMR3GetState(pVM);
3779 AssertLogRelMsgReturn( enmVmState == VMSTATE_CREATING
3780 || enmVmState == VMSTATE_LOADING,
3781 ("enmVmState=%d (%s)\n", enmVmState, VMR3GetStateName(enmVmState)),
3782 VERR_VM_INVALID_VM_STATE);
3783
3784 int rc = pgmLock(pVM);
3785 AssertRCReturn(rc, rc);
3786
3787 PPGMREGMMIO2RANGE pFirstMmio = pgmR3PhysMMIOExFind(pVM, pDevIns, iSubDev, iRegion, hMmio2);
3788 if (pFirstMmio)
3789 {
3790 Assert(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK);
3791 if (!(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED))
3792 {
3793 /*
3794 * NOTE! Current implementation does not support multiple ranges.
3795 * Implement when there is a real world need and thus a testcase.
3796 */
3797 AssertLogRelMsgStmt(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK,
3798 ("%s: %#x\n", pFirstMmio->RamRange.pszDesc, pFirstMmio->fFlags),
3799 rc = VERR_NOT_SUPPORTED);
3800 if (RT_SUCCESS(rc))
3801 {
3802 /*
3803 * Make the change.
3804 */
3805 Log(("PGMR3PhysMMIOExReduce: %s changes from %RGp bytes (%RGp) to %RGp bytes.\n",
3806 pFirstMmio->RamRange.pszDesc, pFirstMmio->RamRange.cb, pFirstMmio->cbReal, cbRegion));
3807
3808 AssertLogRelMsgStmt(cbRegion <= pFirstMmio->cbReal,
3809 ("%s: cbRegion=%#RGp cbReal=%#RGp\n", pFirstMmio->RamRange.pszDesc, cbRegion, pFirstMmio->cbReal),
3810 rc = VERR_OUT_OF_RANGE);
3811 if (RT_SUCCESS(rc))
3812 {
3813 pFirstMmio->RamRange.cb = cbRegion;
3814 }
3815 }
3816 }
3817 else
3818 rc = VERR_WRONG_ORDER;
3819 }
3820 else
3821 rc = VERR_NOT_FOUND;
3822
3823 pgmUnlock(pVM);
3824 return rc;
3825}
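
/*
 * Hypothetical usage sketch (not from the original source): a PCI device's
 * region load-change hook (see PDMPCIDEV::pfnRegionLoadChangeHookR3 mentioned
 * above) could shrink its region to the size an old saved state expects.  The
 * helper name and the size argument are invented; the state/mapping
 * restrictions are the ones asserted by the function above.
 *
 *     static int devFooShrinkForOldSavedState(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS cbOldRegion)
 *     {
 *         // Only valid while constructing the VM or loading a saved state, and
 *         // only while the region is not mapped.
 *         return PGMR3PhysMMIOExReduce(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2, cbOldRegion);
 *     }
 */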
3826
3827
3828/**
3829 * Validates @a hMmio2, making sure it belongs to @a pDevIns.
3830 *
3831 * @returns VBox status code.
3832 * @param pVM The cross context VM structure.
3833 * @param pDevIns The device which allegedly owns @a hMmio2.
3834 * @param hMmio2 The handle to validate.
3835 */
3836VMMR3_INT_DECL(int) PGMR3PhysMmio2ValidateHandle(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2)
3837{
3838 /*
3839 * Validate input
3840 */
3841 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3842 AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
3843
3844 /*
3845 * Just do this the simple way, only taking the PGM lock briefly for the lookup.
3846 */
3847 pgmLock(pVM);
3848 PPGMREGMMIO2RANGE pFirstMmio = pgmR3PhysMMIOExFind(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
3849 pgmUnlock(pVM);
3850 AssertReturn(pFirstMmio, VERR_INVALID_HANDLE);
3851 AssertReturn(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_MMIO2, VERR_INVALID_HANDLE);
3852 AssertReturn(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, VERR_INVALID_HANDLE);
3853 return VINF_SUCCESS;
3854}
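
/*
 * Hypothetical usage sketch (illustration only): rejecting a foreign or stale
 * handle before doing anything with it.  Everything except
 * PGMR3PhysMmio2ValidateHandle itself is assumed context.
 *
 *     int rc = PGMR3PhysMmio2ValidateHandle(pVM, pDevIns, hMmio2);
 *     if (RT_FAILURE(rc))
 *         return rc;   // VERR_INVALID_HANDLE if the handle isn't an MMIO2 region owned by pDevIns.
 */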
3855
3856
3857/**
3858 * Checks if the given address is an MMIO2 or pre-registered MMIO base address
3859 * or not.
3860 *
3861 * @returns true/false accordingly.
3862 * @param pVM The cross context VM structure.
3863 * @param pDevIns The owner of the memory, optional.
3864 * @param GCPhys The address to check.
3865 */
3866VMMR3DECL(bool) PGMR3PhysMMIOExIsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
3867{
3868 /*
3869 * Validate input
3870 */
3871 VM_ASSERT_EMT_RETURN(pVM, false);
3872 AssertPtrReturn(pDevIns, false);
3873 AssertReturn(GCPhys != NIL_RTGCPHYS, false);
3874 AssertReturn(GCPhys != 0, false);
3875 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);
3876
3877 /*
3878 * Search the list.
3879 */
3880 pgmLock(pVM);
3881 for (PPGMREGMMIO2RANGE pCurMmio = pVM->pgm.s.pRegMmioRangesR3; pCurMmio; pCurMmio = pCurMmio->pNextR3)
3882 if (pCurMmio->RamRange.GCPhys == GCPhys)
3883 {
3884 Assert(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED);
3885 bool fRet = RT_BOOL(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK);
3886 pgmUnlock(pVM);
3887 return fRet;
3888 }
3889 pgmUnlock(pVM);
3890 return false;
3891}
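
/*
 * Hypothetical usage sketch (illustration only): checking whether a guest
 * physical address is the base of one of the caller's mapped MMIO/MMIO2
 * regions.  The surrounding device logic and variable name are made up.
 *
 *     if (PGMR3PhysMMIOExIsBase(pVM, pDevIns, GCPhysCandidate))
 *         LogRel(("devFoo: %RGp is one of our region bases\n", GCPhysCandidate));
 */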
3892
3893
3894/**
3895 * Gets the HC physical address of a page in the MMIO2 region.
3896 *
3897 * This API is intended for MMHyper and shouldn't be called
3898 * by anyone else...
3899 *
3900 * @returns VBox status code.
3901 * @param pVM The cross context VM structure.
3902 * @param pDevIns The owner of the memory, optional.
3903 * @param iSubDev Sub-device number.
3904 * @param iRegion The region.
3905 * @param off The page expressed as an offset into the MMIO2 region.
3906 * @param pHCPhys Where to store the result.
3907 */
3908VMMR3_INT_DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion,
3909 RTGCPHYS off, PRTHCPHYS pHCPhys)
3910{
3911 /*
3912 * Validate input
3913 */
3914 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3915 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3916 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
3917 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
3918
3919 pgmLock(pVM);
3920 PPGMREGMMIO2RANGE pCurMmio = pgmR3PhysMMIOExFind(pVM, pDevIns, iSubDev, iRegion, NIL_PGMMMIO2HANDLE);
3921 AssertReturn(pCurMmio, VERR_NOT_FOUND);
3922 AssertReturn(pCurMmio->fFlags & (PGMREGMMIO2RANGE_F_MMIO2 | PGMREGMMIO2RANGE_F_FIRST_CHUNK), VERR_WRONG_TYPE);
3923
3924 while ( off >= pCurMmio->RamRange.cb
3925 && !(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK))
3926 {
3927 off -= pCurMmio->RamRange.cb;
3928 pCurMmio = pCurMmio->pNextR3;
3929 }
3930 AssertReturn(off < pCurMmio->RamRange.cb, VERR_INVALID_PARAMETER);
3931
3932 PCPGMPAGE pPage = &pCurMmio->RamRange.aPages[off >> PAGE_SHIFT];
3933 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
3934 pgmUnlock(pVM);
3935 return VINF_SUCCESS;
3936}
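
/*
 * Hypothetical usage sketch (illustration only): looking up the host physical
 * address backing the first page of sub-device 0, region 0.  The sub-device
 * and region numbers are arbitrary example values.
 *
 *     RTHCPHYS HCPhysPage0 = NIL_RTHCPHYS;
 *     int rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, 0, 0, 0, &HCPhysPage0);
 *     if (RT_SUCCESS(rc))
 *         LogRel(("devFoo: page 0 is backed by host physical address %RHp\n", HCPhysPage0));
 */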
3937
3938
3939/**
3940 * Maps a portion of an MMIO2 region into kernel space (host).
3941 *
3942 * The kernel mapping will become invalid when the MMIO2 memory is deregistered
3943 * or the VM is terminated.
3944 *
3945 * @return VBox status code.
3946 *
3947 * @param pVM The cross context VM structure.
3948 * @param pDevIns The device owning the MMIO2 memory.
3949 * @param iSubDev The sub-device number.
3950 * @param iRegion The region.
3951 * @param off The offset into the region. Must be page aligned.
3952 * @param cb The number of bytes to map. Must be page aligned.
3953 * @param pszDesc Mapping description.
3954 * @param pR0Ptr Where to store the R0 address.
3955 */
3956VMMR3_INT_DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion,
3957 RTGCPHYS off, RTGCPHYS cb, const char *pszDesc, PRTR0PTR pR0Ptr)
3958{
3959 /*
3960 * Validate input.
3961 */
3962 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3963 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3964 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
3965 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
3966
3967 PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMMIOExFind(pVM, pDevIns, iSubDev, iRegion, NIL_PGMMMIO2HANDLE);
3968 AssertReturn(pFirstRegMmio, VERR_NOT_FOUND);
3969 AssertReturn(pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_MMIO2 | PGMREGMMIO2RANGE_F_FIRST_CHUNK), VERR_WRONG_TYPE);
3970 AssertReturn(off < pFirstRegMmio->RamRange.cb, VERR_INVALID_PARAMETER);
3971 AssertReturn(cb <= pFirstRegMmio->RamRange.cb, VERR_INVALID_PARAMETER);
3972 AssertReturn(off + cb <= pFirstRegMmio->RamRange.cb, VERR_INVALID_PARAMETER);
3973 NOREF(pszDesc);
3974
3975 /*
3976 * Pass the request on to the support library/driver.
3977 */
3978#if defined(RT_OS_WINDOWS) || defined(RT_OS_LINUX) || defined(RT_OS_OS2) /** @todo Fully implement RTR0MemObjMapKernelEx everywhere. */
3979 AssertLogRelReturn(off == 0, VERR_NOT_SUPPORTED);
3980 AssertLogRelReturn(pFirstRegMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK, VERR_NOT_SUPPORTED);
3981 int rc = SUPR3PageMapKernel(pFirstRegMmio->pvR3, 0 /*off*/, pFirstRegMmio->RamRange.cb, 0 /*fFlags*/, pR0Ptr);
3982#else
3983 int rc = SUPR3PageMapKernel(pFirstRegMmio->pvR3, off, cb, 0 /*fFlags*/, pR0Ptr);
3984#endif
3985
3986 return rc;
3987}
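
/*
 * Hypothetical usage sketch (illustration only): mapping an MMIO2 region into
 * ring-0 so kernel-side device code can access it directly.  The description
 * string and region numbers are invented and the cbRegion size variable is
 * assumed context; note the restriction above that on some hosts the whole
 * region is mapped and off must be 0.
 *
 *     RTR0PTR R0PtrRegion = NIL_RTR0PTR;
 *     int rc = PGMR3PhysMMIO2MapKernel(pVM, pDevIns, 0, 0, 0, cbRegion, "devFoo-r0", &R0PtrRegion);
 *     AssertRCReturn(rc, rc);
 */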
3988
3989
3990/**
3991 * Gets the mapping address of an MMIO2 region.
3992 *
3993 * @returns Mapping address, NIL_RTGCPHYS if not mapped or invalid handle.
3994 *
3995 * @param pVM The cross context VM structure.
3996 * @param pDevIns The device owning the MMIO2 handle.
3997 * @param hMmio2 The region handle.
3998 */
3999VMMR3_INT_DECL(RTGCPHYS) PGMR3PhysMmio2GetMappingAddress(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2)
4000{
4001 AssertPtrReturn(pDevIns, NIL_RTGCPHYS);
4002
4003 PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMMIOExFind(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
4004 AssertReturn(pFirstRegMmio, NIL_RTGCPHYS);
4005
4006 if (pFirstRegMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED)
4007 return pFirstRegMmio->RamRange.GCPhys;
4008 return NIL_RTGCPHYS;
4009}
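
/*
 * Hypothetical usage sketch (illustration only): querying where (if anywhere)
 * an MMIO2 region is currently mapped in the guest physical address space.
 *
 *     RTGCPHYS GCPhysMapping = PGMR3PhysMmio2GetMappingAddress(pVM, pDevIns, hMmio2);
 *     if (GCPhysMapping != NIL_RTGCPHYS)
 *         LogRel(("devFoo: region is mapped at %RGp\n", GCPhysMapping));
 *     else
 *         LogRel(("devFoo: region is currently not mapped\n"));
 */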
4010
4011/**
4012 * Changes the region number of an MMIO2 or pre-registered MMIO region.
4013 *
4014 * This is only for dealing with saved state issues, nothing else.
4015 *
4016 * @return VBox status code.
4017 *
4018 * @param pVM The cross context VM structure.
4019 * @param pDevIns The device owning the MMIO2 memory.
4020 * @param iSubDev The sub-device number. Pass UINT32_MAX if @a hMmio2 is
4021 * given.
4022 * @param iRegion The region. Pass UINT32_MAX if @a hMmio2 is given.
4023 * @param hMmio2 The handle of the region to map as an alternative to
4024 * @a iSubDev and @a iRegion, pass NIL to use the
4025 * other two.
4026 * @param iNewRegion The new region index.
4027 *
4028 * @thread EMT(0)
4029 * @sa @bugref{9359}
4030 */
4031VMMR3_INT_DECL(int) PGMR3PhysMMIOExChangeRegionNo(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion,
4032 PGMMMIO2HANDLE hMmio2, uint32_t iNewRegion)
4033{
4034 /*
4035 * Validate input.
4036 */
4037 VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
4038 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
4039 if (hMmio2 != NIL_PGMMMIO2HANDLE)
4040 {
4041 AssertReturn(iSubDev == UINT32_MAX, VERR_INVALID_PARAMETER);
4042 AssertReturn(iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
4043 }
4044 else
4045 {
4046 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
4047 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
4048 }
4049 AssertReturn(iNewRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
4050
4051 AssertReturn(pVM->enmVMState == VMSTATE_LOADING, VERR_INVALID_STATE);
4052
4053 PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMMIOExFind(pVM, pDevIns, iSubDev, iRegion, hMmio2);
4054 AssertReturn(pFirstRegMmio, VERR_NOT_FOUND);
4055 AssertReturn(pgmR3PhysMMIOExFind(pVM, pDevIns, pFirstRegMmio->iSubDev, iNewRegion, NIL_PGMMMIO2HANDLE) == NULL,
4056 VERR_RESOURCE_IN_USE);
4057
4058 /*
4059 * Make the change.
4060 */
4061 pFirstRegMmio->iRegion = (uint8_t)iNewRegion;
4062
4063 return VINF_SUCCESS;
4064}
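
/*
 * Hypothetical usage sketch (illustration only): while loading an old saved
 * state, a device could renumber a region to match the layout the old state
 * was saved with.  The new region number is an example value; this must run
 * on EMT(0) while the VM state is 'loading', as asserted above.
 *
 *     int rc = PGMR3PhysMMIOExChangeRegionNo(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2, 2);
 *     AssertRCReturn(rc, rc);
 */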
4065
4066
4067/**
4068 * Worker for PGMR3PhysRomRegister.
4069 *
4070 * This is here to simplify lock management, i.e. the caller does all the
4071 * locking and we can simply return without needing to remember to unlock
4072 * anything first.
4073 *
4074 * @returns VBox status code.
4075 * @param pVM The cross context VM structure.
4076 * @param pDevIns The device instance owning the ROM.
4077 * @param GCPhys First physical address in the range.
4078 * Must be page aligned!
4079 * @param cb The size of the range (in bytes).
4080 * Must be page aligned!
4081 * @param pvBinary Pointer to the binary data backing the ROM image.
4082 * @param cbBinary The size of the binary data pvBinary points to.
4083 * This must be less or equal to @a cb.
4084 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAGS_SHADOWED
4085 * and/or PGMPHYS_ROM_FLAGS_PERMANENT_BINARY.
4086 * @param pszDesc Pointer to description string. This must not be freed.
4087 */
4088static int pgmR3PhysRomRegisterLocked(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
4089 const void *pvBinary, uint32_t cbBinary, uint32_t fFlags, const char *pszDesc)
4090{
4091 /*
4092 * Validate input.
4093 */
4094 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
4095 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
4096 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
4097 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
4098 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
4099 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
4100 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
4101 AssertReturn(!(fFlags & ~PGMPHYS_ROM_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
4102 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
4103
4104 const uint32_t cPages = cb >> PAGE_SHIFT;
4105
4106 /*
4107 * Find the ROM location in the ROM list first.
4108 */
4109 PPGMROMRANGE pRomPrev = NULL;
4110 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
4111 while (pRom && GCPhysLast >= pRom->GCPhys)
4112 {
4113 if ( GCPhys <= pRom->GCPhysLast
4114 && GCPhysLast >= pRom->GCPhys)
4115 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
4116 GCPhys, GCPhysLast, pszDesc,
4117 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
4118 VERR_PGM_RAM_CONFLICT);
4119 /* next */
4120 pRomPrev = pRom;
4121 pRom = pRom->pNextR3;
4122 }
4123
4124 /*
4125 * Find the RAM location and check for conflicts.
4126 *
4127 * Conflict detection is a bit different from RAM
4128 * registration since a ROM can be located within a RAM
4129 * range. So, what we have to check for is other memory
4130 * types (other than RAM that is) and that we don't span
4131 * more than one RAM range (lazy).
4132 */
4133 bool fRamExists = false;
4134 PPGMRAMRANGE pRamPrev = NULL;
4135 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
4136 while (pRam && GCPhysLast >= pRam->GCPhys)
4137 {
4138 if ( GCPhys <= pRam->GCPhysLast
4139 && GCPhysLast >= pRam->GCPhys)
4140 {
4141 /* completely within? */
4142 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
4143 && GCPhysLast <= pRam->GCPhysLast,
4144 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
4145 GCPhys, GCPhysLast, pszDesc,
4146 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
4147 VERR_PGM_RAM_CONFLICT);
4148 fRamExists = true;
4149 break;
4150 }
4151
4152 /* next */
4153 pRamPrev = pRam;
4154 pRam = pRam->pNextR3;
4155 }
4156 if (fRamExists)
4157 {
4158 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
4159 uint32_t cPagesLeft = cPages;
4160 while (cPagesLeft-- > 0)
4161 {
4162 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
4163 ("%RGp (%R[pgmpage]) isn't a RAM page - registering %RGp-%RGp (%s).\n",
4164 pRam->GCPhys + ((RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << PAGE_SHIFT),
4165 pPage, GCPhys, GCPhysLast, pszDesc), VERR_PGM_RAM_CONFLICT);
4166 Assert(PGM_PAGE_IS_ZERO(pPage));
4167 pPage++;
4168 }
4169 }
4170
4171 /*
4172 * Update the base memory reservation if necessary.
4173 */
4174 uint32_t cExtraBaseCost = fRamExists ? 0 : cPages;
4175 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
4176 cExtraBaseCost += cPages;
4177 if (cExtraBaseCost)
4178 {
4179 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
4180 if (RT_FAILURE(rc))
4181 return rc;
4182 }
4183
4184 /*
4185 * Allocate memory for the virgin copy of the RAM.
4186 */
4187 PGMMALLOCATEPAGESREQ pReq;
4188 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
4189 AssertRCReturn(rc, rc);
4190
4191 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4192 {
4193 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
4194 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
4195 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
4196 }
4197
4198 rc = GMMR3AllocatePagesPerform(pVM, pReq);
4199 if (RT_FAILURE(rc))
4200 {
4201 GMMR3AllocatePagesCleanup(pReq);
4202 return rc;
4203 }
4204
4205 /*
4206 * Allocate the new ROM range and RAM range (if necessary).
4207 */
4208 PPGMROMRANGE pRomNew;
4209 rc = MMHyperAlloc(pVM, RT_UOFFSETOF_DYN(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
4210 if (RT_SUCCESS(rc))
4211 {
4212 PPGMRAMRANGE pRamNew = NULL;
4213 if (!fRamExists)
4214 rc = MMHyperAlloc(pVM, RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
4215 if (RT_SUCCESS(rc))
4216 {
4217 /*
4218 * Initialize and insert the RAM range (if required).
4219 */
4220 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
4221 if (!fRamExists)
4222 {
4223 pRamNew->pSelfR0 = MMHyperCCToR0(pVM, pRamNew);
4224 pRamNew->GCPhys = GCPhys;
4225 pRamNew->GCPhysLast = GCPhysLast;
4226 pRamNew->cb = cb;
4227 pRamNew->pszDesc = pszDesc;
4228 pRamNew->fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_ROM;
4229 pRamNew->pvR3 = NULL;
4230 pRamNew->paLSPages = NULL;
4231
4232 PPGMPAGE pPage = &pRamNew->aPages[0];
4233 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
4234 {
4235 PGM_PAGE_INIT(pPage,
4236 pReq->aPages[iPage].HCPhysGCPhys,
4237 pReq->aPages[iPage].idPage,
4238 PGMPAGETYPE_ROM,
4239 PGM_PAGE_STATE_ALLOCATED);
4240
4241 pRomPage->Virgin = *pPage;
4242 }
4243
4244 pVM->pgm.s.cAllPages += cPages;
4245 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
4246 }
4247 else
4248 {
4249 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
4250 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
4251 {
4252 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_ROM);
4253 PGM_PAGE_SET_HCPHYS(pVM, pPage, pReq->aPages[iPage].HCPhysGCPhys);
4254 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
4255 PGM_PAGE_SET_PAGEID(pVM, pPage, pReq->aPages[iPage].idPage);
4256 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_DONTCARE);
4257 PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
4258 PGM_PAGE_SET_TRACKING(pVM, pPage, 0);
4259
4260 pRomPage->Virgin = *pPage;
4261 }
4262
4263 pRamNew = pRam;
4264
4265 pVM->pgm.s.cZeroPages -= cPages;
4266 }
4267 pVM->pgm.s.cPrivatePages += cPages;
4268
4269 /* Flush physical page map TLB. */
4270 pgmPhysInvalidatePageMapTLB(pVM);
4271
4272
4273 /* Notify NEM before we register handlers. */
4274 uint32_t const fNemNotify = (fRamExists ? NEM_NOTIFY_PHYS_ROM_F_REPLACE : 0)
4275 | (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED ? NEM_NOTIFY_PHYS_ROM_F_SHADOW : 0);
4276 rc = NEMR3NotifyPhysRomRegisterEarly(pVM, GCPhys, cb, fNemNotify);
4277
4278 /* Register the ROM access handler. */
4279 if (RT_SUCCESS(rc))
4280 rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, pVM->pgm.s.hRomPhysHandlerType,
4281 pRomNew, MMHyperCCToR0(pVM, pRomNew), MMHyperCCToRC(pVM, pRomNew),
4282 pszDesc);
4283 if (RT_SUCCESS(rc))
4284 {
4285 /*
4286 * Copy the image over to the virgin pages.
4287 * This must be done after linking in the RAM range.
4288 */
4289 size_t cbBinaryLeft = cbBinary;
4290 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
4291 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
4292 {
4293 void *pvDstPage;
4294 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pvDstPage);
4295 if (RT_FAILURE(rc))
4296 {
4297 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
4298 break;
4299 }
4300 if (cbBinaryLeft >= PAGE_SIZE)
4301 {
4302 memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << PAGE_SHIFT), PAGE_SIZE);
4303 cbBinaryLeft -= PAGE_SIZE;
4304 }
4305 else
4306 {
4307 ASMMemZeroPage(pvDstPage); /* (shouldn't be necessary, but can't hurt either) */
4308 if (cbBinaryLeft > 0)
4309 {
4310 memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << PAGE_SHIFT), cbBinaryLeft);
4311 cbBinaryLeft = 0;
4312 }
4313 }
4314 }
4315 if (RT_SUCCESS(rc))
4316 {
4317 /*
4318 * Initialize the ROM range.
4319 * Note that the Virgin member of the pages has already been initialized above.
4320 */
4321 pRomNew->GCPhys = GCPhys;
4322 pRomNew->GCPhysLast = GCPhysLast;
4323 pRomNew->cb = cb;
4324 pRomNew->fFlags = fFlags;
4325 pRomNew->idSavedState = UINT8_MAX;
4326 pRomNew->cbOriginal = cbBinary;
4327 pRomNew->pszDesc = pszDesc;
4328 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY
4329 ? pvBinary : RTMemDup(pvBinary, cbBinary);
4330 if (pRomNew->pvOriginal)
4331 {
4332 for (unsigned iPage = 0; iPage < cPages; iPage++)
4333 {
4334 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
4335 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
4336 PGM_PAGE_INIT_ZERO(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
4337 }
4338
4339 /* update the page count stats for the shadow pages. */
4340 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
4341 {
4342 pVM->pgm.s.cZeroPages += cPages;
4343 pVM->pgm.s.cAllPages += cPages;
4344 }
4345
4346 /*
4347 * Insert the ROM range, tell REM and return successfully.
4348 */
4349 pRomNew->pNextR3 = pRom;
4350 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
4351
4352 if (pRomPrev)
4353 {
4354 pRomPrev->pNextR3 = pRomNew;
4355 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
4356 }
4357 else
4358 {
4359 pVM->pgm.s.pRomRangesR3 = pRomNew;
4360 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
4361 }
4362
4363 pgmPhysInvalidatePageMapTLB(pVM);
4364 GMMR3AllocatePagesCleanup(pReq);
4365
4366 /* Notify NEM again. */
4367 return NEMR3NotifyPhysRomRegisterLate(pVM, GCPhys, cb, fNemNotify);
4368 }
4369
4370 /* bail out */
4371 rc = VERR_NO_MEMORY;
4372 }
4373
4374 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
4375 AssertRC(rc2);
4376 }
4377
4378 if (!fRamExists)
4379 {
4380 pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
4381 MMHyperFree(pVM, pRamNew);
4382 }
4383 }
4384 MMHyperFree(pVM, pRomNew);
4385 }
4386
4387 /** @todo Purge the mapping cache or something... */
4388 GMMR3FreeAllocatedPages(pVM, pReq);
4389 GMMR3AllocatePagesCleanup(pReq);
4390 return rc;
4391}
4392
4393
4394/**
4395 * Registers a ROM image.
4396 *
4397 * Shadowed ROM images require double the amount of backing memory, so
4398 * don't use that unless you have to. Shadowing of ROM images is a process
4399 * where we can select where the reads go and where the writes go. On real
4400 * hardware the chipset provides means to configure this. We provide
4401 * PGMR3PhysProtectROM() for this purpose.
4402 *
4403 * A read-only copy of the ROM image will always be kept around while we
4404 * will allocate RAM pages for the changes on demand (unless all memory
4405 * is configured to be preallocated).
4406 *
4407 * @returns VBox status code.
4408 * @param pVM The cross context VM structure.
4409 * @param pDevIns The device instance owning the ROM.
4410 * @param GCPhys First physical address in the range.
4411 * Must be page aligned!
4412 * @param cb The size of the range (in bytes).
4413 * Must be page aligned!
4414 * @param pvBinary Pointer to the binary data backing the ROM image.
4415 * @param cbBinary The size of the binary data pvBinary points to.
4416 * This must be less or equal to @a cb.
4417 * @param fFlags Mask of flags, PGMPHYS_ROM_FLAGS_XXX.
4418 * @param pszDesc Pointer to description string. This must not be freed.
4419 *
4420 * @remark There is no way to remove the ROM, automatically on device cleanup or
4421 * manually from the device, yet. This isn't difficult in any way, it's
4422 * just not something we expect to be necessary for a while.
4423 */
4424VMMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
4425 const void *pvBinary, uint32_t cbBinary, uint32_t fFlags, const char *pszDesc)
4426{
4427 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p cbBinary=%#x fFlags=%#x pszDesc=%s\n",
4428 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, cbBinary, fFlags, pszDesc));
4429 pgmLock(pVM);
4430 int rc = pgmR3PhysRomRegisterLocked(pVM, pDevIns, GCPhys, cb, pvBinary, cbBinary, fFlags, pszDesc);
4431 pgmUnlock(pVM);
4432 return rc;
4433}
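
/*
 * Hypothetical usage sketch (illustration only): registering a 128 KiB system
 * BIOS image as a shadowed ROM during device construction.  The address, size,
 * image pointer and description are example values; the flag is one of the
 * PGMPHYS_ROM_FLAGS_XXX values accepted above.
 *
 *     int rc = PGMR3PhysRomRegister(pVM, pDevIns, UINT32_C(0xe0000), UINT32_C(0x20000),
 *                                   pvBiosImage, cbBiosImage,
 *                                   PGMPHYS_ROM_FLAGS_SHADOWED, "Foo BIOS");
 *     AssertRCReturn(rc, rc);
 */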
4434
4435
4436/**
4437 * Called by PGMR3MemSetup to reset the shadow, switch to the virgin, and verify
4438 * that the virgin part is untouched.
4439 *
4440 * This is done after the normal memory has been cleared.
4441 *
4442 * ASSUMES that the caller owns the PGM lock.
4443 *
4444 * @param pVM The cross context VM structure.
4445 */
4446int pgmR3PhysRomReset(PVM pVM)
4447{
4448 PGM_LOCK_ASSERT_OWNER(pVM);
4449 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
4450 {
4451 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
4452
4453 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
4454 {
4455 /*
4456 * Reset the physical handler.
4457 */
4458 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
4459 AssertRCReturn(rc, rc);
4460
4461 /*
4462 * What we do with the shadow pages depends on the memory
4463 * preallocation option. If not enabled, we'll just throw
4464 * out all the dirty pages and replace them with the zero page.
4465 */
4466 if (!pVM->pgm.s.fRamPreAlloc)
4467 {
4468 /* Free the dirty pages. */
4469 uint32_t cPendingPages = 0;
4470 PGMMFREEPAGESREQ pReq;
4471 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
4472 AssertRCReturn(rc, rc);
4473
4474 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4475 if ( !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow)
4476 && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow))
4477 {
4478 Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) == PGM_PAGE_STATE_ALLOCATED);
4479 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRom->aPages[iPage].Shadow,
4480 pRom->GCPhys + (iPage << PAGE_SHIFT),
4481 (PGMPAGETYPE)PGM_PAGE_GET_TYPE(&pRom->aPages[iPage].Shadow));
4482 AssertLogRelRCReturn(rc, rc);
4483 }
4484
4485 if (cPendingPages)
4486 {
4487 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
4488 AssertLogRelRCReturn(rc, rc);
4489 }
4490 GMMR3FreePagesCleanup(pReq);
4491 }
4492 else
4493 {
4494 /* clear all the shadow pages. */
4495 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4496 {
4497 if (PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow))
4498 continue;
4499 Assert(!PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow));
4500 void *pvDstPage;
4501 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
4502 rc = pgmPhysPageMakeWritableAndMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pvDstPage);
4503 if (RT_FAILURE(rc))
4504 break;
4505 ASMMemZeroPage(pvDstPage);
4506 }
4507 AssertRCReturn(rc, rc);
4508 }
4509 }
4510
4511 /*
4512 * Restore the original ROM pages after a saved state load.
4513 * Also, in strict builds check that ROM pages remain unmodified.
4514 */
4515#ifndef VBOX_STRICT
4516 if (pVM->pgm.s.fRestoreRomPagesOnReset)
4517#endif
4518 {
4519 size_t cbSrcLeft = pRom->cbOriginal;
4520 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
4521 uint32_t cRestored = 0;
4522 for (uint32_t iPage = 0; iPage < cPages && cbSrcLeft > 0; iPage++, pbSrcPage += PAGE_SIZE)
4523 {
4524 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
4525 void const *pvDstPage;
4526 int rc = pgmPhysPageMapReadOnly(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pvDstPage);
4527 if (RT_FAILURE(rc))
4528 break;
4529
4530 if (memcmp(pvDstPage, pbSrcPage, RT_MIN(cbSrcLeft, PAGE_SIZE)))
4531 {
4532 if (pVM->pgm.s.fRestoreRomPagesOnReset)
4533 {
4534 void *pvDstPageW;
4535 rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pvDstPageW);
4536 AssertLogRelRCReturn(rc, rc);
4537 memcpy(pvDstPageW, pbSrcPage, RT_MIN(cbSrcLeft, PAGE_SIZE));
4538 cRestored++;
4539 }
4540 else
4541 LogRel(("pgmR3PhysRomReset: %RGp: ROM page changed (%s)\n", GCPhys, pRom->pszDesc));
4542 }
4543 cbSrcLeft -= RT_MIN(cbSrcLeft, PAGE_SIZE);
4544 }
4545 if (cRestored > 0)
4546 LogRel(("PGM: ROM \"%s\": Reloaded %u of %u pages.\n", pRom->pszDesc, cRestored, cPages));
4547 }
4548 }
4549
4550 /* Clear the ROM restore flag now as we only need to do this once after
4551 loading saved state. */
4552 pVM->pgm.s.fRestoreRomPagesOnReset = false;
4553
4554 return VINF_SUCCESS;
4555}
4556
4557
4558/**
4559 * Called by PGMR3Term to free resources.
4560 *
4561 * ASSUMES that the caller owns the PGM lock.
4562 *
4563 * @param pVM The cross context VM structure.
4564 */
4565void pgmR3PhysRomTerm(PVM pVM)
4566{
4567 /*
4568 * Free the heap copy of the original bits.
4569 */
4570 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
4571 {
4572 if ( pRom->pvOriginal
4573 && !(pRom->fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY))
4574 {
4575 RTMemFree((void *)pRom->pvOriginal);
4576 pRom->pvOriginal = NULL;
4577 }
4578 }
4579}
4580
4581
4582/**
4583 * Change the shadowing of a range of ROM pages.
4584 *
4585 * This is intended for implementing chipset specific memory registers
4586 * and will not be very strict about the input. It will silently ignore
4587 * any pages that are not part of a shadowed ROM.
4588 *
4589 * @returns VBox status code.
4590 * @retval VINF_PGM_SYNC_CR3
4591 *
4592 * @param pVM The cross context VM structure.
4593 * @param GCPhys Where to start. Page aligned.
4594 * @param cb How much to change. Page aligned.
4595 * @param enmProt The new ROM protection.
4596 */
4597VMMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
4598{
4599 /*
4600 * Check input
4601 */
4602 if (!cb)
4603 return VINF_SUCCESS;
4604 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
4605 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
4606 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
4607 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
4608 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
4609
4610 /*
4611 * Process the request.
4612 */
4613 pgmLock(pVM);
4614 int rc = VINF_SUCCESS;
4615 bool fFlushTLB = false;
4616 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
4617 {
4618 if ( GCPhys <= pRom->GCPhysLast
4619 && GCPhysLast >= pRom->GCPhys
4620 && (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
4621 {
4622 /*
4623 * Iterate the relevant pages and make the necessary changes.
4624 */
4625 bool fChanges = false;
4626 uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
4627 ? pRom->cb >> PAGE_SHIFT
4628 : (GCPhysLast - pRom->GCPhys + 1) >> PAGE_SHIFT;
4629 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
4630 iPage < cPages;
4631 iPage++)
4632 {
4633 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
4634 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
4635 {
4636 fChanges = true;
4637
4638 /* flush references to the page. */
4639 PPGMPAGE pRamPage = pgmPhysGetPage(pVM, pRom->GCPhys + (iPage << PAGE_SHIFT));
4640 int rc2 = pgmPoolTrackUpdateGCPhys(pVM, pRom->GCPhys + (iPage << PAGE_SHIFT), pRamPage,
4641 true /*fFlushPTEs*/, &fFlushTLB);
4642 if (rc2 != VINF_SUCCESS && (rc == VINF_SUCCESS || RT_FAILURE(rc2)))
4643 rc = rc2;
4644 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pRamPage);
4645
4646 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
4647 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
4648
4649 *pOld = *pRamPage;
4650 *pRamPage = *pNew;
4651 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
4652
4653 /* Tell NEM about the backing and protection change. */
4654 if (VM_IS_NEM_ENABLED(pVM))
4655 {
4656 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pNew);
4657 NEMHCNotifyPhysPageChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pOld), PGM_PAGE_GET_HCPHYS(pNew),
4658 pgmPhysPageCalcNemProtection(pRamPage, enmType), enmType, &u2State);
4659 PGM_PAGE_SET_NEM_STATE(pRamPage, u2State);
4660 }
4661 }
4662 pRomPage->enmProt = enmProt;
4663 }
4664
4665 /*
4666 * Reset the access handler if we made changes, no need
4667 * to optimize this.
4668 */
4669 if (fChanges)
4670 {
4671 int rc2 = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
4672 if (RT_FAILURE(rc2))
4673 {
4674 pgmUnlock(pVM);
4675 AssertRC(rc);
4676 return rc2;
4677 }
4678 }
4679
4680 /* Advance - cb isn't updated. */
4681 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
4682 }
4683 }
4684 pgmUnlock(pVM);
4685 if (fFlushTLB)
4686 PGM_INVL_ALL_VCPU_TLBS(pVM);
4687
4688 return rc;
4689}
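
/*
 * Hypothetical usage sketch (illustration only): a chipset device model could
 * reset a shadowed BIOS segment back to its power-on protection (reads from
 * the ROM, writes ignored) when the guest clears its shadow-control register.
 * The address and size are example values; the protection constant is the one
 * also used by pgmR3PhysRomReset above.
 *
 *     int rc = PGMR3PhysRomProtect(pVM, UINT32_C(0xf0000), UINT32_C(0x10000), PGMROMPROT_READ_ROM_WRITE_IGNORE);
 *     AssertRC(rc);
 */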
4690
4691
4692/**
4693 * Sets the Address Gate 20 state.
4694 *
4695 * @param pVCpu The cross context virtual CPU structure.
4696 * @param fEnable True if the gate should be enabled.
4697 * False if the gate should be disabled.
4698 */
4699VMMDECL(void) PGMR3PhysSetA20(PVMCPU pVCpu, bool fEnable)
4700{
4701 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVCpu->pgm.s.fA20Enabled));
4702 if (pVCpu->pgm.s.fA20Enabled != fEnable)
4703 {
4704#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4705 PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4706 if ( CPUMIsGuestInVmxRootMode(pCtx)
4707 && !fEnable)
4708 {
4709 Log(("Cannot enter A20M mode while in VMX root mode\n"));
4710 return;
4711 }
4712#endif
4713 pVCpu->pgm.s.fA20Enabled = fEnable;
4714 pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!fEnable << 20);
4715 NEMR3NotifySetA20(pVCpu, fEnable);
4716#ifdef PGM_WITH_A20
4717 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
4718 pgmR3RefreshShadowModeAfterA20Change(pVCpu);
4719 HMFlushTlb(pVCpu);
4720#endif
4721 IEMTlbInvalidateAllPhysical(pVCpu);
4722 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cA20Changes);
4723 }
4724}
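
/*
 * Hypothetical usage sketch (illustration only): the A20 gate is typically
 * driven by the keyboard controller or by bit 1 of I/O port 92h ("fast A20");
 * a device model handling such a write could forward it like this.  The port
 * handling context and the u8Value variable are made up.
 *
 *     // u8Value is the byte the guest wrote to port 92h; bit 1 is the fast A20 gate.
 *     PGMR3PhysSetA20(VMMGetCpu(pVM), RT_BOOL(u8Value & RT_BIT(1)));
 */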
4725
4726
4727/**
4728 * Tree enumeration callback for dealing with age rollover.
4729 * It will perform a simple compression of the current age.
4730 */
4731static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
4732{
4733 /* Age compression - ASSUMES iNow == 4. */
4734 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
4735 if (pChunk->iLastUsed >= UINT32_C(0xffffff00))
4736 pChunk->iLastUsed = 3;
4737 else if (pChunk->iLastUsed >= UINT32_C(0xfffff000))
4738 pChunk->iLastUsed = 2;
4739 else if (pChunk->iLastUsed)
4740 pChunk->iLastUsed = 1;
4741 else /* iLastUsed = 0 */
4742 pChunk->iLastUsed = 4;
4743
4744 NOREF(pvUser);
4745 return 0;
4746}
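
/*
 * Illustration (not from the original source): with iNow just reset to 4, the
 * compression above maps the old 32-bit ages onto a small range again:
 *
 *     iLastUsed in [0xffffff00..0xffffffff]  ->  3
 *     iLastUsed in [0xfffff000..0xfffffeff]  ->  2
 *     iLastUsed in [1..0xffffefff]           ->  1
 *     iLastUsed == 0                         ->  4
 */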
4747
4748
4749/**
4750 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
4751 */
4752typedef struct PGMR3PHYSCHUNKUNMAPCB
4753{
4754 PVM pVM; /**< Pointer to the VM. */
4755 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
4756} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
4757
4758
4759/**
4760 * Callback used to find the mapping that's been unused for
4761 * the longest time.
4762 */
4763static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLU32NODECORE pNode, void *pvUser)
4764{
4765 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
4766 PPGMR3PHYSCHUNKUNMAPCB pArg = (PPGMR3PHYSCHUNKUNMAPCB)pvUser;
4767
4768 /*
4769 * Check for locks and compare when last used.
4770 */
4771 if (pChunk->cRefs)
4772 return 0;
4773 if (pChunk->cPermRefs)
4774 return 0;
4775 if ( pArg->pChunk
4776 && pChunk->iLastUsed >= pArg->pChunk->iLastUsed)
4777 return 0;
4778
4779 /*
4780 * Check that it's not in any of the TLBs.
4781 */
4782 PVM pVM = pArg->pVM;
4783 if ( pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(pChunk->Core.Key)].idChunk
4784 == pChunk->Core.Key)
4785 {
4786 pChunk = NULL;
4787 return 0;
4788 }
4789#ifdef VBOX_STRICT
4790 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
4791 {
4792 Assert(pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk != pChunk);
4793 Assert(pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk != pChunk->Core.Key);
4794 }
4795#endif
4796
4797 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
4798 if (pVM->pgm.s.PhysTlbR0.aEntries[i].pMap == pChunk)
4799 return 0;
4800 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
4801 if (pVM->pgm.s.PhysTlbR3.aEntries[i].pMap == pChunk)
4802 return 0;
4803
4804 pArg->pChunk = pChunk;
4805 return 0;
4806}
4807
4808
4809/**
4810 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
4811 *
4812 * The candidate will not be part of any TLBs, so no need to flush
4813 * anything afterwards.
4814 *
4815 * @returns Chunk id.
4816 * @param pVM The cross context VM structure.
4817 */
4818static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
4819{
4820 PGM_LOCK_ASSERT_OWNER(pVM);
4821
4822 /*
4823 * Enumerate the age tree starting with the left most node.
4824 */
4825 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkFindCandidate, a);
4826 PGMR3PHYSCHUNKUNMAPCB Args;
4827 Args.pVM = pVM;
4828 Args.pChunk = NULL;
4829 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args);
4830 Assert(Args.pChunk);
4831 if (Args.pChunk)
4832 {
4833 Assert(Args.pChunk->cRefs == 0);
4834 Assert(Args.pChunk->cPermRefs == 0);
4835 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkFindCandidate, a);
4836 return Args.pChunk->Core.Key;
4837 }
4838
4839 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkFindCandidate, a);
4840 return INT32_MAX;
4841}
4842
4843
4844/**
4845 * Rendezvous callback used by pgmR3PhysUnmapChunk that unmaps a chunk
4846 *
4847 * This is only called on one of the EMTs while the other ones are waiting for
4848 * it to complete this function.
4849 *
4850 * @returns VINF_SUCCESS (VBox strict status code).
4851 * @param pVM The cross context VM structure.
4852 * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
4853 * @param pvUser User pointer. Unused
4854 *
4855 */
4856static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysUnmapChunkRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
4857{
4858 int rc = VINF_SUCCESS;
4859 pgmLock(pVM);
4860 NOREF(pVCpu); NOREF(pvUser);
4861
4862 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
4863 {
4864 /* Flush the pgm pool cache; call the internal rendezvous handler as we're already in a rendezvous handler here. */
4865 /** @todo also not really efficient to unmap a chunk that contains PD
4866 * or PT pages. */
4867 pgmR3PoolClearAllRendezvous(pVM, pVM->apCpusR3[0], NULL /* no need to flush the REM TLB as we already did that above */);
4868
4869 /*
4870 * Request the ring-0 part to unmap a chunk to make space in the mapping cache.
4871 */
4872 GMMMAPUNMAPCHUNKREQ Req;
4873 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
4874 Req.Hdr.cbReq = sizeof(Req);
4875 Req.pvR3 = NULL;
4876 Req.idChunkMap = NIL_GMM_CHUNKID;
4877 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
4878 if (Req.idChunkUnmap != INT32_MAX)
4879 {
4880 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkUnmap, a);
4881 rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
4882 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkUnmap, a);
4883 if (RT_SUCCESS(rc))
4884 {
4885 /*
4886 * Remove the unmapped one.
4887 */
4888 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
4889 AssertRelease(pUnmappedChunk);
4890 AssertRelease(!pUnmappedChunk->cRefs);
4891 AssertRelease(!pUnmappedChunk->cPermRefs);
4892 pUnmappedChunk->pv = NULL;
4893 pUnmappedChunk->Core.Key = UINT32_MAX;
4894#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
4895 MMR3HeapFree(pUnmappedChunk);
4896#else
4897 MMR3UkHeapFree(pVM, pUnmappedChunk, MM_TAG_PGM_CHUNK_MAPPING);
4898#endif
4899 pVM->pgm.s.ChunkR3Map.c--;
4900 pVM->pgm.s.cUnmappedChunks++;
4901
4902 /*
4903 * Flush dangling PGM pointers (R3 & R0 ptrs to GC physical addresses).
4904 */
4905 /** @todo We should not flush chunks which include cr3 mappings. */
4906 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
4907 {
4908 PPGMCPU pPGM = &pVM->apCpusR3[idCpu]->pgm.s;
4909
4910 pPGM->pGst32BitPdR3 = NULL;
4911 pPGM->pGstPaePdptR3 = NULL;
4912 pPGM->pGstAmd64Pml4R3 = NULL;
4913#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4914 pPGM->pGst32BitPdR0 = NIL_RTR0PTR;
4915 pPGM->pGstPaePdptR0 = NIL_RTR0PTR;
4916 pPGM->pGstAmd64Pml4R0 = NIL_RTR0PTR;
4917#endif
4918 for (unsigned i = 0; i < RT_ELEMENTS(pPGM->apGstPaePDsR3); i++)
4919 {
4920 pPGM->apGstPaePDsR3[i] = NULL;
4921#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4922 pPGM->apGstPaePDsR0[i] = NIL_RTR0PTR;
4923#endif
4924 }
4925
4926 /* Flush REM TLBs. */
4927 CPUMSetChangedFlags(pVM->apCpusR3[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
4928 }
4929 }
4930 }
4931 }
4932 pgmUnlock(pVM);
4933 return rc;
4934}
4935
4936/**
4937 * Unmap a chunk to free up virtual address space (request packet handler for pgmR3PhysChunkMap)
4938 *
4939 * @returns VBox status code.
4940 * @param pVM The cross context VM structure.
4941 */
4942void pgmR3PhysUnmapChunk(PVM pVM)
4943{
4944 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysUnmapChunkRendezvous, NULL);
4945 AssertRC(rc);
4946}
4947
4948
4949/**
4950 * Maps the given chunk into the ring-3 mapping cache.
4951 *
4952 * This will call ring-0.
4953 *
4954 * @returns VBox status code.
4955 * @param pVM The cross context VM structure.
4956 * @param idChunk The chunk in question.
4957 * @param ppChunk Where to store the chunk tracking structure.
4958 *
4959 * @remarks Called from within the PGM critical section.
4960 * @remarks Can be called from any thread!
4961 */
4962int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
4963{
4964 int rc;
4965
4966 PGM_LOCK_ASSERT_OWNER(pVM);
4967
4968 /*
4969 * Move the chunk time forward.
4970 */
4971 pVM->pgm.s.ChunkR3Map.iNow++;
4972 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
4973 {
4974 pVM->pgm.s.ChunkR3Map.iNow = 4;
4975 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, NULL);
4976 }
4977
4978 /*
4979 * Allocate a new tracking structure first.
4980 */
4981#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
4982 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAllocZ(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
4983#else
4984 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3UkHeapAllocZ(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk), NULL);
4985#endif
4986 AssertReturn(pChunk, VERR_NO_MEMORY);
4987 pChunk->Core.Key = idChunk;
4988 pChunk->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
4989
4990 /*
4991 * Request the ring-0 part to map the chunk in question.
4992 */
4993 GMMMAPUNMAPCHUNKREQ Req;
4994 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
4995 Req.Hdr.cbReq = sizeof(Req);
4996 Req.pvR3 = NULL;
4997 Req.idChunkMap = idChunk;
4998 Req.idChunkUnmap = NIL_GMM_CHUNKID;
4999
5000 /* Must be callable from any thread, so can't use VMMR3CallR0. */
5001 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkMap, a);
5002 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
5003 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkMap, a);
5004 if (RT_SUCCESS(rc))
5005 {
5006 pChunk->pv = Req.pvR3;
5007
5008 /*
5009 * If we're running out of virtual address space, then we should
5010 * unmap another chunk.
5011 *
5012 * Currently, an unmap operation requires that all other virtual CPUs
5013 * are idling and not by chance making use of the memory we're
5014 * unmapping. So, we create an async unmap operation here.
5015 *
5016 * Now, when creating or restoring a saved state this won't work very
5017 * well since we may want to restore all guest RAM + a little something.
5018 * So, we have to do the unmap synchronously. Fortunately for us
5019 * though, during these operations the other virtual CPUs are inactive
5020 * and it should be safe to do this.
5021 */
5022 /** @todo Eventually we should lock all memory when used and do
5023 * map+unmap as one kernel call without any rendezvous or
5024 * other precautions. */
5025 if (pVM->pgm.s.ChunkR3Map.c + 1 >= pVM->pgm.s.ChunkR3Map.cMax)
5026 {
5027 switch (VMR3GetState(pVM))
5028 {
5029 case VMSTATE_LOADING:
5030 case VMSTATE_SAVING:
5031 {
5032 PVMCPU pVCpu = VMMGetCpu(pVM);
5033 if ( pVCpu
5034 && pVM->pgm.s.cDeprecatedPageLocks == 0)
5035 {
5036 pgmR3PhysUnmapChunkRendezvous(pVM, pVCpu, NULL);
5037 break;
5038 }
5039 }
5040 RT_FALL_THRU();
5041 default:
5042 rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysUnmapChunk, 1, pVM);
5043 AssertRC(rc);
5044 break;
5045 }
5046 }
5047
5048 /*
5049 * Update the tree. We must do this after any unmapping to make sure
5050 * the chunk we're going to return isn't unmapped by accident.
5051 */
5052 AssertPtr(Req.pvR3);
5053 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
5054 AssertRelease(fRc);
5055 pVM->pgm.s.ChunkR3Map.c++;
5056 pVM->pgm.s.cMappedChunks++;
5057 }
5058 else
5059 {
5060 /** @todo this may fail because of /proc/sys/vm/max_map_count, so we
5061 * should probably restrict ourselves on linux. */
5062 AssertRC(rc);
5063#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
5064 MMR3HeapFree(pChunk);
5065#else
5066 MMR3UkHeapFree(pVM, pChunk, MM_TAG_PGM_CHUNK_MAPPING);
5067#endif
5068 pChunk = NULL;
5069 }
5070
5071 *ppChunk = pChunk;
5072 return rc;
5073}
5074
5075
5076/**
5077 * For VMMCALLRING3_PGM_MAP_CHUNK, considered internal.
5078 *
5079 * @returns see pgmR3PhysChunkMap.
5080 * @param pVM The cross context VM structure.
5081 * @param idChunk The chunk to map.
5082 */
5083VMMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
5084{
5085 PPGMCHUNKR3MAP pChunk;
5086 int rc;
5087
5088 pgmLock(pVM);
5089 rc = pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
5090 pgmUnlock(pVM);
5091 return rc;
5092}
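
/*
 * Hypothetical usage sketch (illustration only): ring-3 code servicing a
 * VMMCALLRING3_PGM_MAP_CHUNK request simply forwards the chunk id.  The
 * idChunkToMap variable is assumed context.
 *
 *     int rc = PGMR3PhysChunkMap(pVM, idChunkToMap);
 *     AssertRC(rc);
 */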
5093
5094
5095/**
5096 * Invalidates the TLB for the ring-3 mapping cache.
5097 *
5098 * @param pVM The cross context VM structure.
5099 */
5100VMMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
5101{
5102 pgmLock(pVM);
5103 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
5104 {
5105 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
5106 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
5107 }
5108 /* The page map TLB references chunks, so invalidate that one too. */
5109 pgmPhysInvalidatePageMapTLB(pVM);
5110 pgmUnlock(pVM);
5111}
5112
5113
5114/**
5115 * Response to VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE to allocate a large
5116 * (2MB) page for use with a nested paging PDE.
5117 *
5118 * @returns The following VBox status codes.
5119 * @retval VINF_SUCCESS on success.
5120 * @retval VINF_EM_NO_MEMORY if we're out of memory.
5121 *
5122 * @param pVM The cross context VM structure.
5123 * @param   GCPhys  GC physical start address of the 2 MB range.
5124 */
5125VMMR3DECL(int) PGMR3PhysAllocateLargeHandyPage(PVM pVM, RTGCPHYS GCPhys)
5126{
5127#ifdef PGM_WITH_LARGE_PAGES
5128 uint64_t u64TimeStamp1, u64TimeStamp2;
5129
5130 pgmLock(pVM);
5131
5132 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatAllocLargePage, a);
5133 u64TimeStamp1 = RTTimeMilliTS();
5134 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE, 0, NULL);
5135 u64TimeStamp2 = RTTimeMilliTS();
5136 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatAllocLargePage, a);
5137 if (RT_SUCCESS(rc))
5138 {
5139 Assert(pVM->pgm.s.cLargeHandyPages == 1);
5140
5141 uint32_t idPage = pVM->pgm.s.aLargeHandyPage[0].idPage;
5142 RTHCPHYS HCPhys = pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys;
5143
5144 void *pv;
5145
5146 /* Map the large page into our address space.
5147 *
5148 * Note: assuming that within the 2 MB range:
5149 * - GCPhys + PAGE_SIZE = HCPhys + PAGE_SIZE (whole point of this exercise)
5150     * - user space mapping is contiguous as well
5151 * - page id (GCPhys) + 1 = page id (GCPhys + PAGE_SIZE)
5152 */
5153 rc = pgmPhysPageMapByPageID(pVM, idPage, HCPhys, &pv);
5154 AssertLogRelMsg(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc\n", idPage, HCPhys, rc));
5155
5156 if (RT_SUCCESS(rc))
5157 {
5158 /*
5159 * Clear the pages.
5160 */
5161 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatClearLargePage, b);
5162 for (unsigned i = 0; i < _2M/PAGE_SIZE; i++)
5163 {
5164 ASMMemZeroPage(pv);
5165
5166 PPGMPAGE pPage;
5167 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
5168 AssertRC(rc);
5169
5170 Assert(PGM_PAGE_IS_ZERO(pPage));
5171 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
5172 pVM->pgm.s.cZeroPages--;
5173
5174 /*
5175 * Do the PGMPAGE modifications.
5176 */
5177 pVM->pgm.s.cPrivatePages++;
5178 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
5179 PGM_PAGE_SET_PAGEID(pVM, pPage, idPage);
5180 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
5181 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PDE);
5182 PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
5183 PGM_PAGE_SET_TRACKING(pVM, pPage, 0);
5184
5185 /* Somewhat dirty assumption that page ids are increasing. */
5186 idPage++;
5187
5188 HCPhys += PAGE_SIZE;
5189 GCPhys += PAGE_SIZE;
5190
5191 pv = (void *)((uintptr_t)pv + PAGE_SIZE);
5192
5193            Log3(("PGMR3PhysAllocateLargePage: idPage=%#x HCPhys=%RHp\n", idPage, HCPhys));
5194 }
5195 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatClearLargePage, b);
5196
5197 /* Flush all TLBs. */
5198 PGM_INVL_ALL_VCPU_TLBS(pVM);
5199 pgmPhysInvalidatePageMapTLB(pVM);
5200 }
5201 pVM->pgm.s.cLargeHandyPages = 0;
5202 }
5203
5204 if (RT_SUCCESS(rc))
5205 {
5206 static uint32_t cTimeOut = 0;
5207 uint64_t u64TimeStampDelta = u64TimeStamp2 - u64TimeStamp1;
5208
5209 if (u64TimeStampDelta > 100)
5210 {
5211 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatLargePageOverflow);
5212 if ( ++cTimeOut > 10
5213 || u64TimeStampDelta > 1000 /* more than one second forces an early retirement from allocating large pages. */)
5214 {
5215                /* If repeated attempts to allocate a large page take more than 100 ms, then we fall back to normal 4k pages.
5216                 * E.g. Vista 64 tries to move memory around, which takes a huge amount of time.
5217                 */
5218                LogRel(("PGMR3PhysAllocateLargePage: allocating large pages takes too long (last attempt %RU64 ms; nr of timeouts %d); DISABLE\n", u64TimeStampDelta, cTimeOut));
5219 PGMSetLargePageUsage(pVM, false);
5220 }
5221 }
5222 else
5223 if (cTimeOut > 0)
5224 cTimeOut--;
5225 }
5226
5227 pgmUnlock(pVM);
5228 return rc;
5229#else
5230 RT_NOREF(pVM, GCPhys);
5231 return VERR_NOT_IMPLEMENTED;
5232#endif /* PGM_WITH_LARGE_PAGES */
5233}
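
/*
 * Usage sketch for PGMR3PhysAllocateLargeHandyPage (illustration only): a
 * caller would normally round the faulting guest physical address down to a
 * 2 MB boundary before requesting the large page.  The helper name
 * pgmR3ExampleBackWithLargePage is hypothetical; the mask macro is assumed to
 * come from iprt/x86.h.
 */
#if 0 /* example sketch */
static int pgmR3ExampleBackWithLargePage(PVM pVM, RTGCPHYS GCPhysFault)
{
    /* A large page must cover a naturally aligned 2 MB guest physical range. */
    RTGCPHYS const GCPhysBase = GCPhysFault & X86_PDE2M_PAE_PG_MASK;

    /* Allocate, map and clear the 512 backing pages in one go. */
    int rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
    if (rc == VINF_EM_NO_MEMORY)
        LogRel(("Example: out of memory while backing %RGp with a large page\n", GCPhysBase));
    return rc;
}
#endif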
5234
5235
5236/**
5237 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES.
5238 *
5239 * This function will also work the VM_FF_PGM_NO_MEMORY force action flag, to
5240 * signal and clear the out of memory condition. When contracted, this API is
5241 * used to try to clear the condition when the user wants to resume.
5242 *
5243 * @returns The following VBox status codes.
5244 * @retval VINF_SUCCESS on success. FFs cleared.
5245 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in
5246 * this case and it gets accompanied by VM_FF_PGM_NO_MEMORY.
5247 *
5248 * @param pVM The cross context VM structure.
5249 *
5250 * @remarks The VINF_EM_NO_MEMORY status is for the benefit of the FF processing
5251 * in EM.cpp and shouldn't be propagated outside TRPM, HM, EM and
5252 * pgmPhysEnsureHandyPage. There is one exception to this in the \#PF
5253 * handler.
5254 */
5255VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
5256{
5257 pgmLock(pVM);
5258
5259 /*
5260 * Allocate more pages, noting down the index of the first new page.
5261 */
5262 uint32_t iClear = pVM->pgm.s.cHandyPages;
5263 AssertMsgReturn(iClear <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d", iClear), VERR_PGM_HANDY_PAGE_IPE);
5264 Log(("PGMR3PhysAllocateHandyPages: %d -> %d\n", iClear, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
5265 int rcAlloc = VINF_SUCCESS;
5266 int rcSeed = VINF_SUCCESS;
5267 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
5268 while (rc == VERR_GMM_SEED_ME)
5269 {
5270 void *pvChunk;
5271 rcAlloc = rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
5272 if (RT_SUCCESS(rc))
5273 {
5274 rcSeed = rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
5275 if (RT_FAILURE(rc))
5276 SUPR3PageFree(pvChunk, GMM_CHUNK_SIZE >> PAGE_SHIFT);
5277 }
5278 if (RT_SUCCESS(rc))
5279 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
5280 }
5281
5282    /** @todo We should split this up into an allocate and flush operation.  Sometimes you want to flush and not allocate more (which will trigger the VM account limit error). */
5283 if ( rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT
5284 && pVM->pgm.s.cHandyPages > 0)
5285 {
5286 /* Still handy pages left, so don't panic. */
5287 rc = VINF_SUCCESS;
5288 }
5289
5290 if (RT_SUCCESS(rc))
5291 {
5292 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
5293 Assert(pVM->pgm.s.cHandyPages > 0);
5294 VM_FF_CLEAR(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
5295 VM_FF_CLEAR(pVM, VM_FF_PGM_NO_MEMORY);
5296
5297#ifdef VBOX_STRICT
5298 uint32_t i;
5299 for (i = iClear; i < pVM->pgm.s.cHandyPages; i++)
5300 if ( pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID
5301 || pVM->pgm.s.aHandyPages[i].idSharedPage != NIL_GMM_PAGEID
5302 || (pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & PAGE_OFFSET_MASK))
5303 break;
5304 if (i != pVM->pgm.s.cHandyPages)
5305 {
5306 RTAssertMsg1Weak(NULL, __LINE__, __FILE__, __FUNCTION__);
5307 RTAssertMsg2Weak("i=%d iClear=%d cHandyPages=%d\n", i, iClear, pVM->pgm.s.cHandyPages);
5308 for (uint32_t j = iClear; j < pVM->pgm.s.cHandyPages; j++)
5309                RTAssertMsg2Add("%03d: idPage=%d HCPhysGCPhys=%RHp idSharedPage=%d%s\n", j,
5310 pVM->pgm.s.aHandyPages[j].idPage,
5311 pVM->pgm.s.aHandyPages[j].HCPhysGCPhys,
5312 pVM->pgm.s.aHandyPages[j].idSharedPage,
5313 j == i ? " <---" : "");
5314 RTAssertPanic();
5315 }
5316#endif
5317 /*
5318 * Clear the pages.
5319 */
5320 while (iClear < pVM->pgm.s.cHandyPages)
5321 {
5322 PGMMPAGEDESC pPage = &pVM->pgm.s.aHandyPages[iClear];
5323 void *pv;
5324 rc = pgmPhysPageMapByPageID(pVM, pPage->idPage, pPage->HCPhysGCPhys, &pv);
5325 AssertLogRelMsgBreak(RT_SUCCESS(rc),
5326 ("%u/%u: idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc\n",
5327 iClear, pVM->pgm.s.cHandyPages, pPage->idPage, pPage->HCPhysGCPhys, rc));
5328 ASMMemZeroPage(pv);
5329 iClear++;
5330            Log3(("PGMR3PhysAllocateHandyPages: idPage=%#x HCPhys=%RHp\n", pPage->idPage, pPage->HCPhysGCPhys));
5331 }
5332 }
5333 else
5334 {
5335 uint64_t cAllocPages, cMaxPages, cBalloonPages;
5336
5337 /*
5338 * We should never get here unless there is a genuine shortage of
5339 * memory (or some internal error). Flag the error so the VM can be
5340 * suspended ASAP and the user informed. If we're totally out of
5341 * handy pages we will return failure.
5342 */
5343 /* Report the failure. */
5344 LogRel(("PGM: Failed to procure handy pages; rc=%Rrc rcAlloc=%Rrc rcSeed=%Rrc cHandyPages=%#x\n"
5345 " cAllPages=%#x cPrivatePages=%#x cSharedPages=%#x cZeroPages=%#x\n",
5346 rc, rcAlloc, rcSeed,
5347 pVM->pgm.s.cHandyPages,
5348 pVM->pgm.s.cAllPages,
5349 pVM->pgm.s.cPrivatePages,
5350 pVM->pgm.s.cSharedPages,
5351 pVM->pgm.s.cZeroPages));
5352
5353 if (GMMR3QueryMemoryStats(pVM, &cAllocPages, &cMaxPages, &cBalloonPages) == VINF_SUCCESS)
5354 {
5355 LogRel(("GMM: Statistics:\n"
5356 " Allocated pages: %RX64\n"
5357 " Maximum pages: %RX64\n"
5358 " Ballooned pages: %RX64\n", cAllocPages, cMaxPages, cBalloonPages));
5359 }
5360
5361 if ( rc != VERR_NO_MEMORY
5362 && rc != VERR_NO_PHYS_MEMORY
5363 && rc != VERR_LOCK_FAILED)
5364 {
5365 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
5366 {
5367 LogRel(("PGM: aHandyPages[#%#04x] = {.HCPhysGCPhys=%RHp, .idPage=%#08x, .idSharedPage=%#08x}\n",
5368 i, pVM->pgm.s.aHandyPages[i].HCPhysGCPhys, pVM->pgm.s.aHandyPages[i].idPage,
5369 pVM->pgm.s.aHandyPages[i].idSharedPage));
5370 uint32_t const idPage = pVM->pgm.s.aHandyPages[i].idPage;
5371 if (idPage != NIL_GMM_PAGEID)
5372 {
5373 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
5374 pRam;
5375 pRam = pRam->pNextR3)
5376 {
5377 uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
5378 for (uint32_t iPage = 0; iPage < cPages; iPage++)
5379 if (PGM_PAGE_GET_PAGEID(&pRam->aPages[iPage]) == idPage)
5380 LogRel(("PGM: Used by %RGp %R[pgmpage] (%s)\n",
5381 pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pRam->aPages[iPage], pRam->pszDesc));
5382 }
5383 }
5384 }
5385 }
5386
5387 if (rc == VERR_NO_MEMORY)
5388 {
5389 uint64_t cbHostRamAvail = 0;
5390 int rc2 = RTSystemQueryAvailableRam(&cbHostRamAvail);
5391 if (RT_SUCCESS(rc2))
5392 LogRel(("Host RAM: %RU64MB available\n", cbHostRamAvail / _1M));
5393 else
5394 LogRel(("Cannot determine the amount of available host memory\n"));
5395 }
5396
5397 /* Set the FFs and adjust rc. */
5398 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
5399 VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
5400 if ( rc == VERR_NO_MEMORY
5401 || rc == VERR_NO_PHYS_MEMORY
5402 || rc == VERR_LOCK_FAILED)
5403 rc = VINF_EM_NO_MEMORY;
5404 }
5405
5406 pgmUnlock(pVM);
5407 return rc;
5408}
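
/*
 * Usage sketch (illustration only): this is roughly how a ring-3 force-action
 * loop could service VM_FF_PGM_NEED_HANDY_PAGES; the real handling lives in
 * EM.cpp and differs in detail.  The helper name is hypothetical.
 */
#if 0 /* example sketch */
static int pgmR3ExampleServiceHandyPagesFF(PVM pVM)
{
    int rc = VINF_SUCCESS;
    if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
    {
        /* Refill the handy page array; on success both the handy-pages and
           no-memory force-action flags are cleared by the call. */
        rc = PGMR3PhysAllocateHandyPages(pVM);
        if (rc == VINF_EM_NO_MEMORY)
            LogRel(("Example: still out of memory, the VM should be suspended\n"));
    }
    return rc;
}
#endif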
5409
5410
5411/**
5412 * Frees the specified RAM page and replaces it with the ZERO page.
5413 *
5414 * This is used by ballooning, remapping MMIO2, RAM reset and state loading.
5415 *
5416 * @param pVM The cross context VM structure.
5417 * @param pReq Pointer to the request.
5418 * @param   pcPendingPages  Where the number of pages waiting to be freed is
5419 *                          kept.  This will normally be incremented.
5420 * @param pPage Pointer to the page structure.
5421 * @param GCPhys The guest physical address of the page, if applicable.
5422 * @param enmNewType New page type for NEM notification, since several
5423 * callers will change the type upon successful return.
5424 *
5425 * @remarks The caller must own the PGM lock.
5426 */
5427int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys,
5428 PGMPAGETYPE enmNewType)
5429{
5430 /*
5431 * Assert sanity.
5432 */
5433 PGM_LOCK_ASSERT_OWNER(pVM);
5434 if (RT_UNLIKELY( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
5435 && PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW))
5436 {
5437 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
5438 return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
5439 }
5440
5441 /** @todo What about ballooning of large pages??! */
5442 Assert( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
5443 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED);
5444
5445 if ( PGM_PAGE_IS_ZERO(pPage)
5446 || PGM_PAGE_IS_BALLOONED(pPage))
5447 return VINF_SUCCESS;
5448
5449 const uint32_t idPage = PGM_PAGE_GET_PAGEID(pPage);
5450 Log3(("pgmPhysFreePage: idPage=%#x GCPhys=%RGp pPage=%R[pgmpage]\n", idPage, GCPhys, pPage));
5451 if (RT_UNLIKELY( idPage == NIL_GMM_PAGEID
5452 || idPage > GMM_PAGEID_LAST
5453 || PGM_PAGE_GET_CHUNKID(pPage) == NIL_GMM_CHUNKID))
5454 {
5455 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
5456        return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, idPage);
5457 }
5458 const RTHCPHYS HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
5459
5460 /* update page count stats. */
5461 if (PGM_PAGE_IS_SHARED(pPage))
5462 pVM->pgm.s.cSharedPages--;
5463 else
5464 pVM->pgm.s.cPrivatePages--;
5465 pVM->pgm.s.cZeroPages++;
5466
5467 /* Deal with write monitored pages. */
5468 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
5469 {
5470 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
5471 pVM->pgm.s.cWrittenToPages++;
5472 }
5473
5474 /*
5475 * pPage = ZERO page.
5476 */
5477 PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
5478 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
5479 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
5480 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_DONTCARE);
5481 PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
5482 PGM_PAGE_SET_TRACKING(pVM, pPage, 0);
5483
5484 /* Flush physical page map TLB entry. */
5485 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
5486
5487 /* Notify NEM. */
5488 /** @todo consider doing batch NEM notifications. */
5489 if (VM_IS_NEM_ENABLED(pVM))
5490 {
5491 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
5492 NEMHCNotifyPhysPageChanged(pVM, GCPhys, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg,
5493 pgmPhysPageCalcNemProtection(pPage, enmNewType), enmNewType, &u2State);
5494 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
5495 }
5496
5497 /*
5498 * Make sure it's not in the handy page array.
5499 */
5500 for (uint32_t i = pVM->pgm.s.cHandyPages; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
5501 {
5502 if (pVM->pgm.s.aHandyPages[i].idPage == idPage)
5503 {
5504 pVM->pgm.s.aHandyPages[i].idPage = NIL_GMM_PAGEID;
5505 break;
5506 }
5507 if (pVM->pgm.s.aHandyPages[i].idSharedPage == idPage)
5508 {
5509 pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
5510 break;
5511 }
5512 }
5513
5514 /*
5515     * Push it onto the free request's page array.
5516 */
5517 uint32_t iPage = *pcPendingPages;
5518 Assert(iPage < PGMPHYS_FREE_PAGE_BATCH_SIZE);
5519 *pcPendingPages += 1;
5520
5521 pReq->aPages[iPage].idPage = idPage;
5522
5523 if (iPage + 1 < PGMPHYS_FREE_PAGE_BATCH_SIZE)
5524 return VINF_SUCCESS;
5525
5526 /*
5527 * Flush the pages.
5528 */
5529 int rc = GMMR3FreePagesPerform(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE);
5530 if (RT_SUCCESS(rc))
5531 {
5532 GMMR3FreePagesRePrep(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
5533 *pcPendingPages = 0;
5534 }
5535 return rc;
5536}
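
/*
 * Usage sketch of the batch-free protocol around pgmPhysFreePage (illustration
 * only): a condensed variant of the prepare/perform/cleanup pattern used
 * elsewhere in this file.  It assumes the caller holds the PGM lock, that the
 * range contains only RAM pages, and that the GMMR3FreePages* signatures match
 * their use in this file; the helper name pgmR3ExampleFreeRange is hypothetical.
 * Freeing a real range would additionally update page types and notify NEM.
 */
#if 0 /* example sketch */
static int pgmR3ExampleFreeRange(PVM pVM, PPGMRAMRANGE pRam)
{
    PGM_LOCK_ASSERT_OWNER(pVM);

    /* Prepare a request sized for one batch of PGMPHYS_FREE_PAGE_BATCH_SIZE pages. */
    PGMMFREEPAGESREQ pReq          = NULL;
    uint32_t         cPendingPages = 0;
    int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    AssertLogRelRCReturn(rc, rc);

    /* Queue every page; pgmPhysFreePage flushes full batches internally. */
    uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
    for (uint32_t iPage = 0; iPage < cPages && RT_SUCCESS(rc); iPage++)
        rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRam->aPages[iPage],
                             pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), PGMPAGETYPE_RAM);

    /* Flush whatever is still pending, then clean up the request. */
    if (RT_SUCCESS(rc) && cPendingPages)
        rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
    GMMR3FreePagesCleanup(pReq);
    return rc;
}
#endif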
5537
5538
5539/**
5540 * Converts a GC physical address to a HC ring-3 pointer, with some
5541 * additional checks.
5542 *
5543 * @returns VBox status code.
5544 * @retval VINF_SUCCESS on success.
5545 * @retval VINF_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
5546 * access handler of some kind.
5547 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
5548 * accesses or is odd in any way.
5549 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
5550 *
5551 * @param pVM The cross context VM structure.
5552 * @param GCPhys The GC physical address to convert. Since this is only
5553 * used for filling the REM TLB, the A20 mask must be
5554 * applied before calling this API.
5555 * @param fWritable Whether write access is required.
5556 * @param ppv Where to store the pointer corresponding to GCPhys on
5557 * success.
5558 */
5559VMMR3DECL(int) PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
5560{
5561 pgmLock(pVM);
5562 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
5563
5564 PPGMRAMRANGE pRam;
5565 PPGMPAGE pPage;
5566 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
5567 if (RT_SUCCESS(rc))
5568 {
5569 if (PGM_PAGE_IS_BALLOONED(pPage))
5570 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
5571 else if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
5572 rc = VINF_SUCCESS;
5573 else
5574 {
5575 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
5576 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
5577 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
5578 {
5579 /** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
5580 * in -norawr0 mode. */
5581 if (fWritable)
5582 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
5583 }
5584 else
5585 {
5586                /* Temporarily disabled physical handler(s): since the recompiler
5587                   doesn't get notified when the handler is reset, we'll have to
5588                   pretend it's operating normally. */
5589 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
5590 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
5591 else
5592 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
5593 }
5594 }
5595 if (RT_SUCCESS(rc))
5596 {
5597 int rc2;
5598
5599 /* Make sure what we return is writable. */
5600 if (fWritable)
5601 switch (PGM_PAGE_GET_STATE(pPage))
5602 {
5603 case PGM_PAGE_STATE_ALLOCATED:
5604 break;
5605 case PGM_PAGE_STATE_BALLOONED:
5606 AssertFailed();
5607 break;
5608 case PGM_PAGE_STATE_ZERO:
5609 case PGM_PAGE_STATE_SHARED:
5610 if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
5611 break;
5612 RT_FALL_THRU();
5613 case PGM_PAGE_STATE_WRITE_MONITORED:
5614 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
5615 AssertLogRelRCReturn(rc2, rc2);
5616 break;
5617 }
5618
5619 /* Get a ring-3 mapping of the address. */
5620 PPGMPAGER3MAPTLBE pTlbe;
5621 rc2 = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
5622 AssertLogRelRCReturn(rc2, rc2);
5623 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
5624 /** @todo mapping/locking hell; this isn't horribly efficient since
5625 * pgmPhysPageLoadIntoTlb will repeat the lookup we've done here. */
5626
5627 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
5628 }
5629 else
5630 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
5631
5632 /* else: handler catching all access, no pointer returned. */
5633 }
5634 else
5635 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
5636
5637 pgmUnlock(pVM);
5638 return rc;
5639}
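
/*
 * Usage sketch for PGMR3PhysTlbGCPhys2Ptr (illustration only): the doc comment
 * above requires the A20 mask to be applied first, which the internal
 * PGM_A20_APPLY macro is assumed to do here.  The helper name
 * pgmR3ExampleQueryTlbPtr is hypothetical.
 */
#if 0 /* example sketch */
static int pgmR3ExampleQueryTlbPtr(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, bool fWritable, void **ppv)
{
    /* The API asserts that the A20 gate mask has already been applied. */
    GCPhys = PGM_A20_APPLY(pVCpu, GCPhys);

    int rc = PGMR3PhysTlbGCPhys2Ptr(pVM, GCPhys, fWritable, ppv);
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        Log(("Example: %RGp is readable via *ppv, but writes must go through PGMPhysWrite\n", GCPhys));
    return rc;
}
#endif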
5640