VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp@104843

Last change on this file since 104843 was 104843, checked in by vboxsync, 8 months ago

VMM/PGM: Build fix. bugref:10687 bugref:10093

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 245.3 KB
1/* $Id: PGMPhys.cpp 104843 2024-06-05 01:09:28Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM_PHYS
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#include <VBox/vmm/pgm.h>
35#include <VBox/vmm/iem.h>
36#include <VBox/vmm/iom.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/nem.h>
39#include <VBox/vmm/stam.h>
40#include <VBox/vmm/pdmdev.h>
41#include "PGMInternal.h"
42#include <VBox/vmm/vmcc.h>
43
44#include "PGMInline.h"
45
46#include <VBox/sup.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49#include <VBox/log.h>
50#include <iprt/assert.h>
51#include <iprt/alloc.h>
52#include <iprt/asm.h>
53#ifdef VBOX_STRICT
54# include <iprt/crc.h>
55#endif
56#include <iprt/thread.h>
57#include <iprt/string.h>
58#include <iprt/system.h>
59
60
61/*********************************************************************************************************************************
62* Defined Constants And Macros *
63*********************************************************************************************************************************/
64/** The number of pages to free in one batch. */
65#define PGMPHYS_FREE_PAGE_BATCH_SIZE 128
66
67
68
69/*********************************************************************************************************************************
 70* Reading and Writing Guest Physical Memory *
71*********************************************************************************************************************************/
72
73/*
74 * PGMR3PhysReadU8-64
75 * PGMR3PhysWriteU8-64
76 */
77#define PGMPHYSFN_READNAME PGMR3PhysReadU8
78#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU8
79#define PGMPHYS_DATASIZE 1
80#define PGMPHYS_DATATYPE uint8_t
81#include "PGMPhysRWTmpl.h"
82
83#define PGMPHYSFN_READNAME PGMR3PhysReadU16
84#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU16
85#define PGMPHYS_DATASIZE 2
86#define PGMPHYS_DATATYPE uint16_t
87#include "PGMPhysRWTmpl.h"
88
89#define PGMPHYSFN_READNAME PGMR3PhysReadU32
90#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU32
91#define PGMPHYS_DATASIZE 4
92#define PGMPHYS_DATATYPE uint32_t
93#include "PGMPhysRWTmpl.h"
94
95#define PGMPHYSFN_READNAME PGMR3PhysReadU64
96#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU64
97#define PGMPHYS_DATASIZE 8
98#define PGMPHYS_DATATYPE uint64_t
99#include "PGMPhysRWTmpl.h"
100
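/* Note (illustrative, not part of the original source): the repeated inclusion
 * of PGMPhysRWTmpl.h above is the common "template include" pattern -- each
 * block of defines parameterizes the template, which expands into one
 * size-specific read/write accessor pair before being re-parameterized for the
 * next size.  A minimal, hypothetical sketch of the general mechanism follows;
 * the names are made up and this is not the actual template contents.
 */
#if 0
/* Including side, analogous to one of the parameter blocks above: */
# define EXAMPLE_READNAME ExamplePhysReadU32
# define EXAMPLE_DATATYPE uint32_t
# include "exampletmpl.h"

/* exampletmpl.h -- expands into one accessor per inclusion, then cleans up: */
EXAMPLE_DATATYPE EXAMPLE_READNAME(PVM pVM, RTGCPHYS GCPhys)
{
    EXAMPLE_DATATYPE uValue = 0;
    PGMPhysRead(pVM, GCPhys, &uValue, sizeof(uValue), PGMACCESSORIGIN_DEVICE);
    return uValue;
}
# undef EXAMPLE_READNAME
# undef EXAMPLE_DATATYPE
#endif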
101
102/**
103 * EMT worker for PGMR3PhysReadExternal.
104 */
105static DECLCALLBACK(int) pgmR3PhysReadExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, void *pvBuf, size_t cbRead,
106 PGMACCESSORIGIN enmOrigin)
107{
108 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, *pGCPhys, pvBuf, cbRead, enmOrigin);
109 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
110 return VINF_SUCCESS;
111}
112
113
114/**
115 * Read from physical memory, external users.
116 *
117 * @returns VBox status code.
118 * @retval VINF_SUCCESS.
119 *
120 * @param pVM The cross context VM structure.
121 * @param GCPhys Physical address to read from.
122 * @param pvBuf Where to read into.
123 * @param cbRead How many bytes to read.
124 * @param enmOrigin Who is calling.
125 *
126 * @thread Any but EMTs.
127 */
128VMMR3DECL(int) PGMR3PhysReadExternal(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
129{
130 VM_ASSERT_OTHER_THREAD(pVM);
131
132 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
133 LogFlow(("PGMR3PhysReadExternal: %RGp %d\n", GCPhys, cbRead));
134
135 PGM_LOCK_VOID(pVM);
136
137 /*
138 * Copy loop on ram ranges.
139 */
140 for (;;)
141 {
142 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
143
144 /* Inside range or not? */
145 if (pRam && GCPhys >= pRam->GCPhys)
146 {
147 /*
148 * Must work our way thru this page by page.
149 */
150 RTGCPHYS off = GCPhys - pRam->GCPhys;
151 while (off < pRam->cb)
152 {
153 unsigned iPage = off >> GUEST_PAGE_SHIFT;
154 PPGMPAGE pPage = &pRam->aPages[iPage];
155
156 /*
157 * If the page has an ALL access handler, we'll have to
158 * delegate the job to EMT.
159 */
160 if ( PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
161 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
162 {
163 PGM_UNLOCK(pVM);
164
165 return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysReadExternalEMT, 5,
166 pVM, &GCPhys, pvBuf, cbRead, enmOrigin);
167 }
168 Assert(!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage));
169
170 /*
171 * Simple stuff, go ahead.
172 */
173 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
174 if (cb > cbRead)
175 cb = cbRead;
176 PGMPAGEMAPLOCK PgMpLck;
177 const void *pvSrc;
178 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
179 if (RT_SUCCESS(rc))
180 {
181 memcpy(pvBuf, pvSrc, cb);
182 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
183 }
184 else
185 {
186 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
187 pRam->GCPhys + off, pPage, rc));
188 memset(pvBuf, 0xff, cb);
189 }
190
191 /* next page */
192 if (cb >= cbRead)
193 {
194 PGM_UNLOCK(pVM);
195 return VINF_SUCCESS;
196 }
197 cbRead -= cb;
198 off += cb;
199 GCPhys += cb;
200 pvBuf = (char *)pvBuf + cb;
201 } /* walk pages in ram range. */
202 }
203 else
204 {
205 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
206
207 /*
208 * Unassigned address space.
209 */
210 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
211 if (cb >= cbRead)
212 {
213 memset(pvBuf, 0xff, cbRead);
214 break;
215 }
216 memset(pvBuf, 0xff, cb);
217
218 cbRead -= cb;
219 pvBuf = (char *)pvBuf + cb;
220 GCPhys += cb;
221 }
222 } /* Ram range walk */
223
224 PGM_UNLOCK(pVM);
225
226 return VINF_SUCCESS;
227}
228
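/* Usage sketch (illustrative, not part of the original source): a non-EMT
 * caller -- say a device worker thread that already holds a valid PVM pointer
 * -- reading a small block of guest physical memory.  PGMACCESSORIGIN_DEVICE
 * is assumed to be the appropriate origin for such a caller.
 */
#if 0
static void examplePhysReadFromWorkerThread(PVM pVM, RTGCPHYS GCPhysSrc)
{
    uint8_t abBuf[256];
    int rc = PGMR3PhysReadExternal(pVM, GCPhysSrc, abBuf, sizeof(abBuf), PGMACCESSORIGIN_DEVICE);
    if (RT_SUCCESS(rc))
    {
        /* Note that unassigned address space reads back as 0xff, see the loop above. */
    }
    else
        LogRel(("examplePhysReadFromWorkerThread: %Rrc\n", rc));
}
#endif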
229
230/**
231 * EMT worker for PGMR3PhysWriteExternal.
232 */
233static DECLCALLBACK(int) pgmR3PhysWriteExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, const void *pvBuf, size_t cbWrite,
234 PGMACCESSORIGIN enmOrigin)
235{
236 /** @todo VERR_EM_NO_MEMORY */
237 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, *pGCPhys, pvBuf, cbWrite, enmOrigin);
238 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
239 return VINF_SUCCESS;
240}
241
242
243/**
244 * Write to physical memory, external users.
245 *
246 * @returns VBox status code.
247 * @retval VINF_SUCCESS.
248 * @retval VERR_EM_NO_MEMORY.
249 *
250 * @param pVM The cross context VM structure.
251 * @param GCPhys Physical address to write to.
252 * @param pvBuf What to write.
253 * @param cbWrite How many bytes to write.
254 * @param enmOrigin Who is calling.
255 *
256 * @thread Any but EMTs.
257 */
258VMMDECL(int) PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
259{
260 VM_ASSERT_OTHER_THREAD(pVM);
261
262 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites,
263 ("Calling PGMR3PhysWriteExternal after pgmR3Save()! GCPhys=%RGp cbWrite=%#x enmOrigin=%d\n",
264 GCPhys, cbWrite, enmOrigin));
265 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
266 LogFlow(("PGMR3PhysWriteExternal: %RGp %d\n", GCPhys, cbWrite));
267
268 PGM_LOCK_VOID(pVM);
269
270 /*
271 * Copy loop on ram ranges, stop when we hit something difficult.
272 */
273 for (;;)
274 {
275 PPGMRAMRANGE const pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
276
277 /* Inside range or not? */
278 if (pRam && GCPhys >= pRam->GCPhys)
279 {
280 /*
281 * Must work our way thru this page by page.
282 */
283 RTGCPTR off = GCPhys - pRam->GCPhys;
284 while (off < pRam->cb)
285 {
286 RTGCPTR iPage = off >> GUEST_PAGE_SHIFT;
287 PPGMPAGE pPage = &pRam->aPages[iPage];
288
289 /*
 290 * If the page is problematic, we have to do the work on the EMT.
 291 *
 292 * Allocating writable pages and access handlers are
 293 * problematic; write monitored pages are simple and can be
 294 * dealt with here.
295 */
296 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
297 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
298 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
299 {
300 if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
301 && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
302 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
303 else
304 {
305 PGM_UNLOCK(pVM);
306
307 return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysWriteExternalEMT, 5,
308 pVM, &GCPhys, pvBuf, cbWrite, enmOrigin);
309 }
310 }
311 Assert(!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage));
312
313 /*
314 * Simple stuff, go ahead.
315 */
316 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
317 if (cb > cbWrite)
318 cb = cbWrite;
319 PGMPAGEMAPLOCK PgMpLck;
320 void *pvDst;
321 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
322 if (RT_SUCCESS(rc))
323 {
324 memcpy(pvDst, pvBuf, cb);
325 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
326 }
327 else
328 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
329 pRam->GCPhys + off, pPage, rc));
330
331 /* next page */
332 if (cb >= cbWrite)
333 {
334 PGM_UNLOCK(pVM);
335 return VINF_SUCCESS;
336 }
337
338 cbWrite -= cb;
339 off += cb;
340 GCPhys += cb;
341 pvBuf = (const char *)pvBuf + cb;
342 } /* walk pages in ram range */
343 }
344 else
345 {
346 /*
347 * Unassigned address space, skip it.
348 */
349 if (!pRam)
350 break;
351 size_t cb = pRam->GCPhys - GCPhys;
352 if (cb >= cbWrite)
353 break;
354 cbWrite -= cb;
355 pvBuf = (const char *)pvBuf + cb;
356 GCPhys += cb;
357 }
358 } /* Ram range walk */
359
360 PGM_UNLOCK(pVM);
361 return VINF_SUCCESS;
362}
363
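/* Usage sketch (illustrative, not part of the original source): the mirror
 * image of the read example, a non-EMT caller writing guest memory.  Per the
 * assertion above, this must not be used once pgmR3Save() has set
 * fNoMorePhysWrites.
 */
#if 0
static void examplePhysWriteFromWorkerThread(PVM pVM, RTGCPHYS GCPhysDst, const void *pvData, size_t cbData)
{
    int rc = PGMR3PhysWriteExternal(pVM, GCPhysDst, pvData, cbData, PGMACCESSORIGIN_DEVICE);
    AssertLogRelRC(rc); /* Writes to unassigned space are silently skipped by the loop above. */
}
#endif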
364
365/*********************************************************************************************************************************
366* Mapping Guest Physical Memory *
367*********************************************************************************************************************************/
368
369/**
370 * VMR3ReqCall worker for PGMR3PhysGCPhys2CCPtrExternal to make pages writable.
371 *
372 * @returns see PGMR3PhysGCPhys2CCPtrExternal
373 * @param pVM The cross context VM structure.
374 * @param pGCPhys Pointer to the guest physical address.
375 * @param ppv Where to store the mapping address.
376 * @param pLock Where to store the lock.
377 */
378static DECLCALLBACK(int) pgmR3PhysGCPhys2CCPtrDelegated(PVM pVM, PRTGCPHYS pGCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
379{
380 /*
381 * Just hand it to PGMPhysGCPhys2CCPtr and check that it's not a page with
382 * an access handler after it succeeds.
383 */
384 int rc = PGM_LOCK(pVM);
385 AssertRCReturn(rc, rc);
386
387 rc = PGMPhysGCPhys2CCPtr(pVM, *pGCPhys, ppv, pLock);
388 if (RT_SUCCESS(rc))
389 {
390 PPGMPAGEMAPTLBE pTlbe;
391 int rc2 = pgmPhysPageQueryTlbe(pVM, *pGCPhys, &pTlbe);
392 AssertFatalRC(rc2);
393 PPGMPAGE pPage = pTlbe->pPage;
394 if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
395 {
396 PGMPhysReleasePageMappingLock(pVM, pLock);
397 rc = VERR_PGM_PHYS_PAGE_RESERVED;
398 }
399 else if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
400#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
401 || pgmPoolIsDirtyPage(pVM, *pGCPhys)
402#endif
403 )
404 {
405 /* We *must* flush any corresponding pgm pool page here, otherwise we'll
406 * not be informed about writes and keep bogus gst->shw mappings around.
407 */
408 pgmPoolFlushPageByGCPhys(pVM, *pGCPhys);
409 Assert(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage));
410 /** @todo r=bird: return VERR_PGM_PHYS_PAGE_RESERVED here if it still has
411 * active handlers, see the PGMR3PhysGCPhys2CCPtrExternal docs. */
412 }
413 }
414
415 PGM_UNLOCK(pVM);
416 return rc;
417}
418
419
420/**
421 * Requests the mapping of a guest page into ring-3, external threads.
422 *
423 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
424 * release it.
425 *
426 * This API will assume your intention is to write to the page, and will
427 * therefore replace shared and zero pages. If you do not intend to modify the
428 * page, use the PGMR3PhysGCPhys2CCPtrReadOnlyExternal() API.
429 *
430 * @returns VBox status code.
431 * @retval VINF_SUCCESS on success.
 432 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
433 * backing or if the page has any active access handlers. The caller
434 * must fall back on using PGMR3PhysWriteExternal.
435 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
436 *
437 * @param pVM The cross context VM structure.
438 * @param GCPhys The guest physical address of the page that should be mapped.
439 * @param ppv Where to store the address corresponding to GCPhys.
440 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
441 *
442 * @remark Avoid calling this API from within critical sections (other than the
 443 * PGM one) because of the deadlock risk when we have to delegate the
 444 * task to an EMT.
445 * @thread Any.
446 */
447VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrExternal(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
448{
449 AssertPtr(ppv);
450 AssertPtr(pLock);
451
452 Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
453
454 int rc = PGM_LOCK(pVM);
455 AssertRCReturn(rc, rc);
456
457 /*
458 * Query the Physical TLB entry for the page (may fail).
459 */
460 PPGMPAGEMAPTLBE pTlbe;
461 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
462 if (RT_SUCCESS(rc))
463 {
464 PPGMPAGE pPage = pTlbe->pPage;
465 if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
466 rc = VERR_PGM_PHYS_PAGE_RESERVED;
467 else
468 {
469 /*
470 * If the page is shared, the zero page, or being write monitored
 471 * it must be converted to a page that's writable if possible.
472 * We can only deal with write monitored pages here, the rest have
473 * to be on an EMT.
474 */
475 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
476 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
477#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
478 || pgmPoolIsDirtyPage(pVM, GCPhys)
479#endif
480 )
481 {
482 if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
483 && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
484#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
485 && !pgmPoolIsDirtyPage(pVM, GCPhys) /** @todo we're very likely doing this twice. */
486#endif
487 )
488 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
489 else
490 {
491 PGM_UNLOCK(pVM);
492
493 return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
494 pVM, &GCPhys, ppv, pLock);
495 }
496 }
497
498 /*
499 * Now, just perform the locking and calculate the return address.
500 */
501 PPGMPAGEMAP pMap = pTlbe->pMap;
502 if (pMap)
503 pMap->cRefs++;
504
505 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
506 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
507 {
508 if (cLocks == 0)
509 pVM->pgm.s.cWriteLockedPages++;
510 PGM_PAGE_INC_WRITE_LOCKS(pPage);
511 }
512 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
513 {
514 PGM_PAGE_INC_WRITE_LOCKS(pPage);
515 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
516 if (pMap)
517 pMap->cRefs++; /* Extra ref to prevent it from going away. */
518 }
519
520 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
521 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
522 pLock->pvMap = pMap;
523 }
524 }
525
526 PGM_UNLOCK(pVM);
527 return rc;
528}
529
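/* Usage sketch (illustrative, not part of the original source): mapping a
 * guest page for writing from an external thread and, as the documentation
 * above prescribes, falling back on PGMR3PhysWriteExternal when the page is
 * reserved or has active access handlers.
 */
#if 0
static int exampleWriteViaMapping(PVM pVM, RTGCPHYS GCPhys, const void *pvData, size_t cbData)
{
    Assert(cbData <= GUEST_PAGE_SIZE - (GCPhys & GUEST_PAGE_OFFSET_MASK)); /* single page only */
    void          *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMR3PhysGCPhys2CCPtrExternal(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(pv, pvData, cbData);
        PGMPhysReleasePageMappingLock(pVM, &Lock); /* release ASAP */
    }
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        rc = PGMR3PhysWriteExternal(pVM, GCPhys, pvData, cbData, PGMACCESSORIGIN_DEVICE);
    return rc;
}
#endif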
530
531/**
532 * Requests the mapping of a guest page into ring-3, external threads.
533 *
534 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
535 * release it.
536 *
537 * @returns VBox status code.
538 * @retval VINF_SUCCESS on success.
 539 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
 540 * backing or if the page has an active ALL access handler. The caller
541 * must fall back on using PGMPhysRead.
542 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
543 *
544 * @param pVM The cross context VM structure.
545 * @param GCPhys The guest physical address of the page that should be mapped.
546 * @param ppv Where to store the address corresponding to GCPhys.
547 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
548 *
549 * @remark Avoid calling this API from within critical sections (other than
550 * the PGM one) because of the deadlock risk.
551 * @thread Any.
552 */
553VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
554{
555 int rc = PGM_LOCK(pVM);
556 AssertRCReturn(rc, rc);
557
558 /*
559 * Query the Physical TLB entry for the page (may fail).
560 */
561 PPGMPAGEMAPTLBE pTlbe;
562 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
563 if (RT_SUCCESS(rc))
564 {
565 PPGMPAGE pPage = pTlbe->pPage;
566#if 1
 567 /* MMIO pages don't have any readable backing. */
568 if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
569 rc = VERR_PGM_PHYS_PAGE_RESERVED;
570#else
571 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
572 rc = VERR_PGM_PHYS_PAGE_RESERVED;
573#endif
574 else
575 {
576 /*
577 * Now, just perform the locking and calculate the return address.
578 */
579 PPGMPAGEMAP pMap = pTlbe->pMap;
580 if (pMap)
581 pMap->cRefs++;
582
583 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
584 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
585 {
586 if (cLocks == 0)
587 pVM->pgm.s.cReadLockedPages++;
588 PGM_PAGE_INC_READ_LOCKS(pPage);
589 }
590 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
591 {
592 PGM_PAGE_INC_READ_LOCKS(pPage);
593 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
594 if (pMap)
595 pMap->cRefs++; /* Extra ref to prevent it from going away. */
596 }
597
598 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
599 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
600 pLock->pvMap = pMap;
601 }
602 }
603
604 PGM_UNLOCK(pVM);
605 return rc;
606}
607
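/* Usage sketch (illustrative, not part of the original source): the read-only
 * counterpart, falling back on PGMR3PhysReadExternal (which handles the EMT
 * delegation) when no direct mapping can be handed out.
 */
#if 0
static int exampleReadViaMapping(PVM pVM, RTGCPHYS GCPhys, void *pvDst, size_t cbData)
{
    Assert(cbData <= GUEST_PAGE_SIZE - (GCPhys & GUEST_PAGE_OFFSET_MASK)); /* single page only */
    void const    *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMR3PhysGCPhys2CCPtrReadOnlyExternal(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(pvDst, pv, cbData);
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        rc = PGMR3PhysReadExternal(pVM, GCPhys, pvDst, cbData, PGMACCESSORIGIN_DEVICE);
    return rc;
}
#endif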
608
609/**
 610 * Requests the mapping of multiple guest pages into ring-3, external threads.
611 *
612 * When you're done with the pages, call PGMPhysBulkReleasePageMappingLock()
613 * ASAP to release them.
614 *
615 * This API will assume your intention is to write to the pages, and will
616 * therefore replace shared and zero pages. If you do not intend to modify the
617 * pages, use the PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal() API.
618 *
619 * @returns VBox status code.
620 * @retval VINF_SUCCESS on success.
621 * @retval VERR_PGM_PHYS_PAGE_RESERVED if any of the pages has no physical
 622 * backing or if any of the pages has any active access
623 * handlers. The caller must fall back on using PGMR3PhysWriteExternal.
624 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if @a paGCPhysPages contains
625 * an invalid physical address.
626 *
627 * @param pVM The cross context VM structure.
628 * @param cPages Number of pages to lock.
629 * @param paGCPhysPages The guest physical address of the pages that
630 * should be mapped (@a cPages entries).
631 * @param papvPages Where to store the ring-3 mapping addresses
632 * corresponding to @a paGCPhysPages.
633 * @param paLocks Where to store the locking information that
634 * pfnPhysBulkReleasePageMappingLock needs (@a cPages
635 * in length).
636 *
637 * @remark Avoid calling this API from within critical sections (other than the
 638 * PGM one) because of the deadlock risk when we have to delegate the
639 * task to an EMT.
640 * @thread Any.
641 */
642VMMR3DECL(int) PGMR3PhysBulkGCPhys2CCPtrExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
643 void **papvPages, PPGMPAGEMAPLOCK paLocks)
644{
645 Assert(cPages > 0);
646 AssertPtr(papvPages);
647 AssertPtr(paLocks);
648
649 Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
650
651 int rc = PGM_LOCK(pVM);
652 AssertRCReturn(rc, rc);
653
654 /*
655 * Lock the pages one by one.
656 * The loop body is similar to PGMR3PhysGCPhys2CCPtrExternal.
657 */
658 int32_t cNextYield = 128;
659 uint32_t iPage;
660 for (iPage = 0; iPage < cPages; iPage++)
661 {
662 if (--cNextYield > 0)
663 { /* likely */ }
664 else
665 {
666 PGM_UNLOCK(pVM);
667 ASMNopPause();
668 PGM_LOCK_VOID(pVM);
669 cNextYield = 128;
670 }
671
672 /*
673 * Query the Physical TLB entry for the page (may fail).
674 */
675 PPGMPAGEMAPTLBE pTlbe;
676 rc = pgmPhysPageQueryTlbe(pVM, paGCPhysPages[iPage], &pTlbe);
677 if (RT_SUCCESS(rc))
678 { }
679 else
680 break;
681 PPGMPAGE pPage = pTlbe->pPage;
682
683 /*
684 * No MMIO or active access handlers.
685 */
686 if ( !PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)
687 && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
688 { }
689 else
690 {
691 rc = VERR_PGM_PHYS_PAGE_RESERVED;
692 break;
693 }
694
695 /*
696 * The page must be in the allocated state and not be a dirty pool page.
697 * We can handle converting a write monitored page to an allocated one, but
698 * anything more complicated must be delegated to an EMT.
699 */
700 bool fDelegateToEmt = false;
701 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
702#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
703 fDelegateToEmt = pgmPoolIsDirtyPage(pVM, paGCPhysPages[iPage]);
704#else
705 fDelegateToEmt = false;
706#endif
707 else if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
708 {
709#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
710 if (!pgmPoolIsDirtyPage(pVM, paGCPhysPages[iPage]))
711 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, paGCPhysPages[iPage]);
712 else
713 fDelegateToEmt = true;
714#endif
715 }
716 else
717 fDelegateToEmt = true;
718 if (!fDelegateToEmt)
719 { }
720 else
721 {
722 /* We could do this delegation in bulk, but considered too much work vs gain. */
723 PGM_UNLOCK(pVM);
724 rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
725 pVM, &paGCPhysPages[iPage], &papvPages[iPage], &paLocks[iPage]);
726 PGM_LOCK_VOID(pVM);
727 if (RT_FAILURE(rc))
728 break;
729 cNextYield = 128;
730 }
731
732 /*
733 * Now, just perform the locking and address calculation.
734 */
735 PPGMPAGEMAP pMap = pTlbe->pMap;
736 if (pMap)
737 pMap->cRefs++;
738
739 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
740 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
741 {
742 if (cLocks == 0)
743 pVM->pgm.s.cWriteLockedPages++;
744 PGM_PAGE_INC_WRITE_LOCKS(pPage);
745 }
746 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
747 {
748 PGM_PAGE_INC_WRITE_LOCKS(pPage);
749 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", paGCPhysPages[iPage], pPage));
750 if (pMap)
751 pMap->cRefs++; /* Extra ref to prevent it from going away. */
752 }
753
754 papvPages[iPage] = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(paGCPhysPages[iPage] & GUEST_PAGE_OFFSET_MASK));
755 paLocks[iPage].uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
756 paLocks[iPage].pvMap = pMap;
757 }
758
759 PGM_UNLOCK(pVM);
760
761 /*
762 * On failure we must unlock any pages we managed to get already.
763 */
764 if (RT_FAILURE(rc) && iPage > 0)
765 PGMPhysBulkReleasePageMappingLocks(pVM, iPage, paLocks);
766
767 return rc;
768}
769
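/* Usage sketch (illustrative, not part of the original source): bulk-mapping a
 * small scatter list of guest pages for writing and releasing all the locks in
 * one go afterwards.
 */
#if 0
static int exampleBulkFillPages(PVM pVM, PCRTGCPHYS paGCPhysPages, uint32_t cPages, uint8_t bFill)
{
    /* Fixed-size arrays keep the sketch simple; a real caller would size these to match cPages. */
    void          *apvPages[16];
    PGMPAGEMAPLOCK aLocks[16];
    AssertReturn(cPages <= RT_ELEMENTS(apvPages), VERR_INVALID_PARAMETER);

    int rc = PGMR3PhysBulkGCPhys2CCPtrExternal(pVM, cPages, paGCPhysPages, apvPages, aLocks);
    if (RT_SUCCESS(rc))
    {
        for (uint32_t i = 0; i < cPages; i++)
            memset(apvPages[i], bFill, GUEST_PAGE_SIZE);
        PGMPhysBulkReleasePageMappingLocks(pVM, cPages, aLocks);
    }
    /* On failure the function above has already released any locks it managed to take. */
    return rc;
}
#endif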
770
771/**
 772 * Requests the mapping of multiple guest pages into ring-3, for reading only,
773 * external threads.
774 *
775 * When you're done with the pages, call PGMPhysReleasePageMappingLock() ASAP
776 * to release them.
777 *
778 * @returns VBox status code.
779 * @retval VINF_SUCCESS on success.
780 * @retval VERR_PGM_PHYS_PAGE_RESERVED if any of the pages has no physical
 781 * backing or if any of the pages has an active ALL access
 782 * handler. The caller must fall back on using PGMR3PhysReadExternal.
783 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if @a paGCPhysPages contains
784 * an invalid physical address.
785 *
786 * @param pVM The cross context VM structure.
787 * @param cPages Number of pages to lock.
788 * @param paGCPhysPages The guest physical address of the pages that
789 * should be mapped (@a cPages entries).
790 * @param papvPages Where to store the ring-3 mapping addresses
791 * corresponding to @a paGCPhysPages.
792 * @param paLocks Where to store the lock information that
793 * pfnPhysReleasePageMappingLock needs (@a cPages
794 * in length).
795 *
796 * @remark Avoid calling this API from within critical sections (other than
797 * the PGM one) because of the deadlock risk.
798 * @thread Any.
799 */
800VMMR3DECL(int) PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
801 void const **papvPages, PPGMPAGEMAPLOCK paLocks)
802{
803 Assert(cPages > 0);
804 AssertPtr(papvPages);
805 AssertPtr(paLocks);
806
807 Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
808
809 int rc = PGM_LOCK(pVM);
810 AssertRCReturn(rc, rc);
811
812 /*
813 * Lock the pages one by one.
814 * The loop body is similar to PGMR3PhysGCPhys2CCPtrReadOnlyExternal.
815 */
816 int32_t cNextYield = 256;
817 uint32_t iPage;
818 for (iPage = 0; iPage < cPages; iPage++)
819 {
820 if (--cNextYield > 0)
821 { /* likely */ }
822 else
823 {
824 PGM_UNLOCK(pVM);
825 ASMNopPause();
826 PGM_LOCK_VOID(pVM);
827 cNextYield = 256;
828 }
829
830 /*
831 * Query the Physical TLB entry for the page (may fail).
832 */
833 PPGMPAGEMAPTLBE pTlbe;
834 rc = pgmPhysPageQueryTlbe(pVM, paGCPhysPages[iPage], &pTlbe);
835 if (RT_SUCCESS(rc))
836 { }
837 else
838 break;
839 PPGMPAGE pPage = pTlbe->pPage;
840
841 /*
842 * No MMIO or active all access handlers, everything else can be accessed.
843 */
844 if ( !PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)
845 && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
846 { }
847 else
848 {
849 rc = VERR_PGM_PHYS_PAGE_RESERVED;
850 break;
851 }
852
853 /*
854 * Now, just perform the locking and address calculation.
855 */
856 PPGMPAGEMAP pMap = pTlbe->pMap;
857 if (pMap)
858 pMap->cRefs++;
859
860 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
861 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
862 {
863 if (cLocks == 0)
864 pVM->pgm.s.cReadLockedPages++;
865 PGM_PAGE_INC_READ_LOCKS(pPage);
866 }
867 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
868 {
869 PGM_PAGE_INC_READ_LOCKS(pPage);
870 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", paGCPhysPages[iPage], pPage));
871 if (pMap)
872 pMap->cRefs++; /* Extra ref to prevent it from going away. */
873 }
874
875 papvPages[iPage] = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(paGCPhysPages[iPage] & GUEST_PAGE_OFFSET_MASK));
876 paLocks[iPage].uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
877 paLocks[iPage].pvMap = pMap;
878 }
879
880 PGM_UNLOCK(pVM);
881
882 /*
883 * On failure we must unlock any pages we managed to get already.
884 */
885 if (RT_FAILURE(rc) && iPage > 0)
886 PGMPhysBulkReleasePageMappingLocks(pVM, iPage, paLocks);
887
888 return rc;
889}
890
891
892/**
893 * Converts a GC physical address to a HC ring-3 pointer, with some
894 * additional checks.
895 *
896 * @returns VBox status code.
897 * @retval VINF_SUCCESS on success.
898 * @retval VINF_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
899 * access handler of some kind.
900 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
901 * accesses or is odd in any way.
902 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
903 *
904 * @param pVM The cross context VM structure.
905 * @param GCPhys The GC physical address to convert. Since this is only
906 * used for filling the REM TLB, the A20 mask must be
907 * applied before calling this API.
908 * @param fWritable Whether write access is required.
909 * @param ppv Where to store the pointer corresponding to GCPhys on
910 * success.
911 */
912VMMR3DECL(int) PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
913{
914 PGM_LOCK_VOID(pVM);
915 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
916
917 PPGMRAMRANGE pRam;
918 PPGMPAGE pPage;
919 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
920 if (RT_SUCCESS(rc))
921 {
922 if (PGM_PAGE_IS_BALLOONED(pPage))
923 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
924 else if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
925 rc = VINF_SUCCESS;
926 else
927 {
928 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
929 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
930 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
931 {
932 /** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
933 * in -norawr0 mode. */
934 if (fWritable)
935 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
936 }
937 else
938 {
 939 /* Temporarily disabled physical handler(s); since the recompiler
 940 doesn't get notified when they're reset, we'll have to pretend they're
 941 operating normally. */
942 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
943 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
944 else
945 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
946 }
947 }
948 if (RT_SUCCESS(rc))
949 {
950 int rc2;
951
952 /* Make sure what we return is writable. */
953 if (fWritable)
954 switch (PGM_PAGE_GET_STATE(pPage))
955 {
956 case PGM_PAGE_STATE_ALLOCATED:
957 break;
958 case PGM_PAGE_STATE_BALLOONED:
959 AssertFailed();
960 break;
961 case PGM_PAGE_STATE_ZERO:
962 case PGM_PAGE_STATE_SHARED:
963 if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
964 break;
965 RT_FALL_THRU();
966 case PGM_PAGE_STATE_WRITE_MONITORED:
967 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
968 AssertLogRelRCReturn(rc2, rc2);
969 break;
970 }
971
972 /* Get a ring-3 mapping of the address. */
973 PPGMPAGER3MAPTLBE pTlbe;
974 rc2 = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
975 AssertLogRelRCReturn(rc2, rc2);
976 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
977 /** @todo mapping/locking hell; this isn't horribly efficient since
978 * pgmPhysPageLoadIntoTlb will repeat the lookup we've done here. */
979
980 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
981 }
982 else
983 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
984
985 /* else: handler catching all access, no pointer returned. */
986 }
987 else
988 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
989
990 PGM_UNLOCK(pVM);
991 return rc;
992}
993
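/* Usage sketch (illustrative, not part of the original source): resolving a
 * page for a software TLB and acting on the three-way outcome documented
 * above.  GCPhys is assumed to have been A20-masked by the caller.
 */
#if 0
static void exampleFillSoftTlb(PVM pVM, RTGCPHYS GCPhys, bool fWritable)
{
    void *pv = NULL;
    int rc = PGMR3PhysTlbGCPhys2Ptr(pVM, GCPhys, fWritable, &pv);
    if (rc == VINF_SUCCESS)
    {
        /* pv may be accessed directly; if fWritable was set, the page has been made writable. */
    }
    else if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
    {
        /* pv is usable for reads, but writes must go through PGMPhysWrite so handlers fire. */
    }
    else
    {
        /* VERR_PGM_PHYS_TLB_CATCH_ALL / VERR_PGM_PHYS_TLB_UNASSIGNED: no direct pointer. */
    }
}
#endif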
994
995
996/*********************************************************************************************************************************
997* RAM Range Management *
998*********************************************************************************************************************************/
999
1000/**
1001 * Given the range @a GCPhys thru @a GCPhysLast, find overlapping RAM range or
1002 * the correct insertion point.
1003 *
1004 * @returns Pointer to overlapping RAM range if found, NULL if not.
1005 * @param pVM The cross context VM structure.
1006 * @param GCPhys The address of the first byte in the range.
1007 * @param GCPhysLast The address of the last byte in the range.
1008 * @param pidxInsert Where to return the lookup table index to insert the
1009 * range at when returning NULL. Set to UINT32_MAX when
1010 * returning the pointer to an overlapping range.
1011 * @note Caller must own the PGM lock.
1012 */
1013static PPGMRAMRANGE pgmR3PhysRamRangeFindOverlapping(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, uint32_t *pidxInsert)
1014{
1015 PGM_LOCK_ASSERT_OWNER(pVM);
1016 uint32_t iStart = 0;
1017 uint32_t iEnd = pVM->pgm.s.RamRangeUnion.cLookupEntries;
1018 for (;;)
1019 {
1020 uint32_t idxLookup = iStart + (iEnd - iStart) / 2;
1021 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]);
1022 if (GCPhysLast < GCPhysEntryFirst)
1023 {
1024 if (idxLookup > iStart)
1025 iEnd = idxLookup;
1026 else
1027 {
1028 *pidxInsert = idxLookup;
1029 return NULL;
1030 }
1031 }
1032 else
1033 {
1034 RTGCPHYS const GCPhysEntryLast = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast;
1035 if (GCPhys > GCPhysEntryLast)
1036 {
1037 idxLookup += 1;
1038 if (idxLookup < iEnd)
1039 iStart = idxLookup;
1040 else
1041 {
1042 *pidxInsert = idxLookup;
1043 return NULL;
1044 }
1045 }
1046 else
1047 {
1048 /* overlap */
1049 Assert(GCPhysEntryLast > GCPhys && GCPhysEntryFirst < GCPhysLast);
1050 *pidxInsert = UINT32_MAX;
1051 return pVM->pgm.s.apRamRanges[PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup])];
1052 }
1053 }
1054 }
1055}
1056
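/* Worked example (illustrative, not part of the original source): assume the
 * lookup table holds three entries
 *      [0] 0x00000000..0x0009ffff
 *      [1] 0x00100000..0x3fffffff
 *      [2] 0xfffc0000..0xffffffff
 * and we search for GCPhys=0x40000000, GCPhysLast=0x40000fff.  The first probe
 * lands on entry [1]; the request lies above it, so the search narrows to [2].
 * The second probe finds the request entirely below [2] and cannot move any
 * further left, so the function returns NULL with *pidxInsert = 2, i.e. the
 * new range would be inserted between entries [1] and [2].
 */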
1057
1058/**
1059 * Given the range @a GCPhys thru @a GCPhysLast, find the lookup table entry
1060 * that's overlapping it.
1061 *
1062 * @returns The lookup table index of the overlapping entry, UINT32_MAX if not
1063 * found.
1064 * @param pVM The cross context VM structure.
1065 * @param GCPhys The address of the first byte in the range.
1066 * @param GCPhysLast The address of the last byte in the range.
1067 * @note Caller must own the PGM lock.
1068 */
1069static uint32_t pgmR3PhysRamRangeFindOverlappingIndex(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
1070{
1071 PGM_LOCK_ASSERT_OWNER(pVM);
1072 uint32_t iStart = 0;
1073 uint32_t iEnd = pVM->pgm.s.RamRangeUnion.cLookupEntries;
1074 for (;;)
1075 {
1076 uint32_t idxLookup = iStart + (iEnd - iStart) / 2;
1077 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]);
1078 if (GCPhysLast < GCPhysEntryFirst)
1079 {
1080 if (idxLookup > iStart)
1081 iEnd = idxLookup;
1082 else
1083 return UINT32_MAX;
1084 }
1085 else
1086 {
1087 RTGCPHYS const GCPhysEntryLast = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast;
1088 if (GCPhys > GCPhysEntryLast)
1089 {
1090 idxLookup += 1;
1091 if (idxLookup < iEnd)
1092 iStart = idxLookup;
1093 else
1094 return UINT32_MAX;
1095 }
1096 else
1097 {
1098 /* overlap */
1099 Assert(GCPhysEntryLast > GCPhys && GCPhysEntryFirst < GCPhysLast);
1100 return idxLookup;
1101 }
1102 }
1103 }
1104}
1105
1106
1107/**
1108 * Insert @a pRam into the lookup table.
1109 *
1110 * @returns VBox status code.
1111 * @param pVM The cross context VM structure.
 1112 * @param pRam The RAM range to insert into the lookup table.
 * @param GCPhys The guest physical address to map the range at.
 1113 * @param pidxLookup Optional lookup table hint. This is updated.
1114 * @note Caller must own PGM lock.
1115 */
1116static int pgmR3PhysRamRangeInsertLookup(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, uint32_t *pidxLookup)
1117{
1118 PGM_LOCK_ASSERT_OWNER(pVM);
1119#ifdef DEBUG_bird
1120 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, true /*fRamRelaxed*/);
1121#endif
1122 AssertMsg(pRam->pszDesc, ("%RGp-%RGp\n", pRam->GCPhys, pRam->GCPhysLast));
1123 AssertLogRelMsgReturn( pRam->GCPhys == NIL_RTGCPHYS
1124 && pRam->GCPhysLast == NIL_RTGCPHYS,
1125 ("GCPhys=%RGp; range: GCPhys=%RGp LB %RGp GCPhysLast=%RGp %s\n",
1126 GCPhys, pRam->GCPhys, pRam->cb, pRam->GCPhysLast, pRam->pszDesc),
1127 VERR_ALREADY_EXISTS);
1128 uint32_t const idRamRange = pRam->idRange;
1129 AssertReturn(pVM->pgm.s.apRamRanges[idRamRange] == pRam, VERR_INTERNAL_ERROR_2);
1130
1131 AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
1132 RTGCPHYS const GCPhysLast = GCPhys + pRam->cb - 1U;
1133 AssertReturn(GCPhysLast > GCPhys, VERR_INTERNAL_ERROR_4);
1134 LogFlowFunc(("GCPhys=%RGp LB %RGp GCPhysLast=%RGp id=%#x %s\n", GCPhys, pRam->cb, GCPhysLast, idRamRange, pRam->pszDesc));
1135
1136 /*
1137 * Find the lookup table location if necessary.
1138 */
1139 uint32_t const cLookupEntries = pVM->pgm.s.RamRangeUnion.cLookupEntries;
1140 AssertLogRelMsgReturn(cLookupEntries + 1 < RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup), /* id=0 is unused, so < is correct. */
1141 ("%#x\n", cLookupEntries), VERR_INTERNAL_ERROR_3);
1142
1143 uint32_t idxLookup = pidxLookup ? *pidxLookup : UINT32_MAX;
1144 if (cLookupEntries == 0)
1145 idxLookup = 0; /* special case: empty table */
1146 else
1147 {
1148 if ( idxLookup > cLookupEntries
1149 || ( idxLookup != 0
1150 && pVM->pgm.s.aRamRangeLookup[idxLookup - 1].GCPhysLast >= GCPhys)
1151 || ( idxLookup < cLookupEntries
1152 && PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]) < GCPhysLast))
1153 {
1154 PPGMRAMRANGE pOverlapping = pgmR3PhysRamRangeFindOverlapping(pVM, GCPhys, GCPhysLast, &idxLookup);
1155 AssertLogRelMsgReturn(!pOverlapping,
1156 ("GCPhys=%RGp; GCPhysLast=%RGp %s - overlaps %RGp...%RGp %s\n",
1157 GCPhys, GCPhysLast, pRam->pszDesc,
1158 pOverlapping->GCPhys, pOverlapping->GCPhysLast, pOverlapping->pszDesc),
1159 VERR_PGM_RAM_CONFLICT);
1160 AssertLogRelMsgReturn(idxLookup <= cLookupEntries, ("%#x vs %#x\n", idxLookup, cLookupEntries), VERR_INTERNAL_ERROR_5);
1161 }
1162 /* else we've got a good hint. */
1163 }
1164
1165 /*
1166 * Do the actual job.
1167 *
1168 * The moving of existing table entries is done in a way that allows other
1169 * EMTs to perform concurrent lookups with the updating.
1170 */
1171 bool const fUseAtomic = pVM->enmVMState != VMSTATE_CREATING
1172 && pVM->cCpus > 1
1173#ifdef RT_ARCH_AMD64
1174 && g_CpumHostFeatures.s.fCmpXchg16b
1175#endif
1176 ;
1177
1178 /* Signal that we're modifying the lookup table: */
1179 uint32_t const idGeneration = (pVM->pgm.s.RamRangeUnion.idGeneration + 1) | 1; /* paranoia^3 */
1180 ASMAtomicWriteU32(&pVM->pgm.s.RamRangeUnion.idGeneration, idGeneration);
1181
1182 /* Update the RAM range entry. */
1183 pRam->GCPhys = GCPhys;
1184 pRam->GCPhysLast = GCPhysLast;
1185
1186 /* Do we need to shift any lookup table entries? */
1187 if (idxLookup != cLookupEntries)
1188 {
1189 /* We do. Make a copy of the final entry first. */
1190 uint32_t cToMove = cLookupEntries - idxLookup;
1191 PGMRAMRANGELOOKUPENTRY *pCur = &pVM->pgm.s.aRamRangeLookup[cLookupEntries];
1192 pCur->GCPhysFirstAndId = pCur[-1].GCPhysFirstAndId;
1193 pCur->GCPhysLast = pCur[-1].GCPhysLast;
1194
 1195 /* Then increase the table size. This ensures that anyone starting
 1196 a search from here on sees consistent data. */
1197 ASMAtomicWriteU32(&pVM->pgm.s.RamRangeUnion.cLookupEntries, cLookupEntries + 1);
1198
1199 /* Transfer the rest of the entries. */
1200 cToMove -= 1;
1201 if (cToMove > 0)
1202 {
1203 if (!fUseAtomic)
1204 do
1205 {
1206 pCur -= 1;
1207 pCur->GCPhysFirstAndId = pCur[-1].GCPhysFirstAndId;
1208 pCur->GCPhysLast = pCur[-1].GCPhysLast;
1209 cToMove -= 1;
1210 } while (cToMove > 0);
1211 else
1212 {
1213#if RTASM_HAVE_WRITE_U128 >= 2
1214 do
1215 {
1216 pCur -= 1;
1217 ASMAtomicWriteU128U(&pCur->u128Volatile, pCur[-1].u128Normal);
1218 cToMove -= 1;
1219 } while (cToMove > 0);
1220
1221#else
1222 uint64_t u64PrevLo = pCur[-1].u128Normal.s.Lo;
1223 uint64_t u64PrevHi = pCur[-1].u128Normal.s.Hi;
1224 do
1225 {
1226 pCur -= 1;
1227 uint64_t const u64CurLo = pCur[-1].u128Normal.s.Lo;
1228 uint64_t const u64CurHi = pCur[-1].u128Normal.s.Hi;
1229 uint128_t uOldIgn;
1230 AssertStmt(ASMAtomicCmpXchgU128v2(&pCur->u128Volatile.u, u64CurHi, u64CurLo, u64PrevHi, u64PrevLo, &uOldIgn),
1231 (pCur->u128Volatile.s.Lo = u64CurLo, pCur->u128Volatile.s.Hi = u64CurHi));
1232 u64PrevLo = u64CurLo;
1233 u64PrevHi = u64CurHi;
1234 cToMove -= 1;
1235 } while (cToMove > 0);
1236#endif
1237 }
1238 }
1239 }
1240
1241 /*
1242 * Write the new entry.
1243 */
1244 PGMRAMRANGELOOKUPENTRY *pInsert = &pVM->pgm.s.aRamRangeLookup[idxLookup];
1245 if (!fUseAtomic)
1246 {
1247 pInsert->GCPhysFirstAndId = idRamRange | GCPhys;
1248 pInsert->GCPhysLast = GCPhysLast;
1249 }
1250 else
1251 {
1252 PGMRAMRANGELOOKUPENTRY NewEntry;
1253 NewEntry.GCPhysFirstAndId = idRamRange | GCPhys;
1254 NewEntry.GCPhysLast = GCPhysLast;
1255 ASMAtomicWriteU128v2(&pInsert->u128Volatile.u, NewEntry.u128Normal.s.Hi, NewEntry.u128Normal.s.Lo);
1256 }
1257
1258 /*
1259 * Update the generation and count in one go, signaling the end of the updating.
1260 */
1261 PGM::PGMRAMRANGEGENANDLOOKUPCOUNT GenAndCount;
1262 GenAndCount.cLookupEntries = cLookupEntries + 1;
1263 GenAndCount.idGeneration = idGeneration + 1;
1264 ASMAtomicWriteU64(&pVM->pgm.s.RamRangeUnion.u64Combined, GenAndCount.u64Combined);
1265
1266 if (pidxLookup)
1267 *pidxLookup = idxLookup + 1;
1268
1269#ifdef DEBUG_bird
1270 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, false /*fRamRelaxed*/);
1271#endif
1272 return VINF_SUCCESS;
1273}
1274
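/* Illustrative sketch (not part of the original source): the odd/even
 * idGeneration handshake above is a sequence-lock style protocol.  A lock-free
 * reader -- the real ones live elsewhere in PGM -- would be expected to retry
 * along these lines until it observes an even, unchanged generation:
 */
#if 0
static uint32_t exampleReadLookupEntryCount(PVM pVM)
{
    uint32_t idGen, cEntries;
    do
    {
        idGen    = ASMAtomicUoReadU32(&pVM->pgm.s.RamRangeUnion.idGeneration);
        cEntries = ASMAtomicUoReadU32(&pVM->pgm.s.RamRangeUnion.cLookupEntries);
        /* An odd generation means an update is in flight; a changed one means
           we raced an update.  Retry in both cases. */
    } while (   (idGen & 1)
             || idGen != ASMAtomicUoReadU32(&pVM->pgm.s.RamRangeUnion.idGeneration));
    return cEntries;
}
#endif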
1275
1276/**
1277 * Removes @a pRam from the lookup table.
1278 *
1279 * @returns VBox status code.
1280 * @param pVM The cross context VM structure.
 1281 * @param pRam The RAM range to remove from the lookup table.
1282 * @param pidxLookup Optional lookup table hint. This is updated.
1283 * @note Caller must own PGM lock.
1284 */
1285static int pgmR3PhysRamRangeRemoveLookup(PVM pVM, PPGMRAMRANGE pRam, uint32_t *pidxLookup)
1286{
1287 PGM_LOCK_ASSERT_OWNER(pVM);
1288 AssertMsg(pRam->pszDesc, ("%RGp-%RGp\n", pRam->GCPhys, pRam->GCPhysLast));
1289
1290 RTGCPHYS const GCPhys = pRam->GCPhys;
1291 RTGCPHYS const GCPhysLast = pRam->GCPhysLast;
1292 AssertLogRelMsgReturn( GCPhys != NIL_RTGCPHYS
1293 || GCPhysLast != NIL_RTGCPHYS,
1294 ("range: GCPhys=%RGp LB %RGp GCPhysLast=%RGp %s\n", GCPhys, pRam->cb, GCPhysLast, pRam->pszDesc),
1295 VERR_NOT_FOUND);
1296 AssertLogRelMsgReturn( GCPhys != NIL_RTGCPHYS
1297 && GCPhysLast == GCPhys + pRam->cb - 1U
1298 && (GCPhys & GUEST_PAGE_OFFSET_MASK) == 0
1299 && (GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK
1300 && GCPhysLast > GCPhys,
1301 ("range: GCPhys=%RGp LB %RGp GCPhysLast=%RGp %s\n", GCPhys, pRam->cb, GCPhysLast, pRam->pszDesc),
1302 VERR_INTERNAL_ERROR_5);
1303 uint32_t const idRamRange = pRam->idRange;
1304 AssertReturn(pVM->pgm.s.apRamRanges[idRamRange] == pRam, VERR_INTERNAL_ERROR_4);
1305 LogFlowFunc(("GCPhys=%RGp LB %RGp GCPhysLast=%RGp id=%#x %s\n", GCPhys, pRam->cb, GCPhysLast, idRamRange, pRam->pszDesc));
1306
1307 /*
1308 * Find the lookup table location.
1309 */
1310 uint32_t const cLookupEntries = pVM->pgm.s.RamRangeUnion.cLookupEntries;
1311 AssertLogRelMsgReturn( cLookupEntries > 0
1312 && cLookupEntries < RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup), /* id=0 is unused, so < is correct. */
1313 ("%#x\n", cLookupEntries), VERR_INTERNAL_ERROR_3);
1314
1315 uint32_t idxLookup = pidxLookup ? *pidxLookup : UINT32_MAX;
1316 if ( idxLookup >= cLookupEntries
1317 || pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast != GCPhysLast
1318 || pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysFirstAndId != (GCPhys | idRamRange))
1319 {
1320 uint32_t iStart = 0;
1321 uint32_t iEnd = cLookupEntries;
1322 for (;;)
1323 {
1324 idxLookup = iStart + (iEnd - iStart) / 2;
1325 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]);
1326 if (GCPhysLast < GCPhysEntryFirst)
1327 {
1328 AssertLogRelMsgReturn(idxLookup > iStart,
1329 ("range: GCPhys=%RGp LB %RGp GCPhysLast=%RGp %s\n",
1330 GCPhys, pRam->cb, GCPhysLast, pRam->pszDesc),
1331 VERR_NOT_FOUND);
1332 iEnd = idxLookup;
1333 }
1334 else
1335 {
1336 RTGCPHYS const GCPhysEntryLast = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast;
1337 if (GCPhys > GCPhysEntryLast)
1338 {
1339 idxLookup += 1;
1340 AssertLogRelMsgReturn(idxLookup < iEnd,
1341 ("range: GCPhys=%RGp LB %RGp GCPhysLast=%RGp %s\n",
1342 GCPhys, pRam->cb, GCPhysLast, pRam->pszDesc),
1343 VERR_NOT_FOUND);
1344 iStart = idxLookup;
1345 }
1346 else
1347 {
1348 uint32_t const idEntry = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
1349 AssertLogRelMsgReturn( GCPhysEntryFirst == GCPhys
1350 && GCPhysEntryLast == GCPhysLast
1351 && idEntry == idRamRange,
1352 ("Found: %RGp..%RGp id=%#x; Wanted: GCPhys=%RGp LB %RGp GCPhysLast=%RGp id=%#x %s\n",
1353 GCPhysEntryFirst, GCPhysEntryLast, idEntry,
1354 GCPhys, pRam->cb, GCPhysLast, pRam->idRange, pRam->pszDesc),
1355 VERR_NOT_FOUND);
1356 break;
1357 }
1358 }
1359 }
1360 }
1361 /* else we've got a good hint. */
1362
1363 /*
1364 * Do the actual job.
1365 *
1366 * The moving of existing table entries is done in a way that allows other
1367 * EMTs to perform concurrent lookups with the updating.
1368 */
1369 bool const fUseAtomic = pVM->enmVMState != VMSTATE_CREATING
1370 && pVM->cCpus > 1
1371#ifdef RT_ARCH_AMD64
1372 && g_CpumHostFeatures.s.fCmpXchg16b
1373#endif
1374 ;
1375
1376 /* Signal that we're modifying the lookup table: */
1377 uint32_t const idGeneration = (pVM->pgm.s.RamRangeUnion.idGeneration + 1) | 1; /* paranoia^3 */
1378 ASMAtomicWriteU32(&pVM->pgm.s.RamRangeUnion.idGeneration, idGeneration);
1379
1380 /* Do we need to shift any lookup table entries? (This is a lot simpler
1381 than insertion.) */
1382 if (idxLookup + 1U < cLookupEntries)
1383 {
1384 uint32_t cToMove = cLookupEntries - idxLookup - 1U;
1385 PGMRAMRANGELOOKUPENTRY *pCur = &pVM->pgm.s.aRamRangeLookup[idxLookup];
1386 if (!fUseAtomic)
1387 do
1388 {
1389 pCur->GCPhysFirstAndId = pCur[1].GCPhysFirstAndId;
1390 pCur->GCPhysLast = pCur[1].GCPhysLast;
1391 pCur += 1;
1392 cToMove -= 1;
1393 } while (cToMove > 0);
1394 else
1395 {
1396#if RTASM_HAVE_WRITE_U128 >= 2
1397 do
1398 {
1399 ASMAtomicWriteU128U(&pCur->u128Volatile, pCur[1].u128Normal);
1400 pCur += 1;
1401 cToMove -= 1;
1402 } while (cToMove > 0);
1403
1404#else
1405 uint64_t u64PrevLo = pCur->u128Normal.s.Lo;
1406 uint64_t u64PrevHi = pCur->u128Normal.s.Hi;
1407 do
1408 {
1409 uint64_t const u64CurLo = pCur[1].u128Normal.s.Lo;
1410 uint64_t const u64CurHi = pCur[1].u128Normal.s.Hi;
1411 uint128_t uOldIgn;
1412 AssertStmt(ASMAtomicCmpXchgU128v2(&pCur->u128Volatile.u, u64CurHi, u64CurLo, u64PrevHi, u64PrevLo, &uOldIgn),
1413 (pCur->u128Volatile.s.Lo = u64CurLo, pCur->u128Volatile.s.Hi = u64CurHi));
1414 u64PrevLo = u64CurLo;
1415 u64PrevHi = u64CurHi;
1416 pCur += 1;
1417 cToMove -= 1;
1418 } while (cToMove > 0);
1419#endif
1420 }
1421 }
1422
1423 /* Update the RAM range entry to indicate that it is no longer mapped. */
1424 pRam->GCPhys = NIL_RTGCPHYS;
1425 pRam->GCPhysLast = NIL_RTGCPHYS;
1426
1427 /*
1428 * Update the generation and count in one go, signaling the end of the updating.
1429 */
1430 PGM::PGMRAMRANGEGENANDLOOKUPCOUNT GenAndCount;
1431 GenAndCount.cLookupEntries = cLookupEntries - 1;
1432 GenAndCount.idGeneration = idGeneration + 1;
1433 ASMAtomicWriteU64(&pVM->pgm.s.RamRangeUnion.u64Combined, GenAndCount.u64Combined);
1434
1435 if (pidxLookup)
1436 *pidxLookup = idxLookup + 1;
1437
1438 return VINF_SUCCESS;
1439}
1440
1441
1442/**
1443 * Gets the number of ram ranges.
1444 *
1445 * @returns Number of ram ranges. Returns UINT32_MAX if @a pVM is invalid.
1446 * @param pVM The cross context VM structure.
1447 */
1448VMMR3DECL(uint32_t) PGMR3PhysGetRamRangeCount(PVM pVM)
1449{
1450 VM_ASSERT_VALID_EXT_RETURN(pVM, UINT32_MAX);
1451
1452 PGM_LOCK_VOID(pVM);
1453 uint32_t const cRamRanges = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
1454 PGM_UNLOCK(pVM);
1455 return cRamRanges;
1456}
1457
1458
1459/**
1460 * Get information about a range.
1461 *
1462 * @returns VINF_SUCCESS or VERR_OUT_OF_RANGE.
1463 * @param pVM The cross context VM structure.
1464 * @param iRange The ordinal of the range.
1465 * @param pGCPhysStart Where to return the start of the range. Optional.
1466 * @param pGCPhysLast Where to return the address of the last byte in the
1467 * range. Optional.
1468 * @param ppszDesc Where to return the range description. Optional.
 1469 * @param pfIsMmio Where to return whether this is a pure MMIO range.
 1470 * Optional.
1471 */
1472VMMR3DECL(int) PGMR3PhysGetRange(PVM pVM, uint32_t iRange, PRTGCPHYS pGCPhysStart, PRTGCPHYS pGCPhysLast,
1473 const char **ppszDesc, bool *pfIsMmio)
1474{
1475 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1476
1477 PGM_LOCK_VOID(pVM);
1478 uint32_t const cLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
1479 if (iRange < cLookupEntries)
1480 {
1481 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[iRange]);
1482 Assert(idRamRange && idRamRange <= pVM->pgm.s.idRamRangeMax);
1483 PGMRAMRANGE const * const pRamRange = pVM->pgm.s.apRamRanges[idRamRange];
1484 AssertPtr(pRamRange);
1485
1486 if (pGCPhysStart)
1487 *pGCPhysStart = pRamRange->GCPhys;
1488 if (pGCPhysLast)
1489 *pGCPhysLast = pRamRange->GCPhysLast;
1490 if (ppszDesc)
1491 *ppszDesc = pRamRange->pszDesc;
1492 if (pfIsMmio)
1493 *pfIsMmio = !!(pRamRange->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO);
1494
1495 PGM_UNLOCK(pVM);
1496 return VINF_SUCCESS;
1497 }
1498 PGM_UNLOCK(pVM);
1499 return VERR_OUT_OF_RANGE;
1500}
1501
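/* Usage sketch (illustrative, not part of the original source): enumerating
 * the current RAM range layout with the two informational APIs above, e.g.
 * from a debug or info handler.
 */
#if 0
static void exampleDumpRamRanges(PVM pVM)
{
    uint32_t const cRanges = PGMR3PhysGetRamRangeCount(pVM);
    for (uint32_t iRange = 0; iRange < cRanges; iRange++)
    {
        RTGCPHYS    GCPhysStart = NIL_RTGCPHYS;
        RTGCPHYS    GCPhysLast  = NIL_RTGCPHYS;
        const char *pszDesc     = NULL;
        bool        fIsMmio     = false;
        int rc = PGMR3PhysGetRange(pVM, iRange, &GCPhysStart, &GCPhysLast, &pszDesc, &fIsMmio);
        if (RT_SUCCESS(rc))
            LogRel(("#%u: %RGp..%RGp %s%s\n", iRange, GCPhysStart, GCPhysLast, pszDesc, fIsMmio ? " (MMIO)" : ""));
    }
}
#endif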
1502
1503/*********************************************************************************************************************************
1504* RAM *
1505*********************************************************************************************************************************/
1506
1507/**
1508 * Frees the specified RAM page and replaces it with the ZERO page.
1509 *
1510 * This is used by ballooning, remapping MMIO2, RAM reset and state loading.
1511 *
1512 * @param pVM The cross context VM structure.
1513 * @param pReq Pointer to the request. This is NULL when doing a
1514 * bulk free in NEM memory mode.
 1515 * @param pcPendingPages Where the number of pages waiting to be freed is
1516 * kept. This will normally be incremented. This is
1517 * NULL when doing a bulk free in NEM memory mode.
1518 * @param pPage Pointer to the page structure.
1519 * @param GCPhys The guest physical address of the page, if applicable.
1520 * @param enmNewType New page type for NEM notification, since several
1521 * callers will change the type upon successful return.
1522 *
1523 * @remarks The caller must own the PGM lock.
1524 */
1525int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys,
1526 PGMPAGETYPE enmNewType)
1527{
1528 /*
1529 * Assert sanity.
1530 */
1531 PGM_LOCK_ASSERT_OWNER(pVM);
1532 if (RT_UNLIKELY( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
1533 && PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW))
1534 {
1535 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
1536 return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
1537 }
1538
1539 /** @todo What about ballooning of large pages??! */
1540 Assert( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
1541 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED);
1542
1543 if ( PGM_PAGE_IS_ZERO(pPage)
1544 || PGM_PAGE_IS_BALLOONED(pPage))
1545 return VINF_SUCCESS;
1546
1547 const uint32_t idPage = PGM_PAGE_GET_PAGEID(pPage);
1548 Log3(("pgmPhysFreePage: idPage=%#x GCPhys=%RGp pPage=%R[pgmpage]\n", idPage, GCPhys, pPage));
1549 if (RT_UNLIKELY(!PGM_IS_IN_NEM_MODE(pVM)
1550 ? idPage == NIL_GMM_PAGEID
1551 || idPage > GMM_PAGEID_LAST
1552 || PGM_PAGE_GET_CHUNKID(pPage) == NIL_GMM_CHUNKID
1553 : idPage != NIL_GMM_PAGEID))
1554 {
1555 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
1556 return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, pPage);
1557 }
1558#ifdef VBOX_WITH_NATIVE_NEM
1559 const RTHCPHYS HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
1560#endif
1561
1562 /* update page count stats. */
1563 if (PGM_PAGE_IS_SHARED(pPage))
1564 pVM->pgm.s.cSharedPages--;
1565 else
1566 pVM->pgm.s.cPrivatePages--;
1567 pVM->pgm.s.cZeroPages++;
1568
1569 /* Deal with write monitored pages. */
1570 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1571 {
1572 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1573 pVM->pgm.s.cWrittenToPages++;
1574 }
1575 PGM_PAGE_CLEAR_CODE_PAGE(pVM, pPage); /* No callback needed, IEMTlbInvalidateAllPhysicalAllCpus is called below. */
1576
1577 /*
1578 * pPage = ZERO page.
1579 */
1580 PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
1581 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
1582 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
1583 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_DONTCARE);
1584 PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
1585 PGM_PAGE_SET_TRACKING(pVM, pPage, 0);
1586
1587 /* Flush physical page map TLB entry. */
1588 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
1589 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_FREED); /// @todo move to the perform step.
1590
1591#ifdef VBOX_WITH_PGM_NEM_MODE
1592 /*
1593 * Skip the rest if we're doing a bulk free in NEM memory mode.
1594 */
1595 if (!pReq)
1596 return VINF_SUCCESS;
1597 AssertLogRelReturn(!pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1598#endif
1599
1600#ifdef VBOX_WITH_NATIVE_NEM
1601 /* Notify NEM. */
1602 /** @todo Remove this one? */
1603 if (VM_IS_NEM_ENABLED(pVM))
1604 {
1605 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1606 NEMHCNotifyPhysPageChanged(pVM, GCPhys, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg, pVM->pgm.s.abZeroPg,
1607 pgmPhysPageCalcNemProtection(pPage, enmNewType), enmNewType, &u2State);
1608 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1609 }
1610#else
1611 RT_NOREF(enmNewType);
1612#endif
1613
1614 /*
1615 * Make sure it's not in the handy page array.
1616 */
1617 for (uint32_t i = pVM->pgm.s.cHandyPages; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
1618 {
1619 if (pVM->pgm.s.aHandyPages[i].idPage == idPage)
1620 {
1621 pVM->pgm.s.aHandyPages[i].HCPhysGCPhys = NIL_GMMPAGEDESC_PHYS;
1622 pVM->pgm.s.aHandyPages[i].fZeroed = false;
1623 pVM->pgm.s.aHandyPages[i].idPage = NIL_GMM_PAGEID;
1624 break;
1625 }
1626 if (pVM->pgm.s.aHandyPages[i].idSharedPage == idPage)
1627 {
1628 pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
1629 break;
1630 }
1631 }
1632
1633 /*
1634 * Push it onto the page array.
1635 */
1636 uint32_t iPage = *pcPendingPages;
1637 Assert(iPage < PGMPHYS_FREE_PAGE_BATCH_SIZE);
1638 *pcPendingPages += 1;
1639
1640 pReq->aPages[iPage].idPage = idPage;
1641
1642 if (iPage + 1 < PGMPHYS_FREE_PAGE_BATCH_SIZE)
1643 return VINF_SUCCESS;
1644
1645 /*
1646 * Flush the pages.
1647 */
1648 int rc = GMMR3FreePagesPerform(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE);
1649 if (RT_SUCCESS(rc))
1650 {
1651 GMMR3FreePagesRePrep(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1652 *pcPendingPages = 0;
1653 }
1654 return rc;
1655}
1656
1657
1658/**
1659 * Frees a range of pages, replacing them with ZERO pages of the specified type.
1660 *
1661 * @returns VBox status code.
1662 * @param pVM The cross context VM structure.
 1663 * @param pRam The RAM range in which the pages reside.
1664 * @param GCPhys The address of the first page.
1665 * @param GCPhysLast The address of the last page.
1666 * @param pvMmio2 Pointer to the ring-3 mapping of any MMIO2 memory that
1667 * will replace the pages we're freeing up.
1668 */
1669static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, void *pvMmio2)
1670{
1671 PGM_LOCK_ASSERT_OWNER(pVM);
1672
1673#ifdef VBOX_WITH_PGM_NEM_MODE
1674 /*
1675 * In simplified memory mode we don't actually free the memory,
1676 * we just unmap it and let NEM do any unlocking of it.
1677 */
1678 if (pVM->pgm.s.fNemMode)
1679 {
1680 Assert(VM_IS_NEM_ENABLED(pVM) || VM_IS_EXEC_ENGINE_IEM(pVM));
1681 uint8_t u2State = 0; /* (We don't support UINT8_MAX here.) */
1682 if (VM_IS_NEM_ENABLED(pVM))
1683 {
1684 uint32_t const fNemNotify = (pvMmio2 ? NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 : 0) | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE;
1685 int rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemNotify,
1686 pRam->pbR3 ? pRam->pbR3 + GCPhys - pRam->GCPhys : NULL,
1687 pvMmio2, &u2State, NULL /*puNemRange*/);
1688 AssertLogRelRCReturn(rc, rc);
1689 }
1690
1691 /* Iterate the pages. */
1692 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
1693 uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> GUEST_PAGE_SHIFT) + 1;
1694 while (cPagesLeft-- > 0)
1695 {
1696 int rc = pgmPhysFreePage(pVM, NULL, NULL, pPageDst, GCPhys, PGMPAGETYPE_MMIO);
1697 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
1698
1699 PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO);
1700 PGM_PAGE_SET_NEM_STATE(pPageDst, u2State);
1701
1702 GCPhys += GUEST_PAGE_SIZE;
1703 pPageDst++;
1704 }
1705 return VINF_SUCCESS;
1706 }
1707#else /* !VBOX_WITH_PGM_NEM_MODE */
1708 RT_NOREF(pvMmio2);
1709#endif /* !VBOX_WITH_PGM_NEM_MODE */
1710
1711 /*
1712 * Regular mode.
1713 */
1714 /* Prepare. */
1715 uint32_t cPendingPages = 0;
1716 PGMMFREEPAGESREQ pReq;
1717 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1718 AssertLogRelRCReturn(rc, rc);
1719
1720#ifdef VBOX_WITH_NATIVE_NEM
1721 /* Tell NEM up-front. */
1722 uint8_t u2State = UINT8_MAX;
1723 if (VM_IS_NEM_ENABLED(pVM))
1724 {
1725 uint32_t const fNemNotify = (pvMmio2 ? NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 : 0) | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE;
1726 rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemNotify, NULL, pvMmio2,
1727 &u2State, NULL /*puNemRange*/);
1728 AssertLogRelRCReturnStmt(rc, GMMR3FreePagesCleanup(pReq), rc);
1729 }
1730#endif
1731
1732 /* Iterate the pages. */
1733 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
1734 uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> GUEST_PAGE_SHIFT) + 1;
1735 while (cPagesLeft-- > 0)
1736 {
1737 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys, PGMPAGETYPE_MMIO);
1738 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
1739
1740 PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO);
1741#ifdef VBOX_WITH_NATIVE_NEM
1742 if (u2State != UINT8_MAX)
1743 PGM_PAGE_SET_NEM_STATE(pPageDst, u2State);
1744#endif
1745
1746 GCPhys += GUEST_PAGE_SIZE;
1747 pPageDst++;
1748 }
1749
1750 /* Finish pending and cleanup. */
1751 if (cPendingPages)
1752 {
1753 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
1754 AssertLogRelRCReturn(rc, rc);
1755 }
1756 GMMR3FreePagesCleanup(pReq);
1757
1758 return rc;
1759}
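/*
 * A minimal sketch of the batched page-free pattern used above (prepare, queue
 * pages into the batch, perform, clean up). It is illustrative only; pPage and
 * GCPhys are assumed to identify the RAM page being dropped:
 *
 *      uint32_t         cPendingPages = 0;
 *      PGMMFREEPAGESREQ pReq          = NULL;
 *      int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
 *      AssertLogRelRCReturn(rc, rc);
 *
 *      rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, PGMPAGETYPE_RAM);
 *      AssertLogRelRCReturn(rc, rc);
 *
 *      if (cPendingPages)
 *      {
 *          rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
 *          AssertLogRelRCReturn(rc, rc);
 *      }
 *      GMMR3FreePagesCleanup(pReq);
 */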
1760
1761
1762/**
1763 * Wrapper around VMMR0_DO_PGM_PHYS_ALLOCATE_RAM_RANGE.
1764 */
1765static int pgmR3PhysAllocateRamRange(PVM pVM, PVMCPU pVCpu, uint32_t cGuestPages, uint32_t fFlags, PPGMRAMRANGE *ppRamRange)
1766{
1767 int rc;
1768 PGMPHYSALLOCATERAMRANGEREQ AllocRangeReq;
1769 AllocRangeReq.idNewRange = UINT32_MAX / 4;
1770 if (SUPR3IsDriverless())
1771 rc = pgmPhysRamRangeAllocCommon(pVM, cGuestPages, fFlags, &AllocRangeReq.idNewRange);
1772 else
1773 {
1774 AllocRangeReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
1775 AllocRangeReq.Hdr.cbReq = sizeof(AllocRangeReq);
1776 AllocRangeReq.cbGuestPage = GUEST_PAGE_SIZE;
1777 AllocRangeReq.cGuestPages = cGuestPages;
1778 AllocRangeReq.fFlags = fFlags;
1779 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_PGM_PHYS_ALLOCATE_RAM_RANGE, 0 /*u64Arg*/, &AllocRangeReq.Hdr);
1780 }
1781 if (RT_SUCCESS(rc))
1782 {
1783 Assert(AllocRangeReq.idNewRange != 0);
1784 Assert(AllocRangeReq.idNewRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
1785 AssertPtr(pVM->pgm.s.apRamRanges[AllocRangeReq.idNewRange]);
1786 *ppRamRange = pVM->pgm.s.apRamRanges[AllocRangeReq.idNewRange];
1787 return VINF_SUCCESS;
1788 }
1789
1790 *ppRamRange = NULL;
1791 return rc;
1792}
1793
1794
1795/**
1796 * PGMR3PhysRegisterRam worker that initializes and links a RAM range.
1797 *
1798 * In NEM mode, this will allocate the pages backing the RAM range and this may
1799 * fail. NEM registration may also fail. (In regular HM mode it won't fail.)
1800 *
1801 * @returns VBox status code.
1802 * @param pVM The cross context VM structure.
1803 * @param pNew The new RAM range.
1804 * @param GCPhys The address of the RAM range.
1805 * @param GCPhysLast The last address of the RAM range.
1806 * @param pszDesc The description.
1807 * @param pidxLookup The lookup table insertion point.
1808 */
1809static int pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
1810 const char *pszDesc, uint32_t *pidxLookup)
1811{
1812 /*
1813 * Initialize the range.
1814 */
1815 Assert(pNew->cb == GCPhysLast - GCPhys + 1U); RT_NOREF(GCPhysLast);
1816 pNew->pszDesc = pszDesc;
1817 pNew->uNemRange = UINT32_MAX;
1818 pNew->pbR3 = NULL;
1819 pNew->paLSPages = NULL;
1820
1821 uint32_t const cPages = pNew->cb >> GUEST_PAGE_SHIFT;
1822#ifdef VBOX_WITH_PGM_NEM_MODE
1823 if (!pVM->pgm.s.fNemMode)
1824#endif
1825 {
1826 RTGCPHYS iPage = cPages;
1827 while (iPage-- > 0)
1828 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
1829
1830 /* Update the page count stats. */
1831 pVM->pgm.s.cZeroPages += cPages;
1832 pVM->pgm.s.cAllPages += cPages;
1833 }
1834#ifdef VBOX_WITH_PGM_NEM_MODE
1835 else
1836 {
1837 int rc = SUPR3PageAlloc(RT_ALIGN_Z(pNew->cb, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT,
1838 pVM->pgm.s.fUseLargePages ? SUP_PAGE_ALLOC_F_LARGE_PAGES : 0, (void **)&pNew->pbR3);
1839 if (RT_FAILURE(rc))
1840 return rc;
1841
1842 RTGCPHYS iPage = cPages;
1843 while (iPage-- > 0)
1844 PGM_PAGE_INIT(&pNew->aPages[iPage], UINT64_C(0x0000fffffffff000), NIL_GMM_PAGEID,
1845 PGMPAGETYPE_RAM, PGM_PAGE_STATE_ALLOCATED);
1846
1847 /* Update the page count stats. */
1848 pVM->pgm.s.cPrivatePages += cPages;
1849 pVM->pgm.s.cAllPages += cPages;
1850 }
1851#endif
1852
1853 /*
1854 * Insert it into the lookup table.
1855 */
1856 int rc = pgmR3PhysRamRangeInsertLookup(pVM, pNew, GCPhys, pidxLookup);
1857 AssertRCReturn(rc, rc);
1858
1859#ifdef VBOX_WITH_NATIVE_NEM
1860 /*
1861 * Notify NEM now that it has been linked.
1862 *
1863 * As above, it is assumed that on failure the VM creation will fail, so
1864 * no extra cleanup is needed here.
1865 */
1866 if (VM_IS_NEM_ENABLED(pVM))
1867 {
1868 uint8_t u2State = UINT8_MAX;
1869 rc = NEMR3NotifyPhysRamRegister(pVM, GCPhys, pNew->cb, pNew->pbR3, &u2State, &pNew->uNemRange);
1870 if (RT_SUCCESS(rc) && u2State != UINT8_MAX)
1871 pgmPhysSetNemStateForPages(&pNew->aPages[0], cPages, u2State);
1872 return rc;
1873 }
1874#endif
1875 return VINF_SUCCESS;
1876}
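/*
 * Worked example for the NEM mode branch above (figures chosen only for
 * illustration): the SUPR3PageAlloc backing is allocated in host page units,
 * so the guest size is rounded up to the host page size first. With 4 KiB
 * guest pages and a hypothetical 16 KiB host page size, a three page range
 * (cb = 0x3000) is aligned up to 0x4000 and backed by a single host page.
 */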
1877
1878
1879/**
1880 * Worker for PGMR3PhysRegisterRam called with the PGM lock.
1881 *
1882 * The caller releases the lock.
1883 */
1884static int pgmR3PhysRegisterRamWorker(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc,
1885 uint32_t const cRamRanges, RTGCPHYS const GCPhysLast)
1886{
1887#ifdef VBOX_STRICT
1888 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, false /*fRamRelaxed*/);
1889#endif
1890
1891 /*
1892 * Check that we've got enough free RAM ranges.
1893 */
1894 AssertLogRelMsgReturn((uint64_t)pVM->pgm.s.idRamRangeMax + cRamRanges + 1 <= RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup),
1895 ("idRamRangeMax=%#RX32 vs GCPhys=%RGp cb=%RGp / %#RX32 ranges (%s)\n",
1896 pVM->pgm.s.idRamRangeMax, GCPhys, cb, cRamRanges, pszDesc),
1897 VERR_PGM_TOO_MANY_RAM_RANGES);
1898
1899 /*
1900 * Check for conflicts via the lookup table. We search it backwards,
1901 * assuming that memory is added in ascending order by address.
1902 */
1903 uint32_t idxLookup = pVM->pgm.s.RamRangeUnion.cLookupEntries;
1904 while (idxLookup)
1905 {
1906 if (GCPhys > pVM->pgm.s.aRamRangeLookup[idxLookup - 1].GCPhysLast)
1907 break;
1908 idxLookup--;
1909 RTGCPHYS const GCPhysCur = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]);
1910 AssertLogRelMsgReturn( GCPhysLast < GCPhysCur
1911 || GCPhys > pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast,
1912 ("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
1913 GCPhys, GCPhysLast, pszDesc, GCPhysCur, pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast,
1914 pVM->pgm.s.apRamRanges[PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup])]->pszDesc),
1915 VERR_PGM_RAM_CONFLICT);
1916 }
1917
1918 /*
1919     * Register it with GMM (the API is strict about this).
1920 */
1921 const RTGCPHYS cPages = cb >> GUEST_PAGE_SHIFT;
1922 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
1923 if (RT_FAILURE(rc))
1924 return rc;
1925
1926 /*
1927 * Create the required chunks.
1928 */
1929 RTGCPHYS cPagesLeft = cPages;
1930 RTGCPHYS GCPhysChunk = GCPhys;
1931 uint32_t idxChunk = 0;
1932 while (cPagesLeft > 0)
1933 {
1934 uint32_t cPagesInChunk = cPagesLeft;
1935 if (cPagesInChunk > PGM_MAX_PAGES_PER_RAM_RANGE)
1936 cPagesInChunk = PGM_MAX_PAGES_PER_RAM_RANGE;
1937
1938 const char *pszDescChunk = idxChunk == 0
1939 ? pszDesc
1940 : MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s (#%u)", pszDesc, idxChunk + 1);
1941 AssertReturn(pszDescChunk, VERR_NO_MEMORY);
1942
1943 /*
1944 * Allocate a RAM range.
1945 */
1946 PPGMRAMRANGE pNew = NULL;
1947 rc = pgmR3PhysAllocateRamRange(pVM, pVCpu, cPagesInChunk, 0 /*fFlags*/, &pNew);
1948 AssertLogRelMsgReturn(RT_SUCCESS(rc),
1949 ("pgmR3PhysAllocateRamRange failed: GCPhysChunk=%RGp cPagesInChunk=%#RX32 (%s): %Rrc\n",
1950 GCPhysChunk, cPagesInChunk, pszDescChunk, rc),
1951 rc);
1952
1953 /*
1954 * Ok, init and link the range.
1955 */
1956 rc = pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhysChunk,
1957 GCPhysChunk + ((RTGCPHYS)cPagesInChunk << GUEST_PAGE_SHIFT) - 1U,
1958 pszDescChunk, &idxLookup);
1959 AssertLogRelMsgReturn(RT_SUCCESS(rc),
1960 ("pgmR3PhysInitAndLinkRamRange failed: GCPhysChunk=%RGp cPagesInChunk=%#RX32 (%s): %Rrc\n",
1961 GCPhysChunk, cPagesInChunk, pszDescChunk, rc),
1962 rc);
1963
1964 /* advance */
1965 GCPhysChunk += (RTGCPHYS)cPagesInChunk << GUEST_PAGE_SHIFT;
1966 cPagesLeft -= cPagesInChunk;
1967 idxChunk++;
1968 }
1969
1970 return rc;
1971}
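/*
 * Worked example for the chunking loop above (the chunk limit is assumed
 * purely for illustration): if PGM_MAX_PAGES_PER_RAM_RANGE were 0x4000 pages,
 * registering a 0xa000 page range described as "Base RAM" would yield three
 * chunks of 0x4000, 0x4000 and 0x2000 pages, with the descriptions
 * "Base RAM", "Base RAM (#2)" and "Base RAM (#3)".
 */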
1972
1973
1974/**
1975 * Sets up a RAM range.
1976 *
1977 * This will check for conflicting registrations, make a resource reservation
1978 * for the memory (with GMM), and setup the per-page tracking structures
1979 * (PGMPAGE).
1980 *
1981 * @returns VBox status code.
1982 * @param pVM The cross context VM structure.
1983 * @param GCPhys The physical address of the RAM.
1984 * @param cb The size of the RAM.
1985 * @param pszDesc The description - not copied, so, don't free or change it.
1986 */
1987VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
1988{
1989 /*
1990 * Validate input.
1991 */
1992 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
1993 AssertReturn(RT_ALIGN_T(GCPhys, GUEST_PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
1994 AssertReturn(RT_ALIGN_T(cb, GUEST_PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
1995 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
1996 RTGCPHYS const GCPhysLast = GCPhys + (cb - 1);
1997 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
1998 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1999 PVMCPU const pVCpu = VMMGetCpu(pVM);
2000 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
2001 AssertReturn(pVCpu->idCpu == 0, VERR_VM_THREAD_NOT_EMT);
2002
2003 /*
2004 * Calculate the number of RAM ranges required.
2005 * See also pgmPhysMmio2CalcChunkCount.
2006 */
2007 uint32_t const cPagesPerChunk = PGM_MAX_PAGES_PER_RAM_RANGE;
2008 uint32_t const cRamRanges = (uint32_t)(((cb >> GUEST_PAGE_SHIFT) + cPagesPerChunk - 1) / cPagesPerChunk);
2009 AssertLogRelMsgReturn(cRamRanges * (RTGCPHYS)cPagesPerChunk * GUEST_PAGE_SIZE >= cb,
2010 ("cb=%RGp cRamRanges=%#RX32 cPagesPerChunk=%#RX32\n", cb, cRamRanges, cPagesPerChunk),
2011 VERR_OUT_OF_RANGE);
2012
2013 PGM_LOCK_VOID(pVM);
2014
2015 int rc = pgmR3PhysRegisterRamWorker(pVM, pVCpu, GCPhys, cb, pszDesc, cRamRanges, GCPhysLast);
2016#ifdef VBOX_STRICT
2017 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, false /*fRamRelaxed*/);
2018#endif
2019
2020 PGM_UNLOCK(pVM);
2021 return rc;
2022}
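/*
 * A minimal usage sketch for PGMR3PhysRegisterRam; the address, size and
 * description are made up, and the call is assumed to happen on EMT(0)
 * during VM construction:
 *
 *      int rc = PGMR3PhysRegisterRam(pVM, 0 /*GCPhys*/, 64 * _1M /*cb*/, "Base RAM");
 *      AssertLogRelRCReturn(rc, rc);
 */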
2023
2024
2025/**
2026 * Worker called by PGMR3InitFinalize if we're configured to pre-allocate RAM.
2027 *
2028 * We do this late in the init process so that all the ROM and MMIO ranges have
2029 * been registered already and we don't go wasting memory on them.
2030 *
2031 * @returns VBox status code.
2032 *
2033 * @param pVM The cross context VM structure.
2034 */
2035int pgmR3PhysRamPreAllocate(PVM pVM)
2036{
2037 Assert(pVM->pgm.s.fRamPreAlloc);
2038 Log(("pgmR3PhysRamPreAllocate: enter\n"));
2039#ifdef VBOX_WITH_PGM_NEM_MODE
2040 AssertLogRelReturn(!pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
2041#endif
2042
2043 /*
2044 * Walk the RAM ranges and allocate all RAM pages, halt at
2045 * the first allocation error.
2046 */
2047 uint64_t cPages = 0;
2048 uint64_t NanoTS = RTTimeNanoTS();
2049 PGM_LOCK_VOID(pVM);
2050 uint32_t const cLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
2051 for (uint32_t idxLookup = 0; idxLookup < cLookupEntries; idxLookup++)
2052 {
2053 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
2054 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
2055 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange];
2056 AssertContinue(pRam);
2057
2058 PPGMPAGE pPage = &pRam->aPages[0];
2059 RTGCPHYS GCPhys = pRam->GCPhys;
2060 uint32_t cLeft = pRam->cb >> GUEST_PAGE_SHIFT;
2061 while (cLeft-- > 0)
2062 {
2063 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
2064 {
2065 switch (PGM_PAGE_GET_STATE(pPage))
2066 {
2067 case PGM_PAGE_STATE_ZERO:
2068 {
2069 int rc = pgmPhysAllocPage(pVM, pPage, GCPhys);
2070 if (RT_FAILURE(rc))
2071 {
2072 LogRel(("PGM: RAM Pre-allocation failed at %RGp (in %s) with rc=%Rrc\n", GCPhys, pRam->pszDesc, rc));
2073 PGM_UNLOCK(pVM);
2074 return rc;
2075 }
2076 cPages++;
2077 break;
2078 }
2079
2080 case PGM_PAGE_STATE_BALLOONED:
2081 case PGM_PAGE_STATE_ALLOCATED:
2082 case PGM_PAGE_STATE_WRITE_MONITORED:
2083 case PGM_PAGE_STATE_SHARED:
2084 /* nothing to do here. */
2085 break;
2086 }
2087 }
2088
2089 /* next */
2090 pPage++;
2091 GCPhys += GUEST_PAGE_SIZE;
2092 }
2093 }
2094 PGM_UNLOCK(pVM);
2095 NanoTS = RTTimeNanoTS() - NanoTS;
2096
2097 LogRel(("PGM: Pre-allocated %llu pages in %llu ms\n", cPages, NanoTS / 1000000));
2098 Log(("pgmR3PhysRamPreAllocate: returns VINF_SUCCESS\n"));
2099 return VINF_SUCCESS;
2100}
2101
2102
2103/**
2104 * Checks shared page checksums.
2105 *
2106 * @param pVM The cross context VM structure.
2107 */
2108void pgmR3PhysAssertSharedPageChecksums(PVM pVM)
2109{
2110#ifdef VBOX_STRICT
2111 PGM_LOCK_VOID(pVM);
2112
2113 if (pVM->pgm.s.cSharedPages > 0)
2114 {
2115 /*
2116 * Walk the ram ranges.
2117 */
2118 uint32_t const cLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
2119 for (uint32_t idxLookup = 0; idxLookup < cLookupEntries; idxLookup++)
2120 {
2121 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
2122 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
2123 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange];
2124 AssertContinue(pRam);
2125
2126 uint32_t iPage = pRam->cb >> GUEST_PAGE_SHIFT;
2127 AssertMsg(((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) == pRam->cb,
2128 ("%RGp %RGp\n", (RTGCPHYS)iPage << GUEST_PAGE_SHIFT, pRam->cb));
2129
2130 while (iPage-- > 0)
2131 {
2132 PPGMPAGE pPage = &pRam->aPages[iPage];
2133 if (PGM_PAGE_IS_SHARED(pPage))
2134 {
2135 uint32_t u32Checksum = pPage->s.u2Unused0/* | ((uint32_t)pPage->s.u2Unused1 << 8)*/;
2136 if (!u32Checksum)
2137 {
2138 RTGCPHYS GCPhysPage = pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
2139 void const *pvPage;
2140 int rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhysPage, &pvPage);
2141 if (RT_SUCCESS(rc))
2142 {
2143 uint32_t u32Checksum2 = RTCrc32(pvPage, GUEST_PAGE_SIZE);
2144# if 0
2145 AssertMsg((u32Checksum2 & /*UINT32_C(0x00000303)*/ 0x3) == u32Checksum, ("GCPhysPage=%RGp\n", GCPhysPage));
2146# else
2147 if ((u32Checksum2 & /*UINT32_C(0x00000303)*/ 0x3) == u32Checksum)
2148 LogFlow(("shpg %#x @ %RGp %#x [OK]\n", PGM_PAGE_GET_PAGEID(pPage), GCPhysPage, u32Checksum2));
2149 else
2150 AssertMsgFailed(("shpg %#x @ %RGp %#x\n", PGM_PAGE_GET_PAGEID(pPage), GCPhysPage, u32Checksum2));
2151# endif
2152 }
2153 else
2154 AssertRC(rc);
2155 }
2156 }
2157
2158 } /* for each page */
2159
2160 } /* for each ram range */
2161 }
2162
2163 PGM_UNLOCK(pVM);
2164#endif /* VBOX_STRICT */
2165 NOREF(pVM);
2166}
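/*
 * Sketch of the shared page checksum comparison performed above: only two
 * bits of the CRC-32 fit into the spare PGMPAGE bits, so the check amounts
 * to roughly this (names taken from the code above):
 *
 *      uint32_t const u32Crc   = RTCrc32(pvPage, GUEST_PAGE_SIZE);
 *      bool const     fMatches = (u32Crc & 0x3) == pPage->s.u2Unused0;
 */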
2167
2168
2169/**
2170 * Resets the physical memory state.
2171 *
2172 * ASSUMES that the caller owns the PGM lock.
2173 *
2174 * @returns VBox status code.
2175 * @param pVM The cross context VM structure.
2176 */
2177int pgmR3PhysRamReset(PVM pVM)
2178{
2179 PGM_LOCK_ASSERT_OWNER(pVM);
2180
2181 /* Reset the memory balloon. */
2182 int rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_RESET, 0);
2183 AssertRC(rc);
2184
2185#ifdef VBOX_WITH_PAGE_SHARING
2186 /* Clear all registered shared modules. */
2187 pgmR3PhysAssertSharedPageChecksums(pVM);
2188 rc = GMMR3ResetSharedModules(pVM);
2189 AssertRC(rc);
2190#endif
2191 /* Reset counters. */
2192 pVM->pgm.s.cReusedSharedPages = 0;
2193 pVM->pgm.s.cBalloonedPages = 0;
2194
2195 return VINF_SUCCESS;
2196}
2197
2198
2199/**
2200 * Resets (zeros) the RAM after all devices and components have been reset.
2201 *
2202 * ASSUMES that the caller owns the PGM lock.
2203 *
2204 * @returns VBox status code.
2205 * @param pVM The cross context VM structure.
2206 */
2207int pgmR3PhysRamZeroAll(PVM pVM)
2208{
2209 PGM_LOCK_ASSERT_OWNER(pVM);
2210
2211 /*
2212 * We batch up pages that should be freed instead of calling GMM for
2213 * each and every one of them.
2214 */
2215 uint32_t cPendingPages = 0;
2216 PGMMFREEPAGESREQ pReq;
2217 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2218 AssertLogRelRCReturn(rc, rc);
2219
2220 /*
2221 * Walk the ram ranges.
2222 */
2223 uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U);
2224 for (uint32_t idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++)
2225 {
2226 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange];
2227 Assert(pRam || idRamRange == 0);
2228 if (!pRam) continue;
2229 Assert(pRam->idRange == idRamRange);
2230
2231 uint32_t iPage = pRam->cb >> GUEST_PAGE_SHIFT;
2232 AssertMsg(((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << GUEST_PAGE_SHIFT, pRam->cb));
2233
2234 if ( !pVM->pgm.s.fRamPreAlloc
2235#ifdef VBOX_WITH_PGM_NEM_MODE
2236 && !pVM->pgm.s.fNemMode
2237#endif
2238 && pVM->pgm.s.fZeroRamPagesOnReset)
2239 {
2240 /* Replace all RAM pages by ZERO pages. */
2241 while (iPage-- > 0)
2242 {
2243 PPGMPAGE pPage = &pRam->aPages[iPage];
2244 switch (PGM_PAGE_GET_TYPE(pPage))
2245 {
2246 case PGMPAGETYPE_RAM:
2247                        /* Do not replace pages that are part of a 2 MB contiguous range
2248                           with zero pages; zero them instead. */
2249 if ( PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE
2250 || PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED)
2251 {
2252 void *pvPage;
2253 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pvPage);
2254 AssertLogRelRCReturn(rc, rc);
2255 RT_BZERO(pvPage, GUEST_PAGE_SIZE);
2256 }
2257 else if (PGM_PAGE_IS_BALLOONED(pPage))
2258 {
2259 /* Turn into a zero page; the balloon status is lost when the VM reboots. */
2260 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
2261 }
2262 else if (!PGM_PAGE_IS_ZERO(pPage))
2263 {
2264 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage,
2265 pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), PGMPAGETYPE_RAM);
2266 AssertLogRelRCReturn(rc, rc);
2267 }
2268 break;
2269
2270 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
2271 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: /** @todo perhaps leave the special page alone? I don't think VT-x copes with this code. */
2272 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT),
2273 pRam, true /*fDoAccounting*/, false /*fFlushIemTlbs*/);
2274 break;
2275
2276 case PGMPAGETYPE_MMIO2:
2277 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
2278 case PGMPAGETYPE_ROM:
2279 case PGMPAGETYPE_MMIO:
2280 break;
2281 default:
2282 AssertFailed();
2283 }
2284 } /* for each page */
2285 }
2286 else
2287 {
2288 /* Zero the memory. */
2289 while (iPage-- > 0)
2290 {
2291 PPGMPAGE pPage = &pRam->aPages[iPage];
2292 switch (PGM_PAGE_GET_TYPE(pPage))
2293 {
2294 case PGMPAGETYPE_RAM:
2295 switch (PGM_PAGE_GET_STATE(pPage))
2296 {
2297 case PGM_PAGE_STATE_ZERO:
2298 break;
2299
2300 case PGM_PAGE_STATE_BALLOONED:
2301 /* Turn into a zero page; the balloon status is lost when the VM reboots. */
2302 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
2303 break;
2304
2305 case PGM_PAGE_STATE_SHARED:
2306 case PGM_PAGE_STATE_WRITE_MONITORED:
2307 rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT));
2308 AssertLogRelRCReturn(rc, rc);
2309 RT_FALL_THRU();
2310
2311 case PGM_PAGE_STATE_ALLOCATED:
2312 if (pVM->pgm.s.fZeroRamPagesOnReset)
2313 {
2314 void *pvPage;
2315 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pvPage);
2316 AssertLogRelRCReturn(rc, rc);
2317 RT_BZERO(pvPage, GUEST_PAGE_SIZE);
2318 }
2319 break;
2320 }
2321 break;
2322
2323 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
2324 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: /** @todo perhaps leave the special page alone? I don't think VT-x copes with this code. */
2325 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT),
2326 pRam, true /*fDoAccounting*/, false /*fFlushIemTlbs*/);
2327 break;
2328
2329 case PGMPAGETYPE_MMIO2:
2330 case PGMPAGETYPE_ROM_SHADOW:
2331 case PGMPAGETYPE_ROM:
2332 case PGMPAGETYPE_MMIO:
2333 break;
2334 default:
2335 AssertFailed();
2336
2337 }
2338 } /* for each page */
2339 }
2340 }
2341
2342 /*
2343 * Finish off any pages pending freeing.
2344 */
2345 if (cPendingPages)
2346 {
2347 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2348 AssertLogRelRCReturn(rc, rc);
2349 }
2350 GMMR3FreePagesCleanup(pReq);
2351
2352 /*
2353 * Flush the IEM TLB, just to be sure it really is done.
2354 */
2355 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_ZERO_ALL);
2356
2357 return VINF_SUCCESS;
2358}
2359
2360
2361/**
2362 * Frees all RAM during VM termination
2363 *
2364 * ASSUMES that the caller owns the PGM lock.
2365 *
2366 * @returns VBox status code.
2367 * @param pVM The cross context VM structure.
2368 */
2369int pgmR3PhysRamTerm(PVM pVM)
2370{
2371 PGM_LOCK_ASSERT_OWNER(pVM);
2372
2373 /* Reset the memory balloon. */
2374 int rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_RESET, 0);
2375 AssertRC(rc);
2376
2377#ifdef VBOX_WITH_PAGE_SHARING
2378 /*
2379 * Clear all registered shared modules.
2380 */
2381 pgmR3PhysAssertSharedPageChecksums(pVM);
2382 rc = GMMR3ResetSharedModules(pVM);
2383 AssertRC(rc);
2384
2385 /*
2386     * Flush the handy page updates to make sure no shared pages are hiding
2387 * in there. (Not unlikely if the VM shuts down, apparently.)
2388 */
2389# ifdef VBOX_WITH_PGM_NEM_MODE
2390 if (!pVM->pgm.s.fNemMode)
2391# endif
2392 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_FLUSH_HANDY_PAGES, 0, NULL);
2393#endif
2394
2395 /*
2396 * We batch up pages that should be freed instead of calling GMM for
2397 * each and every one of them.
2398 */
2399 uint32_t cPendingPages = 0;
2400 PGMMFREEPAGESREQ pReq;
2401 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2402 AssertLogRelRCReturn(rc, rc);
2403
2404 /*
2405 * Walk the ram ranges.
2406 */
2407 uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U);
2408 for (uint32_t idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++)
2409 {
2410 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange];
2411 Assert(pRam || idRamRange == 0);
2412 if (!pRam) continue;
2413 Assert(pRam->idRange == idRamRange);
2414
2415 uint32_t iPage = pRam->cb >> GUEST_PAGE_SHIFT;
2416 AssertMsg(((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << GUEST_PAGE_SHIFT, pRam->cb));
2417
2418 while (iPage-- > 0)
2419 {
2420 PPGMPAGE pPage = &pRam->aPages[iPage];
2421 switch (PGM_PAGE_GET_TYPE(pPage))
2422 {
2423 case PGMPAGETYPE_RAM:
2424 /* Free all shared pages. Private pages are automatically freed during GMM VM cleanup. */
2425 /** @todo change this to explicitly free private pages here. */
2426 if (PGM_PAGE_IS_SHARED(pPage))
2427 {
2428 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage,
2429 pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), PGMPAGETYPE_RAM);
2430 AssertLogRelRCReturn(rc, rc);
2431 }
2432 break;
2433
2434 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
2435 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
2436 case PGMPAGETYPE_MMIO2:
2437 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
2438 case PGMPAGETYPE_ROM:
2439 case PGMPAGETYPE_MMIO:
2440 break;
2441 default:
2442 AssertFailed();
2443 }
2444 } /* for each page */
2445 }
2446
2447 /*
2448 * Finish off any pages pending freeing.
2449 */
2450 if (cPendingPages)
2451 {
2452 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2453 AssertLogRelRCReturn(rc, rc);
2454 }
2455 GMMR3FreePagesCleanup(pReq);
2456 return VINF_SUCCESS;
2457}
2458
2459
2460
2461/*********************************************************************************************************************************
2462* MMIO *
2463*********************************************************************************************************************************/
2464
2465/**
2466 * This is the interface IOM is using to register an MMIO region (unmapped).
2467 *
2468 *
2469 * @returns VBox status code.
2470 *
2471 * @param pVM The cross context VM structure.
2472 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2473 * @param cb The size of the MMIO region.
2474 * @param pszDesc The description of the MMIO region.
2475 * @param pidRamRange Where to return the RAM range ID for the MMIO region
2476 * on success.
2477 * @thread EMT(0)
2478 */
2479VMMR3_INT_DECL(int) PGMR3PhysMmioRegister(PVM pVM, PVMCPU pVCpu, RTGCPHYS cb, const char *pszDesc, uint16_t *pidRamRange)
2480{
2481 /*
2482 * Assert assumptions.
2483 */
2484 AssertPtrReturn(pidRamRange, VERR_INVALID_POINTER);
2485 *pidRamRange = UINT16_MAX;
2486 AssertReturn(pVCpu == VMMGetCpu(pVM) && pVCpu->idCpu == 0, VERR_VM_THREAD_NOT_EMT);
2487 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
2488 /// @todo AssertReturn(!pVM->pgm.s.fRamRangesFrozen, VERR_WRONG_ORDER);
2489 AssertReturn(cb <= ((RTGCPHYS)PGM_MAX_PAGES_PER_RAM_RANGE << GUEST_PAGE_SHIFT), VERR_OUT_OF_RANGE);
2490 AssertReturn(!(cb & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2491 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
2492 AssertReturn(*pszDesc != '\0', VERR_INVALID_POINTER);
2493
2494 /*
2495 * Take the PGM lock and allocate an ad-hoc MMIO RAM range.
2496 */
2497 int rc = PGM_LOCK(pVM);
2498 AssertRCReturn(rc, rc);
2499
2500 uint32_t const cPages = cb >> GUEST_PAGE_SHIFT;
2501 PPGMRAMRANGE pNew = NULL;
2502 rc = pgmR3PhysAllocateRamRange(pVM, pVCpu, cPages, PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO, &pNew);
2503 AssertLogRelMsg(RT_SUCCESS(rc), ("pgmR3PhysAllocateRamRange failed: cPages=%#RX32 (%s): %Rrc\n", cPages, pszDesc, rc));
2504 if (RT_SUCCESS(rc))
2505 {
2506 /* Initialize the range. */
2507 pNew->pszDesc = pszDesc;
2508 pNew->uNemRange = UINT32_MAX;
2509 pNew->pbR3 = NULL;
2510 pNew->paLSPages = NULL;
2511 Assert(pNew->fFlags == PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO && pNew->cb == cb);
2512
2513 uint32_t iPage = cPages;
2514 while (iPage-- > 0)
2515 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
2516 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);
2517
2518 /* update the page count stats. */
2519 pVM->pgm.s.cPureMmioPages += cPages;
2520 pVM->pgm.s.cAllPages += cPages;
2521
2522 /*
2523 * Set the return value, release lock and return to IOM.
2524 */
2525 *pidRamRange = pNew->idRange;
2526 }
2527
2528 PGM_UNLOCK(pVM);
2529 return rc;
2530}
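/*
 * A minimal usage sketch for PGMR3PhysMmioRegister; IOM is the intended
 * caller, and the size and description below are made up:
 *
 *      uint16_t idRamRange = UINT16_MAX;
 *      int rc = PGMR3PhysMmioRegister(pVM, pVCpu, 16 * GUEST_PAGE_SIZE, "acme-dev MMIO", &idRamRange);
 *      AssertRCReturn(rc, rc);
 *
 * The returned idRamRange is what PGMR3PhysMmioMap and PGMR3PhysMmioUnmap
 * below take to identify the region.
 */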
2531
2532
2533/**
2534 * Worker for PGMR3PhysMmioMap that's called owning the lock.
2535 */
2536static int pgmR3PhysMmioMapLocked(PVM pVM, PVMCPU pVCpu, RTGCPHYS const GCPhys, RTGCPHYS const cb, RTGCPHYS const GCPhysLast,
2537 PPGMRAMRANGE const pMmioRamRange, PGMPHYSHANDLERTYPE const hType, uint64_t const uUser)
2538{
2539 /* Check that the range isn't mapped already. */
2540 AssertLogRelMsgReturn(pMmioRamRange->GCPhys == NIL_RTGCPHYS,
2541 ("desired %RGp mapping for '%s' - already mapped at %RGp!\n",
2542 GCPhys, pMmioRamRange->pszDesc, pMmioRamRange->GCPhys),
2543 VERR_ALREADY_EXISTS);
2544
2545 /*
2546 * Now, check if this falls into a regular RAM range or if we should use
2547 * the ad-hoc one (idRamRange).
2548 */
2549 int rc;
2550 uint32_t idxInsert = UINT32_MAX;
2551 PPGMRAMRANGE const pOverlappingRange = pgmR3PhysRamRangeFindOverlapping(pVM, GCPhys, GCPhysLast, &idxInsert);
2552 if (pOverlappingRange)
2553 {
2554 /* Simplification: all within the same range. */
2555 AssertLogRelMsgReturn( GCPhys >= pOverlappingRange->GCPhys
2556 && GCPhysLast <= pOverlappingRange->GCPhysLast,
2557 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
2558 GCPhys, GCPhysLast, pMmioRamRange->pszDesc,
2559 pOverlappingRange->GCPhys, pOverlappingRange->GCPhysLast, pOverlappingRange->pszDesc),
2560 VERR_PGM_RAM_CONFLICT);
2561
2562         /* Check that it isn't an ad hoc range, but a real RAM range. */
2563 AssertLogRelMsgReturn(!PGM_RAM_RANGE_IS_AD_HOC(pOverlappingRange),
2564 ("%RGp-%RGp (MMIO/%s) mapping attempt in non-RAM range: %RGp-%RGp (%s)\n",
2565 GCPhys, GCPhysLast, pMmioRamRange->pszDesc,
2566 pOverlappingRange->GCPhys, pOverlappingRange->GCPhysLast, pOverlappingRange->pszDesc),
2567 VERR_PGM_RAM_CONFLICT);
2568
2569 /* Check that it's all RAM or MMIO pages. */
2570 PCPGMPAGE pPage = &pOverlappingRange->aPages[(GCPhys - pOverlappingRange->GCPhys) >> GUEST_PAGE_SHIFT];
2571 uint32_t cLeft = cb >> GUEST_PAGE_SHIFT;
2572 while (cLeft-- > 0)
2573 {
2574 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
2575 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO, /** @todo MMIO type isn't right */
2576 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
2577 GCPhys, GCPhysLast, pMmioRamRange->pszDesc, pOverlappingRange->GCPhys,
2578 PGM_PAGE_GET_TYPE(pPage), pOverlappingRange->pszDesc),
2579 VERR_PGM_RAM_CONFLICT);
2580 pPage++;
2581 }
2582
2583 /*
2584 * Make all the pages in the range MMIO/ZERO pages, freeing any
2585 * RAM pages currently mapped here. This might not be 100% correct
2586 * for PCI memory, but we're doing the same thing for MMIO2 pages.
2587 */
2588 rc = pgmR3PhysFreePageRange(pVM, pOverlappingRange, GCPhys, GCPhysLast, NULL);
2589 AssertRCReturn(rc, rc);
2590
2591 /* Force a PGM pool flush as guest ram references have been changed. */
2592 /** @todo not entirely SMP safe; assuming for now the guest takes
2593          * care of this internally (it does not touch mapped MMIO while changing the
2594 * mapping). */
2595 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2596 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2597 }
2598 else
2599 {
2600 /*
2601 * No RAM range, use the ad hoc one (idRamRange).
2602 *
2603 * Note that we don't have to tell REM about this range because
2604 * PGMHandlerPhysicalRegisterEx will do that for us.
2605 */
2606 AssertLogRelReturn(idxInsert <= pVM->pgm.s.RamRangeUnion.cLookupEntries, VERR_INTERNAL_ERROR_4);
2607 Log(("PGMR3PhysMmioMap: Inserting ad hoc MMIO range #%x for %RGp-%RGp %s\n",
2608 pMmioRamRange->idRange, GCPhys, GCPhysLast, pMmioRamRange->pszDesc));
2609
2610 Assert(PGM_PAGE_GET_TYPE(&pMmioRamRange->aPages[0]) == PGMPAGETYPE_MMIO);
2611
2612 /* We ASSUME that all the pages in the ad-hoc range are in the proper
2613 state and all that and that we don't need to re-initialize them here. */
2614
2615#ifdef VBOX_WITH_NATIVE_NEM
2616 /* Notify NEM. */
2617 if (VM_IS_NEM_ENABLED(pVM))
2618 {
2619 uint8_t u2State = 0; /* (must have valid state as there can't be anything to preserve) */
2620 rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhys, cb, 0 /*fFlags*/, NULL, NULL, &u2State, &pMmioRamRange->uNemRange);
2621 AssertLogRelRCReturn(rc, rc);
2622
2623 uint32_t iPage = cb >> GUEST_PAGE_SHIFT;
2624 while (iPage-- > 0)
2625 PGM_PAGE_SET_NEM_STATE(&pMmioRamRange->aPages[iPage], u2State);
2626 }
2627#endif
2628 /* Insert it into the lookup table (may in theory fail). */
2629 rc = pgmR3PhysRamRangeInsertLookup(pVM, pMmioRamRange, GCPhys, &idxInsert);
2630 }
2631 if (RT_SUCCESS(rc))
2632 {
2633 /*
2634 * Register the access handler.
2635 */
2636 rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, hType, uUser, pMmioRamRange->pszDesc);
2637 if (RT_SUCCESS(rc))
2638 {
2639#ifdef VBOX_WITH_NATIVE_NEM
2640 /* Late NEM notification (currently not used by anyone). */
2641 if (VM_IS_NEM_ENABLED(pVM))
2642 {
2643 if (pOverlappingRange)
2644 rc = NEMR3NotifyPhysMmioExMapLate(pVM, GCPhys, cb, NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE,
2645 pOverlappingRange->pbR3 + (uintptr_t)(GCPhys - pOverlappingRange->GCPhys),
2646 NULL /*pvMmio2*/, NULL /*puNemRange*/);
2647 else
2648 rc = NEMR3NotifyPhysMmioExMapLate(pVM, GCPhys, cb, 0 /*fFlags*/, NULL /*pvRam*/, NULL /*pvMmio2*/,
2649 &pMmioRamRange->uNemRange);
2650 AssertLogRelRC(rc);
2651 }
2652 if (RT_SUCCESS(rc))
2653#endif
2654 {
2655 pgmPhysInvalidatePageMapTLB(pVM);
2656 return VINF_SUCCESS;
2657 }
2658
2659 /*
2660 * Failed, so revert it all as best as we can (the memory content in
2661 * the overlapping case is gone).
2662 */
2663 PGMHandlerPhysicalDeregister(pVM, GCPhys);
2664 }
2665 }
2666
2667 if (!pOverlappingRange)
2668 {
2669#ifdef VBOX_WITH_NATIVE_NEM
2670 /* Notify NEM about the sudden removal of the RAM range we just told it about. */
2671 NEMR3NotifyPhysMmioExUnmap(pVM, GCPhys, cb, 0 /*fFlags*/, NULL /*pvRam*/, NULL /*pvMmio2*/,
2672 NULL /*pu2State*/, &pMmioRamRange->uNemRange);
2673#endif
2674
2675 /* Remove the ad hoc range from the lookup table. */
2676 idxInsert -= 1;
2677 pgmR3PhysRamRangeRemoveLookup(pVM, pMmioRamRange, &idxInsert);
2678 }
2679
2680 pgmPhysInvalidatePageMapTLB(pVM);
2681 return rc;
2682}
2683
2684
2685/**
2686 * This is the interface IOM is using to map an MMIO region.
2687 *
2688 * It will check for conflicts and ensure that a RAM range structure
2689 * is present before calling the PGMR3HandlerPhysicalRegister API to
2690 * register the callbacks.
2691 *
2692 * @returns VBox status code.
2693 *
2694 * @param pVM The cross context VM structure.
2695 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2696 * @param GCPhys The start of the MMIO region.
2697 * @param cb The size of the MMIO region.
2698 * @param idRamRange The RAM range ID for the MMIO region as returned by
2699 * PGMR3PhysMmioRegister().
2700 * @param hType The physical access handler type registration.
2701 * @param uUser The user argument.
2702 * @thread EMT(pVCpu)
2703 */
2704VMMR3_INT_DECL(int) PGMR3PhysMmioMap(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTGCPHYS cb, uint16_t idRamRange,
2705 PGMPHYSHANDLERTYPE hType, uint64_t uUser)
2706{
2707 /*
2708 * Assert on some assumption.
2709 */
2710 VMCPU_ASSERT_EMT(pVCpu);
2711 AssertReturn(!(cb & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2712 AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2713 RTGCPHYS const GCPhysLast = GCPhys + cb - 1U;
2714 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2715#ifdef VBOX_STRICT
2716 PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
2717 Assert(pType);
2718 Assert(pType->enmKind == PGMPHYSHANDLERKIND_MMIO);
2719#endif
2720 AssertReturn(idRamRange <= pVM->pgm.s.idRamRangeMax && idRamRange > 0, VERR_INVALID_HANDLE);
2721 PPGMRAMRANGE const pMmioRamRange = pVM->pgm.s.apRamRanges[idRamRange];
2722 AssertReturn(pMmioRamRange, VERR_INVALID_HANDLE);
2723 AssertReturn(pMmioRamRange->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO, VERR_INVALID_HANDLE);
2724 AssertReturn(pMmioRamRange->cb == cb, VERR_OUT_OF_RANGE);
2725
2726 /*
2727 * Take the PGM lock and do the work.
2728 */
2729 int rc = PGM_LOCK(pVM);
2730 AssertRCReturn(rc, rc);
2731
2732 rc = pgmR3PhysMmioMapLocked(pVM, pVCpu, GCPhys, cb, GCPhysLast, pMmioRamRange, hType, uUser);
2733#ifdef VBOX_STRICT
2734 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, false /*fRamRelaxed*/);
2735#endif
2736
2737 PGM_UNLOCK(pVM);
2738 return rc;
2739}
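/*
 * Sketch continuing the PGMR3PhysMmioRegister example above: the guest
 * address is made up, idRamRange is the ID returned by the registration,
 * and hType/uUser are assumed to be a previously registered MMIO handler
 * type (PGMPHYSHANDLERKIND_MMIO) and its user argument:
 *
 *      int rc = PGMR3PhysMmioMap(pVM, pVCpu, UINT32_C(0xe0000000) /*GCPhys*/,
 *                                16 * GUEST_PAGE_SIZE, idRamRange, hType, uUser);
 *      AssertRCReturn(rc, rc);
 */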
2740
2741
2742/**
2743 * Worker for PGMR3PhysMmioUnmap that's called with the PGM lock held.
2744 */
2745static int pgmR3PhysMmioUnmapLocked(PVM pVM, PVMCPU pVCpu, RTGCPHYS const GCPhys, RTGCPHYS const cb,
2746 RTGCPHYS const GCPhysLast, PPGMRAMRANGE const pMmioRamRange)
2747{
2748 /*
2749 * Lookup the RAM range containing the region to make sure it is actually mapped.
2750 */
2751 uint32_t idxLookup = pgmR3PhysRamRangeFindOverlappingIndex(pVM, GCPhys, GCPhysLast);
2752 AssertLogRelMsgReturn(idxLookup < pVM->pgm.s.RamRangeUnion.cLookupEntries,
2753 ("MMIO range not found at %RGp LB %RGp! (%s)\n", GCPhys, cb, pMmioRamRange->pszDesc),
2754 VERR_NOT_FOUND);
2755
2756 uint32_t const idLookupRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
2757 AssertLogRelReturn(idLookupRange != 0 && idLookupRange <= pVM->pgm.s.idRamRangeMax, VERR_INTERNAL_ERROR_5);
2758 PPGMRAMRANGE const pLookupRange = pVM->pgm.s.apRamRanges[idLookupRange];
2759 AssertLogRelReturn(pLookupRange, VERR_INTERNAL_ERROR_4);
2760
2761 AssertLogRelMsgReturn(pLookupRange == pMmioRamRange || !PGM_RAM_RANGE_IS_AD_HOC(pLookupRange),
2762 ("MMIO unmap mixup at %RGp LB %RGp (%s) vs %RGp LB %RGp (%s)\n",
2763 GCPhys, cb, pMmioRamRange->pszDesc, pLookupRange->GCPhys, pLookupRange->cb, pLookupRange->pszDesc),
2764 VERR_NOT_FOUND);
2765
2766 /*
2767 * Deregister the handler. This should reset any aliases, so an ad hoc
2768 * range will only contain MMIO type pages afterwards.
2769 */
2770 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
2771 if (RT_SUCCESS(rc))
2772 {
2773 if (pLookupRange != pMmioRamRange)
2774 {
2775 /*
2776 * Turn the pages back into RAM pages.
2777 */
2778 Log(("pgmR3PhysMmioUnmapLocked: Reverting MMIO range %RGp-%RGp (%s) in %RGp-%RGp (%s) to RAM.\n",
2779 GCPhys, GCPhysLast, pMmioRamRange->pszDesc,
2780 pLookupRange->GCPhys, pLookupRange->GCPhysLast, pLookupRange->pszDesc));
2781
2782 RTGCPHYS const offRange = GCPhys - pLookupRange->GCPhys;
2783 uint32_t iPage = offRange >> GUEST_PAGE_SHIFT;
2784 uint32_t cLeft = cb >> GUEST_PAGE_SHIFT;
2785 while (cLeft--)
2786 {
2787 PPGMPAGE pPage = &pLookupRange->aPages[iPage];
2788 AssertMsg( (PGM_PAGE_IS_MMIO(pPage) && PGM_PAGE_IS_ZERO(pPage))
2789 //|| PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
2790 //|| PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO
2791 , ("%RGp %R[pgmpage]\n", pLookupRange->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), pPage));
2792/** @todo This isn't entirely correct, is it? Aliases ought to be converted
2793 * to zero pages here, but they won't be. However, shouldn't
2794 * PGMHandlerPhysicalDeregister deal with this already? */
2795 if (PGM_PAGE_IS_MMIO_OR_ALIAS(pPage))
2796 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_RAM);
2797 iPage++;
2798 }
2799
2800#ifdef VBOX_WITH_NATIVE_NEM
2801             /* Notify NEM (failure will probably leave things in a non-working state). */
2802 if (VM_IS_NEM_ENABLED(pVM))
2803 {
2804 uint8_t u2State = UINT8_MAX;
2805 rc = NEMR3NotifyPhysMmioExUnmap(pVM, GCPhys, GCPhysLast - GCPhys + 1, NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE,
2806 pLookupRange->pbR3 ? pLookupRange->pbR3 + GCPhys - pLookupRange->GCPhys : NULL,
2807 NULL, &u2State, &pLookupRange->uNemRange);
2808 AssertLogRelRC(rc);
2809 /** @todo status code propagation here... This is likely fatal, right? */
2810 if (u2State != UINT8_MAX)
2811 pgmPhysSetNemStateForPages(&pLookupRange->aPages[(GCPhys - pLookupRange->GCPhys) >> GUEST_PAGE_SHIFT],
2812 cb >> GUEST_PAGE_SHIFT, u2State);
2813 }
2814#endif
2815 }
2816 else
2817 {
2818 /*
2819 * Unlink the ad hoc range.
2820 */
2821#ifdef VBOX_STRICT
2822 uint32_t iPage = cb >> GUEST_PAGE_SHIFT;
2823 while (iPage-- > 0)
2824 {
2825 PPGMPAGE const pPage = &pMmioRamRange->aPages[iPage];
2826 Assert(PGM_PAGE_IS_MMIO(pPage));
2827 }
2828#endif
2829
2830 Log(("pgmR3PhysMmioUnmapLocked: Unmapping ad hoc MMIO range for %RGp-%RGp %s\n",
2831 GCPhys, GCPhysLast, pMmioRamRange->pszDesc));
2832
2833#ifdef VBOX_WITH_NATIVE_NEM
2834             if (VM_IS_NEM_ENABLED(pVM)) /* Notify NEM before we unlink the range. */
2835 {
2836 rc = NEMR3NotifyPhysMmioExUnmap(pVM, GCPhys, GCPhysLast - GCPhys + 1, 0 /*fFlags*/,
2837 NULL, NULL, NULL, &pMmioRamRange->uNemRange);
2838 AssertLogRelRCReturn(rc, rc); /* we're up the creek if this hits. */
2839 }
2840#endif
2841
2842 pgmR3PhysRamRangeRemoveLookup(pVM, pMmioRamRange, &idxLookup);
2843 }
2844 }
2845
2846 /* Force a PGM pool flush as guest ram references have been changed. */
2847 /** @todo Not entirely SMP safe; assuming for now the guest takes care of
2848      * this internally (it does not touch mapped MMIO while changing the mapping). */
2849 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2850 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2851
2852 pgmPhysInvalidatePageMapTLB(pVM);
2853 pgmPhysInvalidRamRangeTlbs(pVM);
2854
2855 return rc;
2856}
2857
2858
2859/**
2860 * This is the interface IOM is using to unmap an MMIO region.
2861 *
2862 * It will take care of calling PGMHandlerPhysicalDeregister and clean up
2863 * any ad hoc PGMRAMRANGE left behind.
2864 *
2865 * @returns VBox status code.
2866 * @param pVM The cross context VM structure.
2867 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2868 * @param GCPhys The start of the MMIO region.
2869 * @param cb The size of the MMIO region.
2870 * @param idRamRange The RAM range ID for the MMIO region as returned by
2871 * PGMR3PhysMmioRegister().
2872 * @thread EMT(pVCpu)
2873 */
2874VMMR3_INT_DECL(int) PGMR3PhysMmioUnmap(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTGCPHYS cb, uint16_t idRamRange)
2875{
2876 /*
2877 * Input validation.
2878 */
2879 VMCPU_ASSERT_EMT(pVCpu);
2880 AssertReturn(!(cb & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2881 AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2882 RTGCPHYS const GCPhysLast = GCPhys + cb - 1U;
2883 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2884 AssertReturn(idRamRange <= pVM->pgm.s.idRamRangeMax && idRamRange > 0, VERR_INVALID_HANDLE);
2885 PPGMRAMRANGE const pMmioRamRange = pVM->pgm.s.apRamRanges[idRamRange];
2886 AssertReturn(pMmioRamRange, VERR_INVALID_HANDLE);
2887 AssertReturn(pMmioRamRange->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO, VERR_INVALID_HANDLE);
2888 AssertReturn(pMmioRamRange->cb == cb, VERR_OUT_OF_RANGE);
2889
2890 /*
2891 * Take the PGM lock and do what's asked.
2892 */
2893 int rc = PGM_LOCK(pVM);
2894 AssertRCReturn(rc, rc);
2895
2896 rc = pgmR3PhysMmioUnmapLocked(pVM, pVCpu, GCPhys, cb, GCPhysLast, pMmioRamRange);
2897#ifdef VBOX_STRICT
2898 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, false /*fRamRelaxed*/);
2899#endif
2900
2901 PGM_UNLOCK(pVM);
2902 return rc;
2903}
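/*
 * Sketch completing the MMIO examples above: unmapping uses the same guest
 * address, size and RAM range ID; the ad hoc range itself stays registered
 * and can be mapped again later (e.g. when a PCI BAR moves):
 *
 *      int rc = PGMR3PhysMmioUnmap(pVM, pVCpu, UINT32_C(0xe0000000), 16 * GUEST_PAGE_SIZE, idRamRange);
 *      AssertRCReturn(rc, rc);
 */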
2904
2905
2906
2907/*********************************************************************************************************************************
2908* MMIO2 *
2909*********************************************************************************************************************************/
2910
2911/**
2912 * Validates the claim to an MMIO2 range and returns the pointer to it.
2913 *
2914 * @returns The MMIO2 entry index on success, negative error status on failure.
2915 * @param pVM The cross context VM structure.
2916 * @param pDevIns The device instance owning the region.
2917 * @param hMmio2 Handle to look up.
2918 * @param pcChunks Where to return the number of chunks associated with
2919 * this handle.
2920 */
2921static int32_t pgmR3PhysMmio2ResolveHandle(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, uint32_t *pcChunks)
2922{
2923 *pcChunks = 0;
2924 uint32_t const idxFirst = hMmio2 - 1U;
2925 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
2926 AssertReturn(idxFirst < cMmio2Ranges, VERR_INVALID_HANDLE);
2927
2928 PPGMREGMMIO2RANGE const pFirst = &pVM->pgm.s.aMmio2Ranges[idxFirst];
2929 AssertReturn(pFirst->idMmio2 == hMmio2, VERR_INVALID_HANDLE);
2930 AssertReturn((pFirst->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK), VERR_INVALID_HANDLE);
2931 AssertReturn(pFirst->pDevInsR3 == pDevIns && RT_VALID_PTR(pDevIns), VERR_NOT_OWNER);
2932
2933 /* Figure out how many chunks this handle spans. */
2934 if (pFirst->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
2935 *pcChunks = 1;
2936 else
2937 {
2938 uint32_t cChunks = 1;
2939 for (uint32_t idx = idxFirst + 1;; idx++)
2940 {
2941 cChunks++;
2942 AssertReturn(idx < cMmio2Ranges, VERR_INTERNAL_ERROR_2);
2943 PPGMREGMMIO2RANGE const pCur = &pVM->pgm.s.aMmio2Ranges[idx];
2944 AssertLogRelMsgReturn( pCur->pDevInsR3 == pDevIns
2945 && pCur->idMmio2 == idx + 1
2946 && pCur->iSubDev == pFirst->iSubDev
2947 && pCur->iRegion == pFirst->iRegion
2948 && !(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK),
2949 ("cur: %p/%#x/%#x/%#x/%#x/%s; first: %p/%#x/%#x/%#x/%#x/%s\n",
2950 pCur->pDevInsR3, pCur->idMmio2, pCur->iSubDev, pCur->iRegion, pCur->fFlags,
2951 pVM->pgm.s.apMmio2RamRanges[idx]->pszDesc,
2952 pDevIns, idx + 1, pFirst->iSubDev, pFirst->iRegion, pFirst->fFlags,
2953 pVM->pgm.s.apMmio2RamRanges[idxFirst]->pszDesc),
2954 VERR_INTERNAL_ERROR_3);
2955 if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
2956 break;
2957 }
2958 *pcChunks = cChunks;
2959 }
2960
2961 return (int32_t)idxFirst;
2962}
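/*
 * Worked example for the handle resolution above (values made up): a handle
 * of 4 resolves to idxFirst = 3. If aMmio2Ranges[3] lacks
 * PGMREGMMIO2RANGE_F_LAST_CHUNK, the loop walks entries 4, 5, ... until the
 * flag is found; a region split over three chunks thus returns idxFirst = 3
 * with *pcChunks = 3 and occupies MMIO2 IDs 4, 5 and 6.
 */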
2963
2964
2965/**
2966 * Check if a device has already registered a MMIO2 region.
2967 *
2968 * @returns NULL if not registered, otherwise pointer to the MMIO2.
2969 * @param pVM The cross context VM structure.
2970 * @param pDevIns The device instance owning the region.
2971 * @param iSubDev The sub-device number.
2972 * @param iRegion The region.
2973 */
2974DECLINLINE(PPGMREGMMIO2RANGE) pgmR3PhysMmio2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion)
2975{
2976 /*
2977 * Search the array. There shouldn't be many entries.
2978 */
2979 uint32_t idx = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
2980 while (idx-- > 0)
2981 if (RT_LIKELY( pVM->pgm.s.aMmio2Ranges[idx].pDevInsR3 != pDevIns
2982 || pVM->pgm.s.aMmio2Ranges[idx].iRegion != iRegion
2983 || pVM->pgm.s.aMmio2Ranges[idx].iSubDev != iSubDev))
2984 { /* likely */ }
2985 else
2986 return &pVM->pgm.s.aMmio2Ranges[idx];
2987 return NULL;
2988}
2989
2990/**
2991 * Worker for PGMR3PhysMmio2ControlDirtyPageTracking and PGMR3PhysMmio2Map.
2992 */
2993static int pgmR3PhysMmio2EnableDirtyPageTracing(PVM pVM, uint32_t idx, uint32_t cChunks)
2994{
2995 int rc = VINF_SUCCESS;
2996 while (cChunks-- > 0)
2997 {
2998 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
2999 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
3000
3001 Assert(!(pMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_TRACKING));
3002 int rc2 = pgmHandlerPhysicalExRegister(pVM, pMmio2->pPhysHandlerR3, pRamRange->GCPhys, pRamRange->GCPhysLast);
3003 if (RT_SUCCESS(rc2))
3004 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_TRACKING;
3005 else
3006 AssertLogRelMsgFailedStmt(("%#RGp-%#RGp %s failed -> %Rrc\n",
3007 pRamRange->GCPhys, pRamRange->GCPhysLast, pRamRange->pszDesc, rc2),
3008 rc = RT_SUCCESS(rc) ? rc2 : rc);
3009
3010 idx++;
3011 }
3012 return rc;
3013}
3014
3015
3016/**
3017 * Worker for PGMR3PhysMmio2ControlDirtyPageTracking and PGMR3PhysMmio2Unmap.
3018 */
3019static int pgmR3PhysMmio2DisableDirtyPageTracing(PVM pVM, uint32_t idx, uint32_t cChunks)
3020{
3021 int rc = VINF_SUCCESS;
3022 while (cChunks-- > 0)
3023 {
3024 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
3025 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
3026 if (pMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_TRACKING)
3027 {
3028 int rc2 = pgmHandlerPhysicalExDeregister(pVM, pMmio2->pPhysHandlerR3);
3029 AssertLogRelMsgStmt(RT_SUCCESS(rc2),
3030 ("%#RGp-%#RGp %s failed -> %Rrc\n",
3031 pRamRange->GCPhys, pRamRange->GCPhysLast, pRamRange->pszDesc, rc2),
3032 rc = RT_SUCCESS(rc) ? rc2 : rc);
3033 pMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_IS_TRACKING;
3034 }
3035 idx++;
3036 }
3037 return rc;
3038}
3039
3040#if 0 // temp
3041
3042/**
3043 * Common worker PGMR3PhysMmio2PreRegister & PGMR3PhysMMIO2Register that links a
3044 * complete registration entry into the lists and lookup tables.
3045 *
3046 * @param pVM The cross context VM structure.
3047 * @param pNew The new MMIO / MMIO2 registration to link.
3048 */
3049static void pgmR3PhysMmio2Link(PVM pVM, PPGMREGMMIO2RANGE pNew)
3050{
3051 Assert(pNew->idMmio2 != UINT8_MAX);
3052
3053 /*
3054 * Link it into the list (order doesn't matter, so insert it at the head).
3055 *
3056 * Note! The range we're linking may consist of multiple chunks, so we
3057 * have to find the last one.
3058 */
3059 PPGMREGMMIO2RANGE pLast = pNew;
3060 for (pLast = pNew; ; pLast = pLast->pNextR3)
3061 {
3062 if (pLast->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
3063 break;
3064 Assert(pLast->pNextR3);
3065 Assert(pLast->pNextR3->pDevInsR3 == pNew->pDevInsR3);
3066 Assert(pLast->pNextR3->iSubDev == pNew->iSubDev);
3067 Assert(pLast->pNextR3->iRegion == pNew->iRegion);
3068 Assert(pLast->pNextR3->idMmio2 == pLast->idMmio2 + 1);
3069 }
3070
3071 PGM_LOCK_VOID(pVM);
3072
3073 /* Link in the chain of ranges at the head of the list. */
3074 pLast->pNextR3 = pVM->pgm.s.pRegMmioRangesR3;
3075 pVM->pgm.s.pRegMmioRangesR3 = pNew;
3076
3077 /* Insert the MMIO2 range/page IDs. */
3078 uint8_t idMmio2 = pNew->idMmio2;
3079 for (;;)
3080 {
3081 Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == NULL);
3082 Assert(pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] == NIL_RTR0PTR);
3083 pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = pNew;
3084 pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = pNew->RamRange.pSelfR0 - RT_UOFFSETOF(PGMREGMMIO2RANGE, RamRange);
3085 if (pNew->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
3086 break;
3087 pNew = pNew->pNextR3;
3088 idMmio2++;
3089 }
3090
3091 pgmPhysInvalidatePageMapTLB(pVM);
3092 PGM_UNLOCK(pVM);
3093}
3094#endif
3095
3096
3097/**
3098 * Allocate and register an MMIO2 region.
3099 *
3100 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's RAM
3101 * associated with a device. It is also non-shared memory with a permanent
3102 * ring-3 mapping and page backing (presently).
3103 *
3104 * A MMIO2 range may overlap with base memory if a lot of RAM is configured for
3105 * the VM, in which case we'll drop the base memory pages. Presently we will
3106 * make no attempt to preserve anything that happens to be present in the base
3107 * memory that is replaced; this is of course incorrect, but it's too much
3108 * effort.
3109 *
3110 * @returns VBox status code.
3111 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the
3112 * memory.
3113 * @retval VERR_ALREADY_EXISTS if the region already exists.
3114 *
3115 * @param pVM The cross context VM structure.
3116 * @param pDevIns The device instance owning the region.
3117 * @param iSubDev The sub-device number.
3118 * @param iRegion The region number. If the MMIO2 memory is a PCI
3119 * I/O region this number has to be the number of that
3120 * region. Otherwise it can be any number save
3121 * UINT8_MAX.
3122 * @param cb The size of the region. Must be page aligned.
3123 * @param fFlags Reserved for future use, must be zero.
3124 * @param pszDesc The description.
3125 * @param ppv Where to store the pointer to the ring-3 mapping of
3126 * the memory.
3127 * @param phRegion Where to return the MMIO2 region handle. Optional.
3128 * @thread EMT(0)
3129 *
3130 * @note Only callable at VM creation time and during VM state loading.
3131 * The latter is for PCNet saved state compatibility with pre 4.3.6
3132 * state.
3133 */
3134VMMR3_INT_DECL(int) PGMR3PhysMmio2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb,
3135 uint32_t fFlags, const char *pszDesc, void **ppv, PGMMMIO2HANDLE *phRegion)
3136{
3137 /*
3138 * Validate input.
3139 */
3140 AssertPtrReturn(ppv, VERR_INVALID_POINTER);
3141 *ppv = NULL;
3142 if (phRegion)
3143 {
3144 AssertPtrReturn(phRegion, VERR_INVALID_POINTER);
3145 *phRegion = NIL_PGMMMIO2HANDLE;
3146 }
3147 PVMCPU const pVCpu = VMMGetCpu(pVM);
3148 AssertReturn(pVCpu && pVCpu->idCpu == 0, VERR_VM_THREAD_NOT_EMT);
3149 VMSTATE const enmVMState = VMR3GetState(pVM);
3150 AssertMsgReturn(enmVMState == VMSTATE_CREATING || enmVMState == VMSTATE_LOADING,
3151 ("state %s, expected CREATING or LOADING\n", VMGetStateName(enmVMState)),
3152 VERR_VM_INVALID_VM_STATE);
3153
3154 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3155 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
3156 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
3157
3158 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
3159 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
3160
3161 AssertReturn(!(cb & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3162 AssertReturn(cb, VERR_INVALID_PARAMETER);
3163 AssertReturn(!(fFlags & ~PGMPHYS_MMIO2_FLAGS_VALID_MASK), VERR_INVALID_FLAGS);
3164
3165 const uint32_t cGuestPages = cb >> GUEST_PAGE_SHIFT;
3166 AssertLogRelReturn(((RTGCPHYS)cGuestPages << GUEST_PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
3167 AssertLogRelReturn(cGuestPages <= PGM_MAX_PAGES_PER_MMIO2_REGION, VERR_OUT_OF_RANGE);
3168 AssertLogRelReturn(cGuestPages <= (MM_MMIO_64_MAX >> GUEST_PAGE_SHIFT), VERR_OUT_OF_RANGE);
3169
3170 AssertReturn(pgmR3PhysMmio2Find(pVM, pDevIns, iSubDev, iRegion) == NULL, VERR_ALREADY_EXISTS);
3171
3172 /*
3173 * For the 2nd+ instance, mangle the description string so it's unique.
3174 */
3175 if (pDevIns->iInstance > 0) /** @todo Move to PDMDevHlp.cpp and use a real string cache. */
3176 {
3177 pszDesc = MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s [%u]", pszDesc, pDevIns->iInstance);
3178 if (!pszDesc)
3179 return VERR_NO_MEMORY;
3180 }
3181
3182 /*
3183 * Check that we've got sufficient MMIO2 ID space for this request (the
3184 * allocation will be done later once we've got the backing memory secured,
3185 * but given the EMT0 restriction, that's not going to be a problem).
3186 *
3187 * The zero ID is not used as it could be confused with NIL_GMM_PAGEID, so
3188     * the IDs go from 1 thru PGM_MAX_MMIO2_RANGES.
3189 */
3190 unsigned const cChunks = pgmPhysMmio2CalcChunkCount(cb, NULL);
3191
3192 int rc = PGM_LOCK(pVM);
3193 AssertRCReturn(rc, rc);
3194
3195 AssertCompile(PGM_MAX_MMIO2_RANGES < 255);
3196 uint8_t const idMmio2 = pVM->pgm.s.cMmio2Ranges + 1;
3197 AssertLogRelReturnStmt(idMmio2 + cChunks <= PGM_MAX_MMIO2_RANGES, PGM_UNLOCK(pVM), VERR_PGM_TOO_MANY_MMIO2_RANGES);
3198
3199 /*
3200 * Try reserve and allocate the backing memory first as this is what is
3201 * most likely to fail.
3202 */
3203 rc = MMR3AdjustFixedReservation(pVM, cGuestPages, pszDesc);
3204 if (RT_SUCCESS(rc))
3205 {
3206 /*
3207 * If we're in driverless we'll be doing the work here, otherwise we
3208 * must call ring-0 to do the job as we'll need physical addresses
3209 * and maybe a ring-0 mapping address for it all.
3210 */
3211 if (SUPR3IsDriverless())
3212 rc = pgmPhysMmio2RegisterWorker(pVM, cGuestPages, idMmio2, cChunks, pDevIns, iSubDev, iRegion, fFlags);
3213 else
3214 {
3215 PGMPHYSMMIO2REGISTERREQ Mmio2RegReq;
3216 Mmio2RegReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
3217 Mmio2RegReq.Hdr.cbReq = sizeof(Mmio2RegReq);
3218 Mmio2RegReq.cbGuestPage = GUEST_PAGE_SIZE;
3219 Mmio2RegReq.cGuestPages = cGuestPages;
3220 Mmio2RegReq.idMmio2 = idMmio2;
3221 Mmio2RegReq.cChunks = cChunks;
3222 Mmio2RegReq.iSubDev = (uint8_t)iSubDev;
3223 Mmio2RegReq.iRegion = (uint8_t)iRegion;
3224 Mmio2RegReq.fFlags = fFlags;
3225 Mmio2RegReq.pDevIns = pDevIns;
3226 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_PGM_PHYS_MMIO2_REGISTER, 0 /*u64Arg*/, &Mmio2RegReq.Hdr);
3227 }
3228 if (RT_SUCCESS(rc))
3229 {
3230 Assert(idMmio2 + cChunks - 1 == pVM->pgm.s.cMmio2Ranges);
3231
3232 /*
3233 * There are two things left to do:
3234 * 1. Add the description to the associated RAM ranges.
3235 * 2. Pre-allocate access handlers for dirty bit tracking if necessary.
3236 */
3237 bool const fNeedHandler = (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES)
3238#ifdef VBOX_WITH_PGM_NEM_MODE
3239 && (!VM_IS_NEM_ENABLED(pVM) || !NEMR3IsMmio2DirtyPageTrackingSupported(pVM))
3240#endif
3241 ;
3242 for (uint32_t idxChunk = 0; idxChunk < cChunks; idxChunk++)
3243 {
3244 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idxChunk + idMmio2 - 1];
3245 Assert(pMmio2->idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
3246 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apRamRanges[pMmio2->idRamRange];
3247 Assert(pRamRange->pbR3 == pMmio2->pbR3);
3248 Assert(pRamRange->cb == pMmio2->cbReal);
3249
3250 pRamRange->pszDesc = pszDesc; /** @todo mangle this if we got more than one chunk */
3251 if (fNeedHandler)
3252 {
3253 rc = pgmHandlerPhysicalExCreate(pVM, pVM->pgm.s.hMmio2DirtyPhysHandlerType, pMmio2->idMmio2,
3254 pszDesc, &pMmio2->pPhysHandlerR3);
3255 AssertLogRelMsgReturnStmt(RT_SUCCESS(rc),
3256                                       ("idMmio2=%#x idxChunk=%#x rc=%Rrc\n", idMmio2, idxChunk, rc),
3257 PGM_UNLOCK(pVM),
3258 rc); /* PGMR3Term will take care of it all. */
3259 }
3260 }
3261
3262 /*
3263 * Done!
3264 */
3265 if (phRegion)
3266 *phRegion = idMmio2;
3267 *ppv = pVM->pgm.s.aMmio2Ranges[idMmio2 - 1].pbR3;
3268
3269 PGM_UNLOCK(pVM);
3270 return VINF_SUCCESS;
3271 }
3272
3273 MMR3AdjustFixedReservation(pVM, -(int32_t)cGuestPages, pszDesc);
3274 }
3275 if (pDevIns->iInstance > 0)
3276 MMR3HeapFree((void *)pszDesc);
3277 return rc;
3278}
3279
3280/**
3281 * Deregisters and frees an MMIO2 region.
3282 *
3283 * Any physical access handlers registered for the region must be deregistered
3284 * before calling this function.
3285 *
3286 * @returns VBox status code.
3287 * @param pVM The cross context VM structure.
3288 * @param pDevIns The device instance owning the region.
3289 * @param hMmio2 The MMIO2 handle to deregister, or NIL if all
3290 *                      regions for the given device are to be deregistered.
3291 * @thread EMT(0)
3292 *
3293 * @note Only callable during VM state loading. This is to jettison an unused
3294 * MMIO2 section present in PCNet saved state prior to VBox v4.3.6.
3295 */
3296VMMR3_INT_DECL(int) PGMR3PhysMmio2Deregister(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2)
3297{
3298 /*
3299 * Validate input.
3300 */
3301 PVMCPU const pVCpu = VMMGetCpu(pVM);
3302 AssertReturn(pVCpu && pVCpu->idCpu == 0, VERR_VM_THREAD_NOT_EMT);
3303 VMSTATE const enmVMState = VMR3GetState(pVM);
3304 AssertMsgReturn(enmVMState == VMSTATE_LOADING,
3305 ("state %s, expected LOADING\n", VMGetStateName(enmVMState)),
3306 VERR_VM_INVALID_VM_STATE);
3307
3308 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3309
3310 /*
3311 * Take the PGM lock and scan for registrations matching the requirements.
3312 * We do this backwards to more easily reduce the cMmio2Ranges count when
3313 * stuff is removed.
3314 */
3315 PGM_LOCK_VOID(pVM);
3316
3317 int rc = VINF_SUCCESS;
3318 unsigned cFound = 0;
3319 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
3320 uint32_t idx = cMmio2Ranges;
3321 while (idx-- > 0)
3322 {
3323 PPGMREGMMIO2RANGE pCur = &pVM->pgm.s.aMmio2Ranges[idx];
3324 if ( pCur->pDevInsR3 == pDevIns
3325 && ( hMmio2 == NIL_PGMMMIO2HANDLE
3326 || pCur->idMmio2 == hMmio2))
3327 {
3328 cFound++;
3329
3330 /*
3331 * Wind back the first chunk for this registration.
3332 */
3333 AssertLogRelMsgReturnStmt(pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK, ("idx=%u fFlags=%#x\n", idx, pCur->fFlags),
3334 PGM_UNLOCK(pVM), VERR_INTERNAL_ERROR_3);
3335 uint32_t cGuestPages = pCur->cbReal >> GUEST_PAGE_SHIFT;
3336 uint32_t cChunks = 1;
3337 while ( idx > 0
3338 && !(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK))
3339 {
3340 AssertLogRelMsgReturnStmt( pCur[-1].pDevInsR3 == pDevIns
3341 && pCur[-1].iRegion == pCur->iRegion
3342 && pCur[-1].iSubDev == pCur->iSubDev,
3343 ("[%u]: %p/%#x/%#x/fl=%#x; [%u]: %p/%#x/%#x/fl=%#x; cChunks=%#x\n",
3344 idx - 1, pCur[-1].pDevInsR3, pCur[-1].iRegion, pCur[-1].iSubDev, pCur[-1].fFlags,
3345 idx, pCur->pDevInsR3, pCur->iRegion, pCur->iSubDev, pCur->fFlags, cChunks),
3346 PGM_UNLOCK(pVM), VERR_INTERNAL_ERROR_3);
3347 cChunks++;
3348 pCur--;
3349 idx--;
3350 cGuestPages += pCur->cbReal >> GUEST_PAGE_SHIFT;
3351 }
3352 AssertLogRelMsgReturnStmt(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK,
3353 ("idx=%u fFlags=%#x cChunks=%#x\n", idx, pCur->fFlags, cChunks),
3354 PGM_UNLOCK(pVM), VERR_INTERNAL_ERROR_3);
3355
3356 /*
3357 * Unmap it if it's mapped.
3358 */
3359 if (pCur->fFlags & PGMREGMMIO2RANGE_F_MAPPED)
3360 {
3361 int rc2 = PGMR3PhysMmio2Unmap(pVM, pCur->pDevInsR3, idx + 1, pCur->GCPhys);
3362 AssertRC(rc2);
3363 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
3364 rc = rc2;
3365 }
3366
3367 /*
3368 * Destroy access handlers.
3369 */
3370 for (uint32_t iChunk = 0; iChunk < cChunks; iChunk++)
3371 if (pCur[iChunk].pPhysHandlerR3)
3372 {
3373 pgmHandlerPhysicalExDestroy(pVM, pCur[iChunk].pPhysHandlerR3);
3374 pCur[iChunk].pPhysHandlerR3 = NULL;
3375 }
3376
3377 /*
3378 * Call kernel mode / worker to do the actual deregistration.
3379 */
3380 const char * const pszDesc = pVM->pgm.s.apMmio2RamRanges[idx] ? pVM->pgm.s.apMmio2RamRanges[idx]->pszDesc : NULL;
3381 int rc2;
3382 if (SUPR3IsDriverless())
3383 {
3384 Assert(PGM_IS_IN_NEM_MODE(pVM));
3385 rc2 = pgmPhysMmio2DeregisterWorker(pVM, idx, cChunks, pDevIns);
3386 AssertLogRelMsgStmt(RT_SUCCESS(rc2),
3387 ("pgmPhysMmio2DeregisterWorker: rc=%Rrc idx=%#x cChunks=%#x %s\n",
3388 rc2, idx, cChunks, pszDesc),
3389 rc = RT_SUCCESS(rc) ? rc2 : rc);
3390 }
3391 else
3392 {
3393 PGMPHYSMMIO2DEREGISTERREQ Mmio2DeregReq;
3394 Mmio2DeregReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
3395 Mmio2DeregReq.Hdr.cbReq = sizeof(Mmio2DeregReq);
3396 Mmio2DeregReq.idMmio2 = idx + 1;
3397 Mmio2DeregReq.cChunks = cChunks;
3398 Mmio2DeregReq.pDevIns = pDevIns;
3399 rc2 = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_PGM_PHYS_MMIO2_DEREGISTER, 0 /*u64Arg*/, &Mmio2DeregReq.Hdr);
3400 AssertLogRelMsgStmt(RT_SUCCESS(rc2),
3401 ("VMMR0_DO_PGM_PHYS_MMIO2_DEREGISTER: rc=%Rrc idx=%#x cChunks=%#x %s\n",
3402 rc2, idx, cChunks, pszDesc),
3403 rc = RT_SUCCESS(rc) ? rc2 : rc);
3404 }
3405 if (RT_FAILURE(rc2))
3406 {
3407 LogRel(("PGMR3PhysMmio2Deregister: Deregistration failed: %Rrc; cChunks=%u %s\n", rc, cChunks, pszDesc));
3408 if (RT_SUCCESS(rc))
3409 rc = rc2;
3410 }
3411
3412 /*
3413 * Adjust the memory reservation.
3414 */
3415 if (!PGM_IS_IN_NEM_MODE(pVM) && RT_SUCCESS(rc2))
3416 {
3417 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cGuestPages, pszDesc);
3418 AssertLogRelMsgStmt(RT_SUCCESS(rc2), ("rc=%Rrc cGuestPages=%#x\n", rc, cGuestPages),
3419 rc = RT_SUCCESS(rc) ? rc2 : rc);
3420 }
3421
3422 /* Are we done? */
3423 if (hMmio2 != NIL_PGMMMIO2HANDLE)
3424 break;
3425 }
3426 }
3427 pgmPhysInvalidatePageMapTLB(pVM);
3428 PGM_UNLOCK(pVM);
3429 return !cFound && hMmio2 != NIL_PGMMMIO2HANDLE ? VERR_NOT_FOUND : rc;
3430}
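/*
 * Usage sketch (illustrative only, not part of the build): dropping every
 * MMIO2 region a device owns while its saved state is being loaded, as the
 * NIL handle semantics documented above allow. 'pDevIns' is assumed to be
 * the loading device's instance.
 *
 *      // On EMT(0), during VMSTATE_LOADING:
 *      int rc = PGMR3PhysMmio2Deregister(pVM, pDevIns, NIL_PGMMMIO2HANDLE);
 *      if (RT_FAILURE(rc))
 *          return rc;
 */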
3431
3432
3433/**
3434 * Worker for PGMR3PhysMmio2Map.
3435 */
3436static int pgmR3PhysMmio2MapLocked(PVM pVM, uint32_t const idxFirst, uint32_t const cChunks,
3437 RTGCPHYS const GCPhys, RTGCPHYS const GCPhysLast)
3438{
3439 /*
3440 * Validate the mapped status now that we've got the lock.
3441 */
3442 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++)
3443 {
3444 AssertReturn( pVM->pgm.s.aMmio2Ranges[idx].GCPhys == NIL_RTGCPHYS
3445 && !(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_MAPPED),
3446 VERR_WRONG_ORDER);
3447 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
3448 AssertReturn(pRamRange->GCPhys == NIL_RTGCPHYS, VERR_INTERNAL_ERROR_3);
3449 AssertReturn(pRamRange->GCPhysLast == NIL_RTGCPHYS, VERR_INTERNAL_ERROR_3);
3450 Assert(pRamRange->pbR3 == pVM->pgm.s.aMmio2Ranges[idx].pbR3);
3451 Assert(pRamRange->idRange == pVM->pgm.s.aMmio2Ranges[idx].idRamRange);
3452 }
3453
3454 const char * const pszDesc = pVM->pgm.s.apMmio2RamRanges[idxFirst]->pszDesc;
3455#ifdef VBOX_WITH_NATIVE_NEM
3456 uint32_t const fNemFlags = NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2
3457 | (pVM->pgm.s.aMmio2Ranges[idxFirst].fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES
3458 ? NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES : 0);
3459#endif
3460
3461 /*
3462 * Now, check if this falls into a regular RAM range or if we should use
3463 * the ad-hoc one.
3464 *
3465     * Note! For reasons of simplicity, we're considering the whole MMIO2 area
3466 * here rather than individual chunks.
3467 */
3468 int rc = VINF_SUCCESS;
3469 uint32_t idxInsert = UINT32_MAX;
3470 PPGMRAMRANGE const pOverlappingRange = pgmR3PhysRamRangeFindOverlapping(pVM, GCPhys, GCPhysLast, &idxInsert);
3471 if (pOverlappingRange)
3472 {
3473 /* Simplification: all within the same range. */
3474 AssertLogRelMsgReturn( GCPhys >= pOverlappingRange->GCPhys
3475 && GCPhysLast <= pOverlappingRange->GCPhysLast,
3476 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
3477 GCPhys, GCPhysLast, pszDesc,
3478 pOverlappingRange->GCPhys, pOverlappingRange->GCPhysLast, pOverlappingRange->pszDesc),
3479 VERR_PGM_RAM_CONFLICT);
3480
3481         /* Check that it isn't an ad hoc range, but a real RAM range. */
3482 AssertLogRelMsgReturn(!PGM_RAM_RANGE_IS_AD_HOC(pOverlappingRange),
3483 ("%RGp-%RGp (MMIO2/%s) mapping attempt in non-RAM range: %RGp-%RGp (%s)\n",
3484 GCPhys, GCPhysLast, pszDesc,
3485 pOverlappingRange->GCPhys, pOverlappingRange->GCPhysLast, pOverlappingRange->pszDesc),
3486 VERR_PGM_RAM_CONFLICT);
3487
3488 /* There can only be one MMIO2 chunk matching here! */
3489 AssertLogRelMsgReturn(cChunks == 1,
3490 ("%RGp-%RGp (MMIO2/%s) consists of %u chunks whereas the RAM (%s) somehow doesn't!\n",
3491 GCPhys, GCPhysLast, pszDesc, cChunks, pOverlappingRange->pszDesc),
3492 VERR_PGM_PHYS_MMIO_EX_IPE);
3493
3494 /* Check that it's all RAM pages. */
3495 PCPGMPAGE pPage = &pOverlappingRange->aPages[(GCPhys - pOverlappingRange->GCPhys) >> GUEST_PAGE_SHIFT];
3496 uint32_t const cMmio2Pages = pVM->pgm.s.apMmio2RamRanges[idxFirst]->cb >> GUEST_PAGE_SHIFT;
3497 uint32_t cPagesLeft = cMmio2Pages;
3498 while (cPagesLeft-- > 0)
3499 {
3500 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
3501 ("%RGp-%RGp (MMIO2/%s): %RGp is not a RAM page - type=%d desc=%s\n", GCPhys, GCPhysLast,
3502 pszDesc, pOverlappingRange->GCPhys, PGM_PAGE_GET_TYPE(pPage), pOverlappingRange->pszDesc),
3503 VERR_PGM_RAM_CONFLICT);
3504 pPage++;
3505 }
3506
3507#ifdef VBOX_WITH_PGM_NEM_MODE
3508         /* We cannot mix MMIO2 into a RAM range in simplified memory mode because pOverlappingRange->pbR3 cannot point
3509            at both the RAM and the MMIO2 backing, so reads and writes would never reach the actual MMIO2 memory. */
3510 AssertLogRelMsgReturn(!VM_IS_NEM_ENABLED(pVM),
3511 ("Putting %s at %RGp-%RGp is not possible in NEM mode because existing %RGp-%RGp (%s) mapping\n",
3512 pszDesc, GCPhys, GCPhysLast,
3513 pOverlappingRange->GCPhys, pOverlappingRange->GCPhysLast, pOverlappingRange->pszDesc),
3514 VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
3515#endif
3516
3517 /*
3518 * Make all the pages in the range MMIO/ZERO pages, freeing any
3519 * RAM pages currently mapped here. This might not be 100% correct,
3520 * but so what, we do the same from MMIO...
3521 */
3522 rc = pgmR3PhysFreePageRange(pVM, pOverlappingRange, GCPhys, GCPhysLast, NULL);
3523 AssertRCReturn(rc, rc);
3524
3525 Log(("PGMR3PhysMmio2Map: %RGp-%RGp %s - inside %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc,
3526 pOverlappingRange->GCPhys, pOverlappingRange->GCPhysLast, pOverlappingRange->pszDesc));
3527
3528 /*
3529 * We're all in for mapping it now. Update the MMIO2 range to reflect it.
3530 */
3531 pVM->pgm.s.aMmio2Ranges[idxFirst].GCPhys = GCPhys;
3532 pVM->pgm.s.aMmio2Ranges[idxFirst].fFlags |= PGMREGMMIO2RANGE_F_OVERLAPPING | PGMREGMMIO2RANGE_F_MAPPED;
3533
3534 /*
3535 * Replace the pages in the range.
3536 */
3537 PPGMPAGE pPageSrc = &pVM->pgm.s.apMmio2RamRanges[idxFirst]->aPages[0];
3538 PPGMPAGE pPageDst = &pOverlappingRange->aPages[(GCPhys - pOverlappingRange->GCPhys) >> GUEST_PAGE_SHIFT];
3539 cPagesLeft = cMmio2Pages;
3540 while (cPagesLeft-- > 0)
3541 {
3542 Assert(PGM_PAGE_IS_MMIO(pPageDst));
3543
3544 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
3545 uint32_t const idPage = PGM_PAGE_GET_PAGEID(pPageSrc);
3546 PGM_PAGE_SET_PAGEID(pVM, pPageDst, idPage);
3547 PGM_PAGE_SET_HCPHYS(pVM, pPageDst, HCPhys);
3548 PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO2);
3549 PGM_PAGE_SET_STATE(pVM, pPageDst, PGM_PAGE_STATE_ALLOCATED);
3550 PGM_PAGE_SET_PDE_TYPE(pVM, pPageDst, PGM_PAGE_PDE_TYPE_DONTCARE);
3551 PGM_PAGE_SET_PTE_INDEX(pVM, pPageDst, 0);
3552 PGM_PAGE_SET_TRACKING(pVM, pPageDst, 0);
3553 /* NEM state is not relevant, see VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE above. */
3554
3555 pVM->pgm.s.cZeroPages--;
3556 pPageSrc++;
3557 pPageDst++;
3558 }
3559
3560 /* Force a PGM pool flush as guest ram references have been changed. */
3561 /** @todo not entirely SMP safe; assuming for now the guest takes
3562 * care of this internally (not touch mapped mmio while changing the
3563 * mapping). */
3564 PVMCPU pVCpu = VMMGetCpu(pVM);
3565 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3566 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3567 }
3568 else
3569 {
3570 /*
3571 * No RAM range, insert the ones prepared during registration.
3572 */
3573 Log(("PGMR3PhysMmio2Map: %RGp-%RGp %s - no RAM overlap\n", GCPhys, GCPhysLast, pszDesc));
3574 RTGCPHYS GCPhysCur = GCPhys;
3575 uint32_t iChunk = 0;
3576 uint32_t idx = idxFirst;
3577 for (; iChunk < cChunks; iChunk++, idx++)
3578 {
3579 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
3580 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
3581 Assert(pRamRange->idRange == pMmio2->idRamRange);
3582 Assert(pMmio2->GCPhys == NIL_RTGCPHYS);
3583
3584#ifdef VBOX_WITH_NATIVE_NEM
3585 /* Tell NEM and get the new NEM state for the pages. */
3586 uint8_t u2NemState = 0;
3587 if (VM_IS_NEM_ENABLED(pVM))
3588 {
3589 rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhysCur, pRamRange->cb, fNemFlags, NULL /*pvRam*/, pRamRange->pbR3,
3590 &u2NemState, &pRamRange->uNemRange);
3591 AssertLogRelMsgBreak(RT_SUCCESS(rc),
3592 ("%RGp LB %RGp fFlags=%#x (%s)\n",
3593 GCPhysCur, pRamRange->cb, pMmio2->fFlags, pRamRange->pszDesc));
3594 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_MAPPED; /* Set this early to indicate that NEM has been notified. */
3595 }
3596#endif
3597
3598 /* Clear the tracking data of pages we're going to reactivate. */
3599 PPGMPAGE pPageSrc = &pRamRange->aPages[0];
3600 uint32_t cPagesLeft = pRamRange->cb >> GUEST_PAGE_SHIFT;
3601 while (cPagesLeft-- > 0)
3602 {
3603 PGM_PAGE_SET_TRACKING(pVM, pPageSrc, 0);
3604 PGM_PAGE_SET_PTE_INDEX(pVM, pPageSrc, 0);
3605#ifdef VBOX_WITH_NATIVE_NEM
3606 PGM_PAGE_SET_NEM_STATE(pPageSrc, u2NemState);
3607#endif
3608 pPageSrc++;
3609 }
3610
3611 /* Insert the RAM range into the lookup table. */
3612 rc = pgmR3PhysRamRangeInsertLookup(pVM, pRamRange, GCPhysCur, &idxInsert);
3613 AssertRCBreak(rc);
3614
3615 /* Mark the range as fully mapped. */
3616 pMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_OVERLAPPING;
3617 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_MAPPED;
3618 pMmio2->GCPhys = GCPhysCur;
3619
3620 /* Advance. */
3621 GCPhysCur += pRamRange->cb;
3622 }
3623 if (RT_FAILURE(rc))
3624 {
3625 /*
3626             * Back out anything we've done so far.
3627 */
3628 idxInsert -= 1;
3629 do
3630 {
3631 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
3632 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
3633
3634#ifdef VBOX_WITH_NATIVE_NEM
3635 if ( VM_IS_NEM_ENABLED(pVM)
3636 && (pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_MAPPED))
3637 {
3638 uint8_t u2NemState = UINT8_MAX;
3639 NEMR3NotifyPhysMmioExUnmap(pVM, GCPhysCur, pRamRange->cb, fNemFlags, NULL, pRamRange->pbR3,
3640 &u2NemState, &pRamRange->uNemRange);
3641 if (u2NemState != UINT8_MAX)
3642 pgmPhysSetNemStateForPages(pRamRange->aPages, pRamRange->cb >> GUEST_PAGE_SHIFT, u2NemState);
3643 }
3644#endif
3645 if (pMmio2->GCPhys != NIL_RTGCPHYS)
3646 pgmR3PhysRamRangeRemoveLookup(pVM, pRamRange, &idxInsert);
3647
3648 pMmio2->GCPhys = NIL_RTGCPHYS;
3649 pMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_MAPPED;
3650
3651 idx--;
3652 } while (iChunk-- > 0);
3653 return rc;
3654 }
3655 }
3656
3657 /*
3658     * If the range has dirty page monitoring enabled, enable that.
3659 *
3660 * We ignore failures here for now because if we fail, the whole mapping
3661 * will have to be reversed and we'll end up with nothing at all on the
3662 * screen and a grumpy guest, whereas if we just go on, we'll only have
3663 * visual distortions to gripe about. There will be something in the
3664 * release log.
3665 */
3666 if ( pVM->pgm.s.aMmio2Ranges[idxFirst].pPhysHandlerR3
3667 && (pVM->pgm.s.aMmio2Ranges[idxFirst].fFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
3668 pgmR3PhysMmio2EnableDirtyPageTracing(pVM, idxFirst, cChunks);
3669
3670 /* Flush physical page map TLB. */
3671 pgmPhysInvalidatePageMapTLB(pVM);
3672
3673#ifdef VBOX_WITH_NATIVE_NEM
3674 /*
3675 * Late NEM notification (currently unused).
3676 */
3677 if (VM_IS_NEM_ENABLED(pVM))
3678 {
3679 if (pOverlappingRange)
3680 {
3681 uint8_t * const pbRam = pOverlappingRange->pbR3 ? &pOverlappingRange->pbR3[GCPhys - pOverlappingRange->GCPhys] : NULL;
3682 rc = NEMR3NotifyPhysMmioExMapLate(pVM, GCPhys, GCPhysLast - GCPhys + 1U,
3683 fNemFlags | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE, pbRam,
3684 pVM->pgm.s.aMmio2Ranges[idxFirst].pbR3, NULL /*puNemRange*/);
3685 }
3686 else
3687 {
3688 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++)
3689 {
3690 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
3691 Assert(pVM->pgm.s.aMmio2Ranges[idx].GCPhys == pRamRange->GCPhys);
3692
3693 rc = NEMR3NotifyPhysMmioExMapLate(pVM, pRamRange->GCPhys, pRamRange->cb, fNemFlags, NULL /*pvRam*/,
3694 pRamRange->pbR3, &pRamRange->uNemRange);
3695 AssertRCBreak(rc);
3696 }
3697 }
3698 AssertLogRelRCReturnStmt(rc,
3699 PGMR3PhysMmio2Unmap(pVM, pVM->pgm.s.aMmio2Ranges[idxFirst].pDevInsR3, idxFirst + 1, GCPhys),
3700 rc);
3701 }
3702#endif
3703
3704 return VINF_SUCCESS;
3705}
3706
3707
3708/**
3709 * Maps a MMIO2 region.
3710 * Maps an MMIO2 region.
3711 * This is typically done when a guest / the bios / state loading changes the
3712 * PCI config. The replacing of base memory has the same restrictions as during
3713 * registration, of course.
3714 *
3715 * @returns VBox status code.
3716 *
3717 * @param pVM The cross context VM structure.
3718 * @param pDevIns The device instance owning the region.
3719 * @param hMmio2 The handle of the region to map.
3720 * @param GCPhys The guest-physical address to be remapped.
3721 */
3722VMMR3_INT_DECL(int) PGMR3PhysMmio2Map(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS GCPhys)
3723{
3724 /*
3725 * Validate input.
3726 */
3727 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3728 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3729 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
3730 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
3731 AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3732 AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE, VERR_INVALID_HANDLE);
3733
3734 uint32_t cChunks = 0;
3735 uint32_t const idxFirst = pgmR3PhysMmio2ResolveHandle(pVM, pDevIns, hMmio2, &cChunks);
3736 AssertReturn((int32_t)idxFirst >= 0, (int32_t)idxFirst);
3737
3738 /* Gather the full range size so we can validate the mapping address properly. */
3739 RTGCPHYS cbRange = 0;
3740 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++)
3741 cbRange += pVM->pgm.s.apMmio2RamRanges[idx]->cb;
3742
3743 RTGCPHYS const GCPhysLast = GCPhys + cbRange - 1;
3744 AssertLogRelReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
3745
3746 /*
3747 * Take the PGM lock and call worker.
3748 */
3749 int rc = PGM_LOCK(pVM);
3750 AssertRCReturn(rc, rc);
3751
3752 rc = pgmR3PhysMmio2MapLocked(pVM, idxFirst, cChunks, GCPhys, GCPhysLast);
3753#ifdef VBOX_STRICT
3754 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, false /*fRamRelaxed*/);
3755#endif
3756
3757 PGM_UNLOCK(pVM);
3758 return rc;
3759}
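/*
 * Usage sketch (illustrative only, not part of the build): the map/unmap
 * sequence a device would typically perform when the guest reprograms the
 * PCI BAR backed by its MMIO2 region. 'hMmio2', 'GCPhysOld' and 'GCPhysNew'
 * are hypothetical values a real device would get from its PCI mapping
 * callback; both addresses must be page aligned.
 *
 *      int rc = VINF_SUCCESS;
 *      // The old mapping goes away first...
 *      if (GCPhysOld != NIL_RTGCPHYS)
 *          rc = PGMR3PhysMmio2Unmap(pVM, pDevIns, hMmio2, GCPhysOld);
 *      // ...then the region is mapped at the new address.
 *      if (RT_SUCCESS(rc) && GCPhysNew != NIL_RTGCPHYS)
 *          rc = PGMR3PhysMmio2Map(pVM, pDevIns, hMmio2, GCPhysNew);
 */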
3760
3761
3762/**
3763 * Worker form PGMR3PhysMmio2Map.
3764 * Worker for PGMR3PhysMmio2Unmap.
3765static int pgmR3PhysMmio2UnmapLocked(PVM pVM, uint32_t const idxFirst, uint32_t const cChunks, RTGCPHYS const GCPhysIn)
3766{
3767 /*
3768 * Validate input.
3769 */
3770 RTGCPHYS cbRange = 0;
3771 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++)
3772 {
3773 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
3774 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
3775 AssertReturn(pMmio2->idRamRange == pRamRange->idRange, VERR_INTERNAL_ERROR_3);
3776 AssertReturn(pMmio2->fFlags & PGMREGMMIO2RANGE_F_MAPPED, VERR_WRONG_ORDER);
3777 AssertReturn(pMmio2->GCPhys != NIL_RTGCPHYS, VERR_WRONG_ORDER);
3778 cbRange += pRamRange->cb;
3779 }
3780
3781 PPGMREGMMIO2RANGE const pFirstMmio2 = &pVM->pgm.s.aMmio2Ranges[idxFirst];
3782 PPGMRAMRANGE const pFirstRamRange = pVM->pgm.s.apMmio2RamRanges[idxFirst];
3783 const char * const pszDesc = pFirstRamRange->pszDesc;
3784 AssertLogRelMsgReturn(GCPhysIn == pFirstMmio2->GCPhys || GCPhysIn == NIL_RTGCPHYS,
3785 ("GCPhys=%RGp, actual address is %RGp\n", GCPhysIn, pFirstMmio2->GCPhys),
3786 VERR_MISMATCH);
3787     RTGCPHYS const GCPhys = pFirstMmio2->GCPhys; /* (asserted to be non-NIL in the validation loop above) */
3788 Log(("PGMR3PhysMmio2Unmap: %RGp-%RGp %s\n", GCPhys, GCPhys + cbRange - 1U, pszDesc));
3789
3790 uint16_t const fOldFlags = pFirstMmio2->fFlags;
3791 Assert(fOldFlags & PGMREGMMIO2RANGE_F_MAPPED);
3792
3793 /* Find the first entry in the lookup table and verify the overlapping flag. */
3794 uint32_t idxLookup = pgmR3PhysRamRangeFindOverlappingIndex(pVM, GCPhys, GCPhys + pFirstRamRange->cb - 1U);
3795 AssertLogRelMsgReturn(idxLookup < pVM->pgm.s.RamRangeUnion.cLookupEntries,
3796 ("MMIO2 range not found at %RGp LB %RGp in the lookup table! (%s)\n",
3797 GCPhys, pFirstRamRange->cb, pszDesc),
3798 VERR_INTERNAL_ERROR_2);
3799
3800 uint32_t const idLookupRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
3801 AssertLogRelReturn(idLookupRange != 0 && idLookupRange <= pVM->pgm.s.idRamRangeMax, VERR_INTERNAL_ERROR_5);
3802 PPGMRAMRANGE const pLookupRange = pVM->pgm.s.apRamRanges[idLookupRange];
3803 AssertLogRelReturn(pLookupRange, VERR_INTERNAL_ERROR_3);
3804
3805 AssertLogRelMsgReturn(fOldFlags & PGMREGMMIO2RANGE_F_OVERLAPPING
3806 ? pLookupRange != pFirstRamRange : pLookupRange == pFirstRamRange,
3807 ("MMIO2 unmap mixup at %RGp LB %RGp fl=%#x (%s) vs %RGp LB %RGp (%s)\n",
3808 GCPhys, cbRange, fOldFlags, pszDesc, pLookupRange->GCPhys, pLookupRange->cb, pLookupRange->pszDesc),
3809 VERR_INTERNAL_ERROR_4);
3810
3811 /*
3812 * If monitoring dirty pages, we must deregister the handlers first.
3813 */
3814 if ( pFirstMmio2->pPhysHandlerR3
3815 && (fOldFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
3816 pgmR3PhysMmio2DisableDirtyPageTracing(pVM, idxFirst, cChunks);
3817
3818 /*
3819 * Unmap it.
3820 */
3821 int rcRet = VINF_SUCCESS;
3822#ifdef VBOX_WITH_NATIVE_NEM
3823 uint32_t const fNemFlags = NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2
3824 | (fOldFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES
3825 ? NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES : 0);
3826#endif
3827 if (fOldFlags & PGMREGMMIO2RANGE_F_OVERLAPPING)
3828 {
3829 /*
3830 * We've replaced RAM, replace with zero pages.
3831 *
3832 * Note! This is where we might differ a little from a real system, because
3833 * it's likely to just show the RAM pages as they were before the
3834 * MMIO2 region was mapped here.
3835 */
3836 /* Only one chunk allowed when overlapping! */
3837 Assert(cChunks == 1);
3838 /* No NEM stuff should ever get here, see assertion in the mapping function. */
3839 AssertReturn(!VM_IS_NEM_ENABLED(pVM), VERR_INTERNAL_ERROR_4);
3840
3841 /* Restore the RAM pages we've replaced. */
3842 PPGMPAGE pPageDst = &pLookupRange->aPages[(pFirstRamRange->GCPhys - pLookupRange->GCPhys) >> GUEST_PAGE_SHIFT];
3843 uint32_t cPagesLeft = pFirstRamRange->cb >> GUEST_PAGE_SHIFT;
3844 pVM->pgm.s.cZeroPages += cPagesLeft;
3845 while (cPagesLeft-- > 0)
3846 {
3847 PGM_PAGE_INIT_ZERO(pPageDst, pVM, PGMPAGETYPE_RAM);
3848 pPageDst++;
3849 }
3850
3851 /* Update range state. */
3852 pFirstMmio2->fFlags &= ~(PGMREGMMIO2RANGE_F_OVERLAPPING | PGMREGMMIO2RANGE_F_MAPPED);
3853 pFirstMmio2->GCPhys = NIL_RTGCPHYS;
3854 Assert(pFirstRamRange->GCPhys == NIL_RTGCPHYS);
3855 Assert(pFirstRamRange->GCPhysLast == NIL_RTGCPHYS);
3856 }
3857 else
3858 {
3859 /*
3860 * Unlink the chunks related to the MMIO/MMIO2 region.
3861 */
3862 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++)
3863 {
3864 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
3865 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
3866 Assert(pMmio2->idRamRange == pRamRange->idRange);
3867 Assert(pMmio2->GCPhys == pRamRange->GCPhys);
3868
3869#ifdef VBOX_WITH_NATIVE_NEM
3870 if (VM_IS_NEM_ENABLED(pVM)) /* Notify NEM. */
3871 {
3872 uint8_t u2State = UINT8_MAX;
3873 int rc = NEMR3NotifyPhysMmioExUnmap(pVM, pRamRange->GCPhys, pRamRange->cb, fNemFlags,
3874 NULL, pMmio2->pbR3, &u2State, &pRamRange->uNemRange);
3875 AssertLogRelMsgStmt(RT_SUCCESS(rc),
3876                                     ("NEMR3NotifyPhysMmioExUnmap failed: %Rrc - GCPhys=%RGp LB %RGp fNemFlags=%#x pbR3=%p %s\n",
3877 rc, pRamRange->GCPhys, pRamRange->cb, fNemFlags, pMmio2->pbR3, pRamRange->pszDesc),
3878 rcRet = rc);
3879 if (u2State != UINT8_MAX)
3880 pgmPhysSetNemStateForPages(pRamRange->aPages, pRamRange->cb >> GUEST_PAGE_SHIFT, u2State);
3881 }
3882#endif
3883
3884 int rc = pgmR3PhysRamRangeRemoveLookup(pVM, pRamRange, &idxLookup);
3885 AssertLogRelMsgStmt(RT_SUCCESS(rc),
3886 ("pgmR3PhysRamRangeRemoveLookup failed: %Rrc - GCPhys=%RGp LB %RGp %s\n",
3887 rc, pRamRange->GCPhys, pRamRange->cb, pRamRange->pszDesc),
3888 rcRet = rc);
3889
3890 pMmio2->GCPhys = NIL_RTGCPHYS;
3891 pMmio2->fFlags &= ~(PGMREGMMIO2RANGE_F_OVERLAPPING | PGMREGMMIO2RANGE_F_MAPPED);
3892 Assert(pRamRange->GCPhys == NIL_RTGCPHYS);
3893 Assert(pRamRange->GCPhysLast == NIL_RTGCPHYS);
3894 }
3895 }
3896
3897 /* Force a PGM pool flush as guest ram references have been changed. */
3898 /** @todo not entirely SMP safe; assuming for now the guest takes care
3899 * of this internally (not touch mapped mmio while changing the
3900 * mapping). */
3901 PVMCPU pVCpu = VMMGetCpu(pVM);
3902 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3903 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3904
3905 pgmPhysInvalidatePageMapTLB(pVM);
3906 pgmPhysInvalidRamRangeTlbs(pVM);
3907
3908 return rcRet;
3909}
3910
3911
3912/**
3913 * Unmaps an MMIO2 region.
3914 *
3915 * This is typically done when a guest / the bios / state loading changes the
3916 * PCI config. The replacing of base memory has the same restrictions as during
3917 * registration, of course.
3918 */
3919VMMR3_INT_DECL(int) PGMR3PhysMmio2Unmap(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS GCPhys)
3920{
3921 /*
3922 * Validate input
3923 */
3924 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3925 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3926 AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE, VERR_INVALID_HANDLE);
3927 if (GCPhys != NIL_RTGCPHYS)
3928 {
3929 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
3930 AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3931 }
3932
3933 uint32_t cChunks = 0;
3934 uint32_t const idxFirst = pgmR3PhysMmio2ResolveHandle(pVM, pDevIns, hMmio2, &cChunks);
3935 AssertReturn((int32_t)idxFirst >= 0, (int32_t)idxFirst);
3936
3937
3938 /*
3939 * Take the PGM lock and call worker.
3940 */
3941 int rc = PGM_LOCK(pVM);
3942 AssertRCReturn(rc, rc);
3943
3944 rc = pgmR3PhysMmio2UnmapLocked(pVM, idxFirst, cChunks, GCPhys);
3945#ifdef VBOX_STRICT
3946 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, false /*fRamRelaxed*/);
3947#endif
3948
3949 PGM_UNLOCK(pVM);
3950 return rc;
3951}
3952
3953
3954/**
3955 * Reduces the mapping size of an MMIO2 region.
3956 *
3957 * This is mainly for dealing with old saved states after changing the default
3958 * size of a mapping region. See PDMDevHlpMmio2Reduce and
3959 * PDMPCIDEV::pfnRegionLoadChangeHookR3.
3960 *
3961 * The region must not currently be mapped when making this call. The VM state
3962 * must be state restore or VM construction.
3963 *
3964 * @returns VBox status code.
3965 * @param pVM The cross context VM structure.
3966 * @param pDevIns The device instance owning the region.
3967 * @param hMmio2 The handle of the region to reduce.
3968 * @param cbRegion The new mapping size.
3969 */
3970VMMR3_INT_DECL(int) PGMR3PhysMmio2Reduce(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS cbRegion)
3971{
3972 /*
3973 * Validate input
3974 */
3975 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3976 AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE && hMmio2 != 0 && hMmio2 <= RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges),
3977 VERR_INVALID_HANDLE);
3978 AssertReturn(cbRegion >= GUEST_PAGE_SIZE, VERR_INVALID_PARAMETER);
3979 AssertReturn(!(cbRegion & GUEST_PAGE_OFFSET_MASK), VERR_UNSUPPORTED_ALIGNMENT);
3980
3981 PVMCPU const pVCpu = VMMGetCpu(pVM);
3982 AssertReturn(pVCpu && pVCpu->idCpu == 0, VERR_VM_THREAD_NOT_EMT);
3983
3984 VMSTATE const enmVmState = VMR3GetState(pVM);
3985 AssertLogRelMsgReturn( enmVmState == VMSTATE_CREATING
3986 || enmVmState == VMSTATE_LOADING,
3987 ("enmVmState=%d (%s)\n", enmVmState, VMR3GetStateName(enmVmState)),
3988 VERR_VM_INVALID_VM_STATE);
3989
3990 /*
3991 * Grab the PGM lock and validate the request properly.
3992 */
3993 int rc = PGM_LOCK(pVM);
3994 AssertRCReturn(rc, rc);
3995
3996 uint32_t cChunks = 0;
3997 uint32_t const idxFirst = pgmR3PhysMmio2ResolveHandle(pVM, pDevIns, hMmio2, &cChunks);
3998 if ((int32_t)idxFirst >= 0)
3999 {
4000 PPGMREGMMIO2RANGE const pFirstMmio2 = &pVM->pgm.s.aMmio2Ranges[idxFirst];
4001 PPGMRAMRANGE const pFirstRamRange = pVM->pgm.s.apMmio2RamRanges[idxFirst];
4002 if ( !(pFirstMmio2->fFlags & PGMREGMMIO2RANGE_F_MAPPED)
4003 && pFirstMmio2->GCPhys == NIL_RTGCPHYS)
4004 {
4005 /*
4006 * NOTE! Current implementation does not support multiple ranges.
4007 * Implement when there is a real world need and thus a testcase.
4008 */
4009 if (cChunks == 1)
4010 {
4011 /*
4012 * The request has to be within the initial size.
4013 */
4014 if (cbRegion <= pFirstMmio2->cbReal)
4015 {
4016 /*
4017 * All we have to do is modify the size stored in the RAM range,
4018 * as it is the one used when mapping it and such.
4019 * The two page counts stored in PGMR0PERVM remain unchanged.
4020 */
4021 Log(("PGMR3PhysMmio2Reduce: %s changes from %#RGp bytes (%#RGp) to %#RGp bytes.\n",
4022 pFirstRamRange->pszDesc, pFirstRamRange->cb, pFirstMmio2->cbReal, cbRegion));
4023 pFirstRamRange->cb = cbRegion;
4024 rc = VINF_SUCCESS;
4025 }
4026 else
4027 {
4028 AssertLogRelMsgFailed(("MMIO2/%s: cbRegion=%#RGp > cbReal=%#RGp\n",
4029 pFirstRamRange->pszDesc, cbRegion, pFirstMmio2->cbReal));
4030 rc = VERR_OUT_OF_RANGE;
4031 }
4032 }
4033 else
4034 {
4035 AssertLogRelMsgFailed(("MMIO2/%s: more than one chunk: %d (flags=%#x)\n",
4036 pFirstRamRange->pszDesc, cChunks, pFirstMmio2->fFlags));
4037 rc = VERR_NOT_SUPPORTED;
4038 }
4039 }
4040 else
4041 {
4042 AssertLogRelMsgFailed(("MMIO2/%s: cannot change size of mapped range: %RGp..%RGp\n", pFirstRamRange->pszDesc,
4043 pFirstMmio2->GCPhys, pFirstMmio2->GCPhys + pFirstRamRange->cb - 1U));
4044 rc = VERR_WRONG_ORDER;
4045 }
4046 }
4047 else
4048 rc = (int32_t)idxFirst;
4049
4050 PGM_UNLOCK(pVM);
4051 return rc;
4052}
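/*
 * Usage sketch (illustrative only, not part of the build): shrinking an
 * MMIO2 region from a saved state region-size change hook, as described
 * above. The 2 MiB figure is a made-up example of an older default size;
 * the call is only valid while the region is unmapped and the VM is being
 * created or loaded, and the new size must not exceed the real size.
 *
 *      int rc = PGMR3PhysMmio2Reduce(pVM, pDevIns, hMmio2, 2 * _1M);
 *      AssertRCReturn(rc, rc);
 */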
4053
4054
4055/**
4056 * Validates @a hMmio2, making sure it belongs to @a pDevIns.
4057 *
4058 * @returns VBox status code.
4059 * @param pVM The cross context VM structure.
4060 * @param pDevIns The device which allegedly owns @a hMmio2.
4061 * @param hMmio2 The handle to validate.
4062 */
4063VMMR3_INT_DECL(int) PGMR3PhysMmio2ValidateHandle(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2)
4064{
4065 /*
4066 * Validate input
4067 */
4068 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
4069 AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
4070
4071 /*
4072 * Just do this the simple way.
4073 */
4074 int rc = PGM_LOCK_VOID(pVM);
4075 AssertRCReturn(rc, rc);
4076 uint32_t cChunks;
4077 uint32_t const idxFirst = pgmR3PhysMmio2ResolveHandle(pVM, pDevIns, hMmio2, &cChunks);
4078 PGM_UNLOCK(pVM);
4079 AssertReturn((int32_t)idxFirst >= 0, (int32_t)idxFirst);
4080 return VINF_SUCCESS;
4081}
4082
4083
4084/**
4085 * Gets the mapping address of an MMIO2 region.
4086 *
4087 * @returns Mapping address, NIL_RTGCPHYS if not mapped or invalid handle.
4088 *
4089 * @param pVM The cross context VM structure.
4090 * @param pDevIns The device owning the MMIO2 handle.
4091 * @param hMmio2 The region handle.
4092 */
4093VMMR3_INT_DECL(RTGCPHYS) PGMR3PhysMmio2GetMappingAddress(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2)
4094{
4095 RTGCPHYS GCPhysRet = NIL_RTGCPHYS;
4096
4097 int rc = PGM_LOCK_VOID(pVM);
4098 AssertRCReturn(rc, NIL_RTGCPHYS);
4099
4100 uint32_t cChunks;
4101 uint32_t const idxFirst = pgmR3PhysMmio2ResolveHandle(pVM, pDevIns, hMmio2, &cChunks);
4102 if ((int32_t)idxFirst >= 0)
4103 GCPhysRet = pVM->pgm.s.aMmio2Ranges[idxFirst].GCPhys;
4104
4105 PGM_UNLOCK(pVM);
4106     return GCPhysRet;
4107}
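/*
 * Usage sketch (illustrative only, not part of the build): checking whether
 * an MMIO2 region is currently mapped before doing guest-physical address
 * arithmetic on it.
 *
 *      RTGCPHYS const GCPhysMmio2 = PGMR3PhysMmio2GetMappingAddress(pVM, pDevIns, hMmio2);
 *      if (GCPhysMmio2 != NIL_RTGCPHYS)
 *          Log(("MMIO2 region is currently mapped at %RGp\n", GCPhysMmio2));
 */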
4108
4109
4110/**
4111 * Worker for PGMR3PhysMmio2QueryAndResetDirtyBitmap.
4112 *
4113 * Called holding the PGM lock.
4114 */
4115static int pgmR3PhysMmio2QueryAndResetDirtyBitmapLocked(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
4116 void *pvBitmap, size_t cbBitmap)
4117{
4118 /*
4119 * Continue validation.
4120 */
4121 uint32_t cChunks;
4122 uint32_t const idxFirst = pgmR3PhysMmio2ResolveHandle(pVM, pDevIns, hMmio2, &cChunks);
4123 AssertReturn((int32_t)idxFirst >= 0, (int32_t)idxFirst);
4124 PPGMREGMMIO2RANGE const pFirstMmio2 = &pVM->pgm.s.aMmio2Ranges[idxFirst];
4125 AssertReturn(pFirstMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES, VERR_INVALID_FUNCTION);
4126
4127 int rc = VINF_SUCCESS;
4128 if (cbBitmap || pvBitmap)
4129 {
4130 /*
4131 * Check the bitmap size and collect all the dirty flags.
4132 */
4133 RTGCPHYS cbTotal = 0;
4134 uint16_t fTotalDirty = 0;
4135 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++)
4136 {
4137             /* Not using cbReal here, because NEM is not involved in the creation, only the mapping. */
4138 cbTotal += pVM->pgm.s.apMmio2RamRanges[idx]->cb;
4139 fTotalDirty |= pVM->pgm.s.aMmio2Ranges[idx].fFlags;
4140 }
4141 size_t const cbTotalBitmap = RT_ALIGN_T(cbTotal, GUEST_PAGE_SIZE * 64, RTGCPHYS) / GUEST_PAGE_SIZE / 8;
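        /* Worked example of the sizing rule above (illustrative figures): a 16 MiB
           region is 4096 guest pages and needs a 512 byte bitmap, while a 4 MiB +
           4 KiB region (1025 pages) is first rounded up to a 64 page multiple
           (1088 pages) and thus needs a 136 byte bitmap. */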
4142
4143 AssertPtrReturn(pvBitmap, VERR_INVALID_POINTER);
4144 AssertReturn(RT_ALIGN_P(pvBitmap, sizeof(uint64_t)) == pvBitmap, VERR_INVALID_POINTER);
4145 AssertReturn(cbBitmap == cbTotalBitmap, VERR_INVALID_PARAMETER);
4146
4147#ifdef VBOX_WITH_PGM_NEM_MODE
4148 /*
4149          * If there is no physical handler we must be in NEM mode, with NEM
4150          * taking care of the dirty bit collecting.
4151 */
4152 if (pFirstMmio2->pPhysHandlerR3 == NULL)
4153 {
4154/** @todo This does not integrate at all with --execute-all-in-iem, leaving the
4155 * screen blank when using it together with --driverless. Fixing this won't be
4156 * entirely easy as we take the PGM_PAGE_HNDL_PHYS_STATE_DISABLED page status to
4157 * mean a dirty page. */
4158 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_INTERNAL_ERROR_4);
4159 uint8_t *pbBitmap = (uint8_t *)pvBitmap;
4160 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++)
4161 {
4162 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
4163 size_t const cbBitmapChunk = (pRamRange->cb / GUEST_PAGE_SIZE + 7) / 8;
4164 Assert((RTGCPHYS)cbBitmapChunk * GUEST_PAGE_SIZE * 8 == pRamRange->cb);
4165                 Assert(pRamRange->GCPhys == pVM->pgm.s.aMmio2Ranges[idx].GCPhys); /* (No MMIO2 inside RAM in NEM mode!) */
4166 int rc2 = NEMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pRamRange->GCPhys, pRamRange->cb,
4167 pRamRange->uNemRange, pbBitmap, cbBitmapChunk);
4168 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
4169 rc = rc2;
4170 pbBitmap += pRamRange->cb / GUEST_PAGE_SIZE / 8;
4171 }
4172 }
4173 else
4174#endif
4175 if (fTotalDirty & PGMREGMMIO2RANGE_F_IS_DIRTY)
4176 {
4177 if ( (pFirstMmio2->fFlags & (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
4178 == (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
4179 {
4180 /*
4181 * Reset each chunk, gathering dirty bits.
4182 */
4183 RT_BZERO(pvBitmap, cbBitmap); /* simpler for now. */
4184 for (uint32_t iChunk = 0, idx = idxFirst, iPageNo = 0; iChunk < cChunks; iChunk++, idx++)
4185 {
4186 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
4187 if (pMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY)
4188 {
4189 int rc2 = pgmHandlerPhysicalResetMmio2WithBitmap(pVM, pMmio2->GCPhys, pvBitmap, iPageNo);
4190 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
4191 rc = rc2;
4192 pMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;
4193 }
4194 iPageNo += pVM->pgm.s.apMmio2RamRanges[idx]->cb >> GUEST_PAGE_SHIFT;
4195 }
4196 }
4197 else
4198 {
4199 /*
4200 * If not mapped or tracking is disabled, we return the
4201 * PGMREGMMIO2RANGE_F_IS_DIRTY status for all pages. We cannot
4202 * get more accurate data than that after unmapping or disabling.
4203 */
4204 RT_BZERO(pvBitmap, cbBitmap);
4205 for (uint32_t iChunk = 0, idx = idxFirst, iPageNo = 0; iChunk < cChunks; iChunk++, idx++)
4206 {
4207 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
4208 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
4209 if (pMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY)
4210 {
4211 ASMBitSetRange(pvBitmap, iPageNo, iPageNo + (pRamRange->cb >> GUEST_PAGE_SHIFT));
4212 pMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;
4213 }
4214 iPageNo += pRamRange->cb >> GUEST_PAGE_SHIFT;
4215 }
4216 }
4217 }
4218 /*
4219 * No dirty chunks.
4220 */
4221 else
4222 RT_BZERO(pvBitmap, cbBitmap);
4223 }
4224 /*
4225 * No bitmap. Reset the region if tracking is currently enabled.
4226 */
4227 else if ( (pFirstMmio2->fFlags & (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
4228 == (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
4229 {
4230#ifdef VBOX_WITH_PGM_NEM_MODE
4231 if (pFirstMmio2->pPhysHandlerR3 == NULL)
4232 {
4233 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_INTERNAL_ERROR_4);
4234 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++)
4235 {
4236 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
4237                 Assert(pRamRange->GCPhys == pVM->pgm.s.aMmio2Ranges[idx].GCPhys); /* (No MMIO2 inside RAM in NEM mode!) */
4238 int rc2 = NEMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pRamRange->GCPhys, pRamRange->cb,
4239 pRamRange->uNemRange, NULL, 0);
4240 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
4241 rc = rc2;
4242 }
4243 }
4244 else
4245#endif
4246 {
4247 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++)
4248 {
4249 pVM->pgm.s.aMmio2Ranges[idx].fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;
4250 int rc2 = PGMHandlerPhysicalReset(pVM, pVM->pgm.s.aMmio2Ranges[idx].GCPhys);
4251 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
4252 rc = rc2;
4253 }
4254 }
4255 }
4256
4257 return rc;
4258}
4259
4260
4261/**
4262 * Queries the dirty page bitmap and resets the monitoring.
4263 *
4264 * The PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES flag must be specified when
4265 * creating the range for this to work.
4266 *
4267 * @returns VBox status code.
4268 * @retval VERR_INVALID_FUNCTION if not created using
4269 * PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES.
4270 * @param pVM The cross context VM structure.
4271 * @param pDevIns The device owning the MMIO2 handle.
4272 * @param hMmio2 The region handle.
4273 * @param pvBitmap The output bitmap. Must be 8-byte aligned. Ignored
4274 * when @a cbBitmap is zero.
4275 * @param cbBitmap The size of the bitmap. Must be the size of the whole
4276 * MMIO2 range, rounded up to the nearest 8 bytes.
4277 * When zero only a reset is done.
4278 */
4279VMMR3_INT_DECL(int) PGMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
4280 void *pvBitmap, size_t cbBitmap)
4281{
4282 /*
4283     * Do some basic validation before grabbing the PGM lock and continuing.
4284 */
4285 AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
4286 AssertReturn(RT_ALIGN_Z(cbBitmap, sizeof(uint64_t)) == cbBitmap, VERR_INVALID_PARAMETER);
4287 int rc = PGM_LOCK(pVM);
4288 if (RT_SUCCESS(rc))
4289 {
4290 STAM_PROFILE_START(&pVM->pgm.s.StatMmio2QueryAndResetDirtyBitmap, a);
4291 rc = pgmR3PhysMmio2QueryAndResetDirtyBitmapLocked(pVM, pDevIns, hMmio2, pvBitmap, cbBitmap);
4292 STAM_PROFILE_STOP(&pVM->pgm.s.StatMmio2QueryAndResetDirtyBitmap, a);
4293 PGM_UNLOCK(pVM);
4294 }
4295 return rc;
4296}
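/*
 * Usage sketch (illustrative only, not part of the build): periodic dirty
 * page harvesting, roughly as a graphics device might do it. The 32 MiB
 * region size is hypothetical; the bitmap sizing follows the rule in the
 * function documentation above (whole range, one bit per page, rounded up
 * to 8 bytes).
 *
 *      RTGCPHYS const cbRegion = 32 * _1M;
 *      size_t const   cbBitmap = RT_ALIGN_T(cbRegion, GUEST_PAGE_SIZE * 64, RTGCPHYS)
 *                              / GUEST_PAGE_SIZE / 8;
 *      uint64_t *pau64Bitmap = (uint64_t *)RTMemAllocZ(cbBitmap);
 *      if (pau64Bitmap)
 *      {
 *          int rc = PGMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pDevIns, hMmio2, pau64Bitmap, cbBitmap);
 *          if (RT_SUCCESS(rc))
 *          {
 *              // Walk pau64Bitmap and refresh only the pages whose bits are set.
 *          }
 *          RTMemFree(pau64Bitmap);
 *      }
 */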
4297
4298
4299/**
4300 * Worker for PGMR3PhysMmio2ControlDirtyPageTracking
4301 *
4302 * Called owning the PGM lock.
4303 */
4304static int pgmR3PhysMmio2ControlDirtyPageTrackingLocked(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, bool fEnabled)
4305{
4306 /*
4307 * Continue validation.
4308 */
4309 uint32_t cChunks;
4310 uint32_t const idxFirst = pgmR3PhysMmio2ResolveHandle(pVM, pDevIns, hMmio2, &cChunks);
4311 AssertReturn((int32_t)idxFirst >= 0, (int32_t)idxFirst);
4312 PPGMREGMMIO2RANGE const pFirstMmio2 = &pVM->pgm.s.aMmio2Ranges[idxFirst];
4313 AssertReturn(pFirstMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES, VERR_INVALID_FUNCTION);
4314
4315#ifdef VBOX_WITH_PGM_NEM_MODE
4316 /*
4317     * This is a nop if NEM is responsible for doing the tracking; we simply
4318 * leave the tracking on all the time there.
4319 */
4320 if (pFirstMmio2->pPhysHandlerR3 == NULL)
4321 {
4322 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_INTERNAL_ERROR_4);
4323 return VINF_SUCCESS;
4324 }
4325#endif
4326
4327 /*
4328 * Anything needing doing?
4329 */
4330 if (fEnabled != RT_BOOL(pFirstMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
4331 {
4332 LogFlowFunc(("fEnabled=%RTbool %s\n", fEnabled, pVM->pgm.s.apMmio2RamRanges[idxFirst]->pszDesc));
4333
4334 /*
4335 * Update the PGMREGMMIO2RANGE_F_TRACKING_ENABLED flag.
4336 */
4337 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++)
4338 if (fEnabled)
4339 pVM->pgm.s.aMmio2Ranges[idx].fFlags |= PGMREGMMIO2RANGE_F_TRACKING_ENABLED;
4340 else
4341 pVM->pgm.s.aMmio2Ranges[idx].fFlags &= ~PGMREGMMIO2RANGE_F_TRACKING_ENABLED;
4342
4343 /*
4344 * Enable/disable handlers if currently mapped.
4345 *
4346 * We ignore status codes here as we've already changed the flags and
4347 * returning a failure status now would be confusing. Besides, the two
4348 * functions will continue past failures. As argued in the mapping code,
4349 * it's in the release log.
4350 */
4351 if (pFirstMmio2->fFlags & PGMREGMMIO2RANGE_F_MAPPED)
4352 {
4353 if (fEnabled)
4354 pgmR3PhysMmio2EnableDirtyPageTracing(pVM, idxFirst, cChunks);
4355 else
4356 pgmR3PhysMmio2DisableDirtyPageTracing(pVM, idxFirst, cChunks);
4357 }
4358 }
4359 else
4360 LogFlowFunc(("fEnabled=%RTbool %s - no change\n", fEnabled, pVM->pgm.s.apMmio2RamRanges[idxFirst]->pszDesc));
4361
4362 return VINF_SUCCESS;
4363}
4364
4365
4366/**
4367 * Controls the dirty page tracking for an MMIO2 range.
4368 *
4369 * @returns VBox status code.
4370 * @param pVM The cross context VM structure.
4371 * @param pDevIns The device owning the MMIO2 memory.
4372 * @param hMmio2 The handle of the region.
4373 * @param fEnabled The new tracking state.
4374 */
4375VMMR3_INT_DECL(int) PGMR3PhysMmio2ControlDirtyPageTracking(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, bool fEnabled)
4376{
4377 /*
4378     * Do some basic validation before grabbing the PGM lock and continuing.
4379 */
4380 AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
4381 int rc = PGM_LOCK(pVM);
4382 if (RT_SUCCESS(rc))
4383 {
4384 rc = pgmR3PhysMmio2ControlDirtyPageTrackingLocked(pVM, pDevIns, hMmio2, fEnabled);
4385 PGM_UNLOCK(pVM);
4386 }
4387 return rc;
4388}
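/*
 * Usage sketch (illustrative only, not part of the build): only paying for
 * dirty page tracking while something actually consumes the dirty bitmap.
 *
 *      // A consumer (e.g. a screen scraper or saved state writer) starts:
 *      PGMR3PhysMmio2ControlDirtyPageTracking(pVM, pDevIns, hMmio2, true);
 *      // ...and when it stops, tracking is switched off again:
 *      PGMR3PhysMmio2ControlDirtyPageTracking(pVM, pDevIns, hMmio2, false);
 */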
4389
4390
4391/**
4392 * Changes the region number of an MMIO2 region.
4393 *
4394 * This is only for dealing with saved state issues, nothing else.
4395 *
4396 * @returns VBox status code.
4397 *
4398 * @param pVM The cross context VM structure.
4399 * @param pDevIns The device owning the MMIO2 memory.
4400 * @param hMmio2 The handle of the region.
4401 * @param iNewRegion The new region index.
4402 *
4403 * @thread EMT(0)
4404 * @sa @bugref{9359}
4405 */
4406VMMR3_INT_DECL(int) PGMR3PhysMmio2ChangeRegionNo(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, uint32_t iNewRegion)
4407{
4408 /*
4409 * Validate input.
4410 */
4411 VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
4412 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_LOADING, VERR_VM_INVALID_VM_STATE);
4413 AssertReturn(iNewRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
4414
4415 int rc = PGM_LOCK(pVM);
4416 AssertRCReturn(rc, rc);
4417
4418 /* Validate and resolve the handle. */
4419 uint32_t cChunks;
4420 uint32_t const idxFirst = pgmR3PhysMmio2ResolveHandle(pVM, pDevIns, hMmio2, &cChunks);
4421 if ((int32_t)idxFirst >= 0)
4422 {
4423         /* Check that the new region number is unused. */
4424 PPGMREGMMIO2RANGE const pConflict = pgmR3PhysMmio2Find(pVM, pDevIns, pVM->pgm.s.aMmio2Ranges[idxFirst].iSubDev,
4425 iNewRegion);
4426 if (!pConflict)
4427 {
4428 /*
4429 * Make the change.
4430 */
4431 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++)
4432 pVM->pgm.s.aMmio2Ranges[idx].iRegion = (uint8_t)iNewRegion;
4433 rc = VINF_SUCCESS;
4434 }
4435 else
4436 {
4437 AssertLogRelMsgFailed(("MMIO2/%s: iNewRegion=%d conflicts with %s\n", pVM->pgm.s.apMmio2RamRanges[idxFirst]->pszDesc,
4438 iNewRegion, pVM->pgm.s.apMmio2RamRanges[pConflict->idRamRange]->pszDesc));
4439 rc = VERR_RESOURCE_IN_USE;
4440 }
4441 }
4442 else
4443 rc = (int32_t)idxFirst;
4444
4445 PGM_UNLOCK(pVM);
4446 return rc;
4447}
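/*
 * Usage sketch (illustrative only, not part of the build): renumbering a
 * region while loading a saved state that used an older layout. The version
 * constant and the region number are hypothetical.
 *
 *      // In the device's load-exec path, on EMT(0), before mapping the region:
 *      if (uVersion < MY_DEV_SAVED_STATE_VERSION_NEW_LAYOUT)
 *      {
 *          int rc = PGMR3PhysMmio2ChangeRegionNo(pVM, pDevIns, hMmio2, 2);
 *          AssertRCReturn(rc, rc);
 *      }
 */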
4448
4449
4450
4451/*********************************************************************************************************************************
4452* ROM *
4453*********************************************************************************************************************************/
4454
4455/**
4456 * Worker for PGMR3PhysRomRegister.
4457 *
4458 * This is here to simplify lock management, i.e. the caller does all the
4459 * locking and we can simply return without needing to remember to unlock
4460 * anything first.
4461 *
4462 * @returns VBox status code.
4463 * @param pVM The cross context VM structure.
4464 * @param pDevIns The device instance owning the ROM.
4465 * @param GCPhys First physical address in the range.
4466 * Must be page aligned!
4467 * @param cb The size of the range (in bytes).
4468 * Must be page aligned!
4469 * @param pvBinary Pointer to the binary data backing the ROM image.
4470 * @param cbBinary The size of the binary data pvBinary points to.
4471 *                      This must be less than or equal to @a cb.
4472 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAGS_SHADOWED
4473 * and/or PGMPHYS_ROM_FLAGS_PERMANENT_BINARY.
4474 * @param pszDesc Pointer to description string. This must not be freed.
4475 */
4476static int pgmR3PhysRomRegisterLocked(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
4477 const void *pvBinary, uint32_t cbBinary, uint8_t fFlags, const char *pszDesc)
4478{
4479 /*
4480 * Validate input.
4481 */
4482 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
4483 AssertReturn(RT_ALIGN_T(GCPhys, GUEST_PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
4484 AssertReturn(RT_ALIGN_T(cb, GUEST_PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
4485 RTGCPHYS const GCPhysLast = GCPhys + (cb - 1);
4486 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
4487 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
4488 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
4489 AssertReturn(!(fFlags & ~PGMPHYS_ROM_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
4490
4491 PVMCPU const pVCpu = VMMGetCpu(pVM);
4492 AssertReturn(pVCpu && pVCpu->idCpu == 0, VERR_VM_THREAD_NOT_EMT);
4493 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
4494
4495 const uint32_t cGuestPages = cb >> GUEST_PAGE_SHIFT;
4496 AssertReturn(cGuestPages <= PGM_MAX_PAGES_PER_ROM_RANGE, VERR_OUT_OF_RANGE);
4497
4498#ifdef VBOX_WITH_PGM_NEM_MODE
4499 const uint32_t cHostPages = RT_ALIGN_T(cb, HOST_PAGE_SIZE, RTGCPHYS) >> HOST_PAGE_SHIFT;
4500#endif
4501
4502 /*
4503 * Make sure we've got a free ROM range.
4504 */
4505 uint8_t const idRomRange = pVM->pgm.s.cRomRanges;
4506 AssertLogRelReturn(idRomRange < RT_ELEMENTS(pVM->pgm.s.apRomRanges), VERR_PGM_TOO_MANY_ROM_RANGES);
4507
4508 /*
4509     * Look thru the existing ROM ranges and make sure there aren't any
4510     * overlapping registrations.
4511 */
4512 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
4513 for (uint32_t idx = 0; idx < cRomRanges; idx++)
4514 {
4515 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
4516 AssertLogRelMsgReturn( GCPhys > pRom->GCPhysLast
4517 || GCPhysLast < pRom->GCPhys,
4518 ("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
4519 GCPhys, GCPhysLast, pszDesc,
4520 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
4521 VERR_PGM_RAM_CONFLICT);
4522 }
4523
4524 /*
4525 * Find the RAM location and check for conflicts.
4526 *
4527 * Conflict detection is a bit different than for RAM registration since a
4528 * ROM can be located within a RAM range. So, what we have to check for is
4529 * other memory types (other than RAM that is) and that we don't span more
4530 * than one RAM range (lazy).
4531 */
4532 uint32_t idxInsert = UINT32_MAX;
4533 PPGMRAMRANGE const pOverlappingRange = pgmR3PhysRamRangeFindOverlapping(pVM, GCPhys, GCPhysLast, &idxInsert);
4534 if (pOverlappingRange)
4535 {
4536 /* completely within? */
4537 AssertLogRelMsgReturn( GCPhys >= pOverlappingRange->GCPhys
4538 && GCPhysLast <= pOverlappingRange->GCPhysLast,
4539 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
4540 GCPhys, GCPhysLast, pszDesc,
4541 pOverlappingRange->GCPhys, pOverlappingRange->GCPhysLast, pOverlappingRange->pszDesc),
4542 VERR_PGM_RAM_CONFLICT);
4543
4544         /* Check that it isn't an ad hoc range, but a real RAM range. */
4545 AssertLogRelMsgReturn(!PGM_RAM_RANGE_IS_AD_HOC(pOverlappingRange),
4546 ("%RGp-%RGp (ROM/%s) mapping attempt in non-RAM range: %RGp-%RGp (%s)\n",
4547 GCPhys, GCPhysLast, pszDesc,
4548 pOverlappingRange->GCPhys, pOverlappingRange->GCPhysLast, pOverlappingRange->pszDesc),
4549 VERR_PGM_RAM_CONFLICT);
4550
4551 /* All the pages must be RAM pages. */
4552 PPGMPAGE pPage = &pOverlappingRange->aPages[(GCPhys - pOverlappingRange->GCPhys) >> GUEST_PAGE_SHIFT];
4553 uint32_t cPagesLeft = cGuestPages;
4554 while (cPagesLeft-- > 0)
4555 {
4556 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
4557 ("%RGp (%R[pgmpage]) isn't a RAM page - registering %RGp-%RGp (%s).\n",
4558 GCPhys + ((RTGCPHYS)cPagesLeft << GUEST_PAGE_SHIFT), pPage, GCPhys, GCPhysLast, pszDesc),
4559 VERR_PGM_RAM_CONFLICT);
4560 AssertLogRelMsgReturn(PGM_PAGE_IS_ZERO(pPage) || PGM_IS_IN_NEM_MODE(pVM),
4561 ("%RGp (%R[pgmpage]) is not a ZERO page - registering %RGp-%RGp (%s).\n",
4562 GCPhys + ((RTGCPHYS)cPagesLeft << GUEST_PAGE_SHIFT), pPage, GCPhys, GCPhysLast, pszDesc),
4563 VERR_PGM_UNEXPECTED_PAGE_STATE);
4564 pPage++;
4565 }
4566 }
4567
4568 /*
4569 * Update the base memory reservation if necessary.
4570 */
4571 uint32_t const cExtraBaseCost = (pOverlappingRange ? 0 : cGuestPages)
4572 + (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED ? cGuestPages : 0);
4573 if (cExtraBaseCost)
4574 {
4575 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
4576 AssertRCReturn(rc, rc);
4577 }
4578
4579#ifdef VBOX_WITH_NATIVE_NEM
4580 /*
4581 * Early NEM notification before we've made any changes or anything.
4582 */
4583 uint32_t const fNemNotify = (pOverlappingRange ? NEM_NOTIFY_PHYS_ROM_F_REPLACE : 0)
4584 | (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED ? NEM_NOTIFY_PHYS_ROM_F_SHADOW : 0);
4585 uint8_t u2NemState = UINT8_MAX;
4586 uint32_t uNemRange = 0;
4587 if (VM_IS_NEM_ENABLED(pVM))
4588 {
4589 int rc = NEMR3NotifyPhysRomRegisterEarly(pVM, GCPhys, cGuestPages << GUEST_PAGE_SHIFT,
4590 pOverlappingRange
4591 ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pOverlappingRange, GCPhys) : NULL,
4592 fNemNotify, &u2NemState,
4593 pOverlappingRange ? &pOverlappingRange->uNemRange : &uNemRange);
4594 AssertLogRelRCReturn(rc, rc);
4595 }
4596#endif
4597
4598 /*
4599 * Allocate memory for the virgin copy of the RAM. In simplified memory
4600 * mode, we allocate memory for any ad-hoc RAM range and for shadow pages.
4601 */
4602 int rc;
4603 PGMMALLOCATEPAGESREQ pReq = NULL;
4604#ifdef VBOX_WITH_PGM_NEM_MODE
4605 void *pvRam = NULL;
4606 void *pvAlt = NULL;
4607 if (PGM_IS_IN_NEM_MODE(pVM))
4608 {
4609 if (!pOverlappingRange)
4610 {
4611 rc = SUPR3PageAlloc(cHostPages, 0, &pvRam);
4612 if (RT_FAILURE(rc))
4613 return rc;
4614 }
4615 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
4616 {
4617 rc = SUPR3PageAlloc(cHostPages, 0, &pvAlt);
4618 if (RT_FAILURE(rc))
4619 {
4620 if (pvRam)
4621 SUPR3PageFree(pvRam, cHostPages);
4622 return rc;
4623 }
4624 }
4625 }
4626 else
4627#endif
4628 {
4629 rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cGuestPages, GMMACCOUNT_BASE);
4630 AssertRCReturn(rc, rc);
4631
4632 for (uint32_t iPage = 0; iPage < cGuestPages; iPage++)
4633 {
4634 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << GUEST_PAGE_SHIFT);
4635 pReq->aPages[iPage].fZeroed = false;
4636 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
4637 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
4638 }
4639
4640 rc = GMMR3AllocatePagesPerform(pVM, pReq);
4641 if (RT_FAILURE(rc))
4642 {
4643 GMMR3AllocatePagesCleanup(pReq);
4644 return rc;
4645 }
4646 }
4647
4648 /*
4649 * Allocate a RAM range if required.
4650 * Note! We don't clean up the RAM range here on failure, VM destruction does that.
4651 */
4652 rc = VINF_SUCCESS;
4653 PPGMRAMRANGE pRamRange = NULL;
4654 if (!pOverlappingRange)
4655 rc = pgmR3PhysAllocateRamRange(pVM, pVCpu, cGuestPages, PGM_RAM_RANGE_FLAGS_AD_HOC_ROM, &pRamRange);
4656 if (RT_SUCCESS(rc))
4657 {
4658 /*
4659 * Allocate a ROM range.
4660 * Note! We don't clean up the ROM range here on failure, VM destruction does that.
4661 */
4662 if (SUPR3IsDriverless())
4663 rc = pgmPhysRomRangeAllocCommon(pVM, cGuestPages, idRomRange, fFlags);
4664 else
4665 {
4666 PGMPHYSROMALLOCATERANGEREQ RomRangeReq;
4667 RomRangeReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
4668 RomRangeReq.Hdr.cbReq = sizeof(RomRangeReq);
4669 RomRangeReq.cbGuestPage = GUEST_PAGE_SIZE;
4670 RomRangeReq.cGuestPages = cGuestPages;
4671 RomRangeReq.idRomRange = idRomRange;
4672 RomRangeReq.fFlags = fFlags;
4673 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_PGM_PHYS_ROM_ALLOCATE_RANGE, 0 /*u64Arg*/, &RomRangeReq.Hdr);
4674 }
4675 }
4676 if (RT_SUCCESS(rc))
4677 {
4678 /*
4679 * Initialize and map the RAM range (if required).
4680 */
4681 PPGMROMRANGE const pRomRange = pVM->pgm.s.apRomRanges[idRomRange];
4682 AssertPtr(pRomRange);
4683 uint32_t const idxFirstRamPage = pOverlappingRange ? (GCPhys - pOverlappingRange->GCPhys) >> GUEST_PAGE_SHIFT : 0;
4684 PPGMROMPAGE pRomPage = &pRomRange->aPages[0];
4685 if (!pOverlappingRange)
4686 {
4687 /* Initialize the new RAM range and insert it into the lookup table. */
4688 pRamRange->pszDesc = pszDesc;
4689#ifdef VBOX_WITH_NATIVE_NEM
4690 pRamRange->uNemRange = uNemRange;
4691#endif
4692
4693 PPGMPAGE pRamPage = &pRamRange->aPages[idxFirstRamPage];
4694#ifdef VBOX_WITH_PGM_NEM_MODE
4695 if (PGM_IS_IN_NEM_MODE(pVM))
4696 {
4697 AssertPtr(pvRam); Assert(pReq == NULL);
4698 pRamRange->pbR3 = (uint8_t *)pvRam;
4699 for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
4700 {
4701 PGM_PAGE_INIT(pRamPage, UINT64_C(0x0000fffffffff000), NIL_GMM_PAGEID,
4702 PGMPAGETYPE_ROM, PGM_PAGE_STATE_ALLOCATED);
4703 pRomPage->Virgin = *pRamPage;
4704 }
4705 }
4706 else
4707#endif
4708 {
4709 Assert(!pRamRange->pbR3); Assert(!pvRam);
4710 for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
4711 {
4712 PGM_PAGE_INIT(pRamPage,
4713 pReq->aPages[iPage].HCPhysGCPhys,
4714 pReq->aPages[iPage].idPage,
4715 PGMPAGETYPE_ROM,
4716 PGM_PAGE_STATE_ALLOCATED);
4717
4718 pRomPage->Virgin = *pRamPage;
4719 }
4720 }
4721
4722 pVM->pgm.s.cAllPages += cGuestPages;
4723 pVM->pgm.s.cPrivatePages += cGuestPages;
4724
4725 rc = pgmR3PhysRamRangeInsertLookup(pVM, pRamRange, GCPhys, &idxInsert);
4726 }
4727 else
4728 {
4729 /* Insert the ROM into an existing RAM range. */
4730 PPGMPAGE pRamPage = &pOverlappingRange->aPages[idxFirstRamPage];
4731#ifdef VBOX_WITH_PGM_NEM_MODE
4732 if (PGM_IS_IN_NEM_MODE(pVM))
4733 {
4734 Assert(pvRam == NULL); Assert(pReq == NULL);
4735 for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
4736 {
4737 Assert(PGM_PAGE_GET_HCPHYS(pRamPage) == UINT64_C(0x0000fffffffff000));
4738 Assert(PGM_PAGE_GET_PAGEID(pRamPage) == NIL_GMM_PAGEID);
4739 Assert(PGM_PAGE_GET_STATE(pRamPage) == PGM_PAGE_STATE_ALLOCATED);
4740 PGM_PAGE_SET_TYPE(pVM, pRamPage, PGMPAGETYPE_ROM);
4741 PGM_PAGE_SET_STATE(pVM, pRamPage, PGM_PAGE_STATE_ALLOCATED);
4742 PGM_PAGE_SET_PDE_TYPE(pVM, pRamPage, PGM_PAGE_PDE_TYPE_DONTCARE);
4743 PGM_PAGE_SET_PTE_INDEX(pVM, pRamPage, 0);
4744 PGM_PAGE_SET_TRACKING(pVM, pRamPage, 0);
4745
4746 pRomPage->Virgin = *pRamPage;
4747 }
4748 }
4749 else
4750#endif
4751 {
4752 for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
4753 {
4754 PGM_PAGE_SET_TYPE(pVM, pRamPage, PGMPAGETYPE_ROM);
4755 PGM_PAGE_SET_HCPHYS(pVM, pRamPage, pReq->aPages[iPage].HCPhysGCPhys);
4756 PGM_PAGE_SET_STATE(pVM, pRamPage, PGM_PAGE_STATE_ALLOCATED);
4757 PGM_PAGE_SET_PAGEID(pVM, pRamPage, pReq->aPages[iPage].idPage);
4758 PGM_PAGE_SET_PDE_TYPE(pVM, pRamPage, PGM_PAGE_PDE_TYPE_DONTCARE);
4759 PGM_PAGE_SET_PTE_INDEX(pVM, pRamPage, 0);
4760 PGM_PAGE_SET_TRACKING(pVM, pRamPage, 0);
4761
4762 pRomPage->Virgin = *pRamPage;
4763 }
4764 pVM->pgm.s.cZeroPages -= cGuestPages;
4765 pVM->pgm.s.cPrivatePages += cGuestPages;
4766 }
4767 pRamRange = pOverlappingRange;
4768 }
4769
4770 if (RT_SUCCESS(rc))
4771 {
4772#ifdef VBOX_WITH_NATIVE_NEM
4773 /* Set the NEM state of the pages if needed. */
4774 if (u2NemState != UINT8_MAX)
4775 pgmPhysSetNemStateForPages(&pRamRange->aPages[idxFirstRamPage], cGuestPages, u2NemState);
4776#endif
4777
4778 /* Flush physical page map TLB. */
4779 pgmPhysInvalidatePageMapTLB(pVM);
4780
4781 /*
4782 * Register the ROM access handler.
4783 */
4784 rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, pVM->pgm.s.hRomPhysHandlerType, idRomRange, pszDesc);
4785 if (RT_SUCCESS(rc))
4786 {
4787 /*
4788 * Copy the image over to the virgin pages.
4789 * This must be done after linking in the RAM range.
4790 */
4791 size_t cbBinaryLeft = cbBinary;
4792 PPGMPAGE pRamPage = &pRamRange->aPages[idxFirstRamPage];
4793 for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++)
4794 {
4795 void *pvDstPage;
4796 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << GUEST_PAGE_SHIFT), &pvDstPage);
4797 if (RT_FAILURE(rc))
4798 {
4799 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
4800 break;
4801 }
4802 if (cbBinaryLeft >= GUEST_PAGE_SIZE)
4803 {
4804 memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << GUEST_PAGE_SHIFT), GUEST_PAGE_SIZE);
4805 cbBinaryLeft -= GUEST_PAGE_SIZE;
4806 }
4807 else
4808 {
4809 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE); /* (shouldn't be necessary, but can't hurt either) */
4810 if (cbBinaryLeft > 0)
4811 {
4812 memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << GUEST_PAGE_SHIFT), cbBinaryLeft);
4813 cbBinaryLeft = 0;
4814 }
4815 }
4816 }
4817 if (RT_SUCCESS(rc))
4818 {
4819 /*
4820 * Initialize the ROM range.
4821 * Note that the Virgin member of the pages has already been initialized above.
4822 */
4823 Assert(pRomRange->cb == cb);
4824 Assert(pRomRange->fFlags == fFlags);
4825 Assert(pRomRange->idSavedState == UINT8_MAX);
4826 pRomRange->GCPhys = GCPhys;
4827 pRomRange->GCPhysLast = GCPhysLast;
4828 pRomRange->cbOriginal = cbBinary;
4829 pRomRange->pszDesc = pszDesc;
4830#ifdef VBOX_WITH_PGM_NEM_MODE
4831 pRomRange->pbR3Alternate = (uint8_t *)pvAlt;
4832#endif
4833 pRomRange->pvOriginal = fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY
4834 ? pvBinary : RTMemDup(pvBinary, cbBinary);
4835 if (pRomRange->pvOriginal)
4836 {
4837 for (unsigned iPage = 0; iPage < cGuestPages; iPage++)
4838 {
4839 PPGMROMPAGE const pPage = &pRomRange->aPages[iPage];
4840 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
4841#ifdef VBOX_WITH_PGM_NEM_MODE
4842 if (PGM_IS_IN_NEM_MODE(pVM))
4843 PGM_PAGE_INIT(&pPage->Shadow, UINT64_C(0x0000fffffffff000), NIL_GMM_PAGEID,
4844 PGMPAGETYPE_ROM_SHADOW, PGM_PAGE_STATE_ALLOCATED);
4845 else
4846#endif
4847 PGM_PAGE_INIT_ZERO(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
4848 }
4849
4850 /* update the page count stats for the shadow pages. */
4851 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
4852 {
4853 if (PGM_IS_IN_NEM_MODE(pVM))
4854 pVM->pgm.s.cPrivatePages += cGuestPages;
4855 else
4856 pVM->pgm.s.cZeroPages += cGuestPages;
4857 pVM->pgm.s.cAllPages += cGuestPages;
4858 }
4859
4860#ifdef VBOX_WITH_NATIVE_NEM
4861 /*
4862 * Notify NEM again.
4863 */
4864 if (VM_IS_NEM_ENABLED(pVM))
4865 {
4866 u2NemState = UINT8_MAX;
4867 rc = NEMR3NotifyPhysRomRegisterLate(pVM, GCPhys, cb, PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamRange, GCPhys),
4868 fNemNotify, &u2NemState, &pRamRange->uNemRange);
4869 if (u2NemState != UINT8_MAX)
4870 pgmPhysSetNemStateForPages(&pRamRange->aPages[idxFirstRamPage], cGuestPages, u2NemState);
4871 }
4872 else
4873#endif
4874 GMMR3AllocatePagesCleanup(pReq);
4875 if (RT_SUCCESS(rc))
4876 {
4877 /*
4878 * Done!
4879 */
4880#ifdef VBOX_STRICT
4881 pgmPhysAssertRamRangesLocked(pVM, false /*fInUpdate*/, false /*fRamRelaxed*/);
4882#endif
4883 return rc;
4884 }
4885
4886 /*
4887 * bail out
4888 */
4889#ifdef VBOX_WITH_NATIVE_NEM
4890 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
4891 {
4892 Assert(VM_IS_NEM_ENABLED(pVM));
4893 pVM->pgm.s.cPrivatePages -= cGuestPages;
4894 pVM->pgm.s.cAllPages -= cGuestPages;
4895 }
4896#endif
4897 }
4898 else
4899 rc = VERR_NO_MEMORY;
4900 }
4901
4902 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
4903 AssertRC(rc2);
4904 }
4905
4906 idxInsert -= 1;
4907 if (!pOverlappingRange)
4908 pgmR3PhysRamRangeRemoveLookup(pVM, pRamRange, &idxInsert);
4909 }
4910 /* else: lookup insertion failed. */
4911
4912 if (pOverlappingRange)
4913 {
4914 PPGMPAGE pRamPage = &pOverlappingRange->aPages[idxFirstRamPage];
4915#ifdef VBOX_WITH_PGM_NEM_MODE
4916 if (PGM_IS_IN_NEM_MODE(pVM))
4917 {
4918 Assert(pvRam == NULL); Assert(pReq == NULL);
4919 for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
4920 {
4921 Assert(PGM_PAGE_GET_HCPHYS(pRamPage) == UINT64_C(0x0000fffffffff000));
4922 Assert(PGM_PAGE_GET_PAGEID(pRamPage) == NIL_GMM_PAGEID);
4923 Assert(PGM_PAGE_GET_STATE(pRamPage) == PGM_PAGE_STATE_ALLOCATED);
4924 PGM_PAGE_SET_TYPE(pVM, pRamPage, PGMPAGETYPE_RAM);
4925 PGM_PAGE_SET_STATE(pVM, pRamPage, PGM_PAGE_STATE_ALLOCATED);
4926 }
4927 }
4928 else
4929#endif
4930 {
4931 for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++)
4932 PGM_PAGE_INIT_ZERO(pRamPage, pVM, PGMPAGETYPE_RAM);
4933 pVM->pgm.s.cZeroPages += cGuestPages;
4934 pVM->pgm.s.cPrivatePages -= cGuestPages;
4935 }
4936 }
4937 }
4938 pgmPhysInvalidatePageMapTLB(pVM);
4939 pgmPhysInvalidRamRangeTlbs(pVM);
4940
4941#ifdef VBOX_WITH_PGM_NEM_MODE
4942 if (PGM_IS_IN_NEM_MODE(pVM))
4943 {
4944 Assert(!pReq);
4945 if (pvRam)
4946 SUPR3PageFree(pvRam, cHostPages);
4947 if (pvAlt)
4948 SUPR3PageFree(pvAlt, cHostPages);
4949 }
4950 else
4951#endif
4952 {
4953 GMMR3FreeAllocatedPages(pVM, pReq);
4954 GMMR3AllocatePagesCleanup(pReq);
4955 }
4956
 4957 /* We don't bother to actually free either the ROM or the RAM ranges
 4958 themselves; as already mentioned above, we'll leave that to the VM
4959 termination cleanup code. */
4960 return rc;
4961}
4962
4963
4964/**
4965 * Registers a ROM image.
4966 *
 4967 * Shadowed ROM images require double the amount of backing memory, so
 4968 * don't use that unless you have to. Shadowing of ROM images is a process
 4969 * where we can select where the reads go and where the writes go. On real
 4970 * hardware the chipset provides the means to configure this. We provide
 4971 * PGMR3PhysRomProtect() for this purpose.
4972 *
4973 * A read-only copy of the ROM image will always be kept around while we
4974 * will allocate RAM pages for the changes on demand (unless all memory
4975 * is configured to be preallocated).
4976 *
4977 * @returns VBox status code.
4978 * @param pVM The cross context VM structure.
4979 * @param pDevIns The device instance owning the ROM.
4980 * @param GCPhys First physical address in the range.
4981 * Must be page aligned!
4982 * @param cb The size of the range (in bytes).
4983 * Must be page aligned!
4984 * @param pvBinary Pointer to the binary data backing the ROM image.
4985 * @param cbBinary The size of the binary data pvBinary points to.
4986 * This must be less or equal to @a cb.
4987 * @param fFlags Mask of flags, PGMPHYS_ROM_FLAGS_XXX.
4988 * @param pszDesc Pointer to description string. This must not be freed.
4989 *
 4990 * @remark There is no way to remove the ROM yet, either automatically on device
 4991 * cleanup or manually from the device. This isn't difficult in any way, it's
 4992 * just not something we expect to be necessary for a while.
4993 */
4994VMMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
4995 const void *pvBinary, uint32_t cbBinary, uint8_t fFlags, const char *pszDesc)
4996{
4997 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p cbBinary=%#x fFlags=%#x pszDesc=%s\n",
4998 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, cbBinary, fFlags, pszDesc));
4999 PGM_LOCK_VOID(pVM);
5000
5001 int rc = pgmR3PhysRomRegisterLocked(pVM, pDevIns, GCPhys, cb, pvBinary, cbBinary, fFlags, pszDesc);
5002
5003 PGM_UNLOCK(pVM);
5004 return rc;
5005}
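
/*
 * Illustrative sketch only (not part of the original file): how a device
 * construction callback might register a shadowed BIOS ROM through the API
 * above.  The image (g_abMyBiosRom), the address and the function name are
 * assumptions made for the example; real devices normally go through the
 * PDM device helper callbacks rather than calling PGM directly.
 */
#if 0
static int myDevR3RegisterRomExample(PVM pVM, PPDMDEVINS pDevIns)
{
    static const uint8_t g_abMyBiosRom[_64K] = { 0 }; /* hypothetical 64 KiB image */
    /* Keep a permanent reference to the binary (no heap copy) and enable
       shadowing so the chipset can later redirect writes via PGMR3PhysRomProtect. */
    return PGMR3PhysRomRegister(pVM, pDevIns,
                                UINT32_C(0x000f0000) /* GCPhys */, sizeof(g_abMyBiosRom) /* cb */,
                                g_abMyBiosRom, sizeof(g_abMyBiosRom) /* cbBinary */,
                                PGMPHYS_ROM_FLAGS_SHADOWED | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY,
                                "Example BIOS");
}
#endif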
5006
5007
5008/**
5009 * Called by PGMR3MemSetup to reset the shadow, switch to the virgin, and verify
5010 * that the virgin part is untouched.
5011 *
5012 * This is done after the normal memory has been cleared.
5013 *
5014 * ASSUMES that the caller owns the PGM lock.
5015 *
5016 * @param pVM The cross context VM structure.
5017 */
5018int pgmR3PhysRomReset(PVM pVM)
5019{
5020 PGM_LOCK_ASSERT_OWNER(pVM);
5021 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
5022 for (uint32_t idx = 0; idx < cRomRanges; idx++)
5023 {
5024 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
5025 uint32_t const cGuestPages = pRom->cb >> GUEST_PAGE_SHIFT;
5026
5027 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
5028 {
5029 /*
5030 * Reset the physical handler.
5031 */
5032 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
5033 AssertRCReturn(rc, rc);
5034
5035 /*
5036 * What we do with the shadow pages depends on the memory
5037 * preallocation option. If not enabled, we'll just throw
 5038 * out all the dirty pages and replace them with the zero page.
5039 */
5040#ifdef VBOX_WITH_PGM_NEM_MODE
5041 if (PGM_IS_IN_NEM_MODE(pVM))
5042 {
5043 /* Clear all the shadow pages (currently using alternate backing). */
5044 RT_BZERO(pRom->pbR3Alternate, pRom->cb);
5045 }
5046 else
5047#endif
5048 if (!pVM->pgm.s.fRamPreAlloc)
5049 {
5050 /* Free the dirty pages. */
5051 uint32_t cPendingPages = 0;
5052 PGMMFREEPAGESREQ pReq;
5053 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
5054 AssertRCReturn(rc, rc);
5055
5056 for (uint32_t iPage = 0; iPage < cGuestPages; iPage++)
5057 if ( !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow)
5058 && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow))
5059 {
5060 Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) == PGM_PAGE_STATE_ALLOCATED);
5061 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRom->aPages[iPage].Shadow,
5062 pRom->GCPhys + (iPage << GUEST_PAGE_SHIFT),
5063 (PGMPAGETYPE)PGM_PAGE_GET_TYPE(&pRom->aPages[iPage].Shadow));
5064 AssertLogRelRCReturn(rc, rc);
5065 }
5066
5067 if (cPendingPages)
5068 {
5069 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
5070 AssertLogRelRCReturn(rc, rc);
5071 }
5072 GMMR3FreePagesCleanup(pReq);
5073 }
5074 else
5075 {
5076 /* clear all the shadow pages. */
5077 for (uint32_t iPage = 0; iPage < cGuestPages; iPage++)
5078 {
5079 if (PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow))
5080 continue;
5081 Assert(!PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow));
5082 void *pvDstPage;
5083 RTGCPHYS const GCPhys = pRom->GCPhys + (iPage << GUEST_PAGE_SHIFT);
5084 rc = pgmPhysPageMakeWritableAndMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pvDstPage);
5085 if (RT_FAILURE(rc))
5086 break;
5087 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
5088 }
5089 AssertRCReturn(rc, rc);
5090 }
5091 }
5092
5093 /*
5094 * Restore the original ROM pages after a saved state load.
5095 * Also, in strict builds check that ROM pages remain unmodified.
5096 */
5097#ifndef VBOX_STRICT
5098 if (pVM->pgm.s.fRestoreRomPagesOnReset)
5099#endif
5100 {
5101 size_t cbSrcLeft = pRom->cbOriginal;
5102 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
5103 uint32_t cRestored = 0;
5104 for (uint32_t iPage = 0; iPage < cGuestPages && cbSrcLeft > 0; iPage++, pbSrcPage += GUEST_PAGE_SIZE)
5105 {
5106 RTGCPHYS const GCPhys = pRom->GCPhys + (iPage << GUEST_PAGE_SHIFT);
5107 PPGMPAGE const pPage = pgmPhysGetPage(pVM, GCPhys);
5108 void const *pvDstPage = NULL;
5109 int rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvDstPage);
5110 if (RT_FAILURE(rc))
5111 break;
5112
5113 if (memcmp(pvDstPage, pbSrcPage, RT_MIN(cbSrcLeft, GUEST_PAGE_SIZE)))
5114 {
5115 if (pVM->pgm.s.fRestoreRomPagesOnReset)
5116 {
5117 void *pvDstPageW = NULL;
5118 rc = pgmPhysPageMap(pVM, pPage, GCPhys, &pvDstPageW);
5119 AssertLogRelRCReturn(rc, rc);
5120 memcpy(pvDstPageW, pbSrcPage, RT_MIN(cbSrcLeft, GUEST_PAGE_SIZE));
5121 cRestored++;
5122 }
5123 else
5124 LogRel(("pgmR3PhysRomReset: %RGp: ROM page changed (%s)\n", GCPhys, pRom->pszDesc));
5125 }
5126 cbSrcLeft -= RT_MIN(cbSrcLeft, GUEST_PAGE_SIZE);
5127 }
5128 if (cRestored > 0)
5129 LogRel(("PGM: ROM \"%s\": Reloaded %u of %u pages.\n", pRom->pszDesc, cRestored, cGuestPages));
5130 }
5131 }
5132
5133 /* Clear the ROM restore flag now as we only need to do this once after
5134 loading saved state. */
5135 pVM->pgm.s.fRestoreRomPagesOnReset = false;
5136
5137 return VINF_SUCCESS;
5138}
5139
5140
5141/**
5142 * Called by PGMR3Term to free resources.
5143 *
5144 * ASSUMES that the caller owns the PGM lock.
5145 *
5146 * @param pVM The cross context VM structure.
5147 */
5148void pgmR3PhysRomTerm(PVM pVM)
5149{
5150 /*
5151 * Free the heap copy of the original bits.
5152 */
5153 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
5154 for (uint32_t idx = 0; idx < cRomRanges; idx++)
5155 {
5156 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
5157 if ( pRom->pvOriginal
5158 && !(pRom->fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY))
5159 {
5160 RTMemFree((void *)pRom->pvOriginal);
5161 pRom->pvOriginal = NULL;
5162 }
5163 }
5164}
5165
5166
5167/**
5168 * Change the shadowing of a range of ROM pages.
5169 *
 5170 * This is intended for implementing chipset-specific memory registers
 5171 * and will not be very strict about the input. It will silently ignore
 5172 * any pages that are not part of a shadowed ROM.
5173 *
5174 * @returns VBox status code.
5175 * @retval VINF_PGM_SYNC_CR3
5176 *
5177 * @param pVM The cross context VM structure.
5178 * @param GCPhys Where to start. Page aligned.
5179 * @param cb How much to change. Page aligned.
5180 * @param enmProt The new ROM protection.
5181 */
5182VMMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
5183{
5184 LogFlow(("PGMR3PhysRomProtect: GCPhys=%RGp cb=%RGp enmProt=%d\n", GCPhys, cb, enmProt));
5185
5186 /*
5187 * Check input
5188 */
5189 if (!cb)
5190 return VINF_SUCCESS;
5191 AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
5192 AssertReturn(!(cb & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
5193 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
5194 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
5195 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
5196
5197 /*
5198 * Process the request.
5199 */
5200 PGM_LOCK_VOID(pVM);
5201 int rc = VINF_SUCCESS;
5202 bool fFlushTLB = false;
5203 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
5204 for (uint32_t idx = 0; idx < cRomRanges; idx++)
5205 {
5206 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
5207 if ( GCPhys <= pRom->GCPhysLast
5208 && GCPhysLast >= pRom->GCPhys
5209 && (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
5210 {
5211 /*
 5212 * Iterate the relevant pages and make the necessary changes.
5213 */
5214#ifdef VBOX_WITH_NATIVE_NEM
5215 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
5216 AssertPtrReturn(pRam, VERR_INTERNAL_ERROR_3);
5217#endif
5218 bool fChanges = false;
5219 uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
5220 ? pRom->cb >> GUEST_PAGE_SHIFT
5221 : (GCPhysLast - pRom->GCPhys + 1) >> GUEST_PAGE_SHIFT;
5222 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
5223 iPage < cPages;
5224 iPage++)
5225 {
5226 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
5227 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
5228 {
5229 fChanges = true;
5230
5231 /* flush references to the page. */
5232 RTGCPHYS const GCPhysPage = pRom->GCPhys + (iPage << GUEST_PAGE_SHIFT);
5233 PPGMPAGE pRamPage = pgmPhysGetPage(pVM, GCPhysPage);
5234 int rc2 = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pRamPage, true /*fFlushPTEs*/, &fFlushTLB);
5235 if (rc2 != VINF_SUCCESS && (rc == VINF_SUCCESS || RT_FAILURE(rc2)))
5236 rc = rc2;
5237#ifdef VBOX_WITH_NATIVE_NEM
5238 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pRamPage);
5239#endif
5240
5241 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
5242 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
5243
5244 *pOld = *pRamPage;
5245 *pRamPage = *pNew;
5246 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
5247
5248#ifdef VBOX_WITH_NATIVE_NEM
5249# ifdef VBOX_WITH_PGM_NEM_MODE
5250 /* In simplified mode we have to switch the page data around too. */
5251 if (PGM_IS_IN_NEM_MODE(pVM))
5252 {
5253 uint8_t abPage[GUEST_PAGE_SIZE];
5254 uint8_t * const pbRamPage = PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage);
5255 memcpy(abPage, &pRom->pbR3Alternate[(size_t)iPage << GUEST_PAGE_SHIFT], sizeof(abPage));
5256 memcpy(&pRom->pbR3Alternate[(size_t)iPage << GUEST_PAGE_SHIFT], pbRamPage, sizeof(abPage));
5257 memcpy(pbRamPage, abPage, sizeof(abPage));
5258 }
5259# endif
5260 /* Tell NEM about the backing and protection change. */
5261 if (VM_IS_NEM_ENABLED(pVM))
5262 {
5263 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pNew);
5264 NEMHCNotifyPhysPageChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pOld), PGM_PAGE_GET_HCPHYS(pNew),
5265 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
5266 pgmPhysPageCalcNemProtection(pRamPage, enmType), enmType, &u2State);
5267 PGM_PAGE_SET_NEM_STATE(pRamPage, u2State);
5268 }
5269#endif
5270 }
5271 pRomPage->enmProt = enmProt;
5272 }
5273
5274 /*
5275 * Reset the access handler if we made changes, no need to optimize this.
5276 */
5277 if (fChanges)
5278 {
5279 int rc2 = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
5280 if (RT_FAILURE(rc2))
5281 {
5282 PGM_UNLOCK(pVM);
5283 AssertRC(rc);
5284 return rc2;
5285 }
5286
5287 /* Explicitly flush IEM. Not sure if this is really necessary, but better
5288 be on the safe side. This shouldn't be a high volume flush source. */
5289 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_ROM_PROTECT);
5290 }
5291
5292 /* Advance - cb isn't updated. */
5293 GCPhys = pRom->GCPhys + (cPages << GUEST_PAGE_SHIFT);
5294 }
5295 }
5296 PGM_UNLOCK(pVM);
5297 if (fFlushTLB)
5298 PGM_INVL_ALL_VCPU_TLBS(pVM);
5299
5300 return rc;
5301}
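
/*
 * Illustrative sketch only (not part of the original file): how a chipset
 * emulation might flip the protection of a shadowed ROM region when the
 * guest programs a PAM-style shadowing register.  The address range is made
 * up, and PGMROMPROT_READ_RAM_WRITE_RAM is assumed to be one of the
 * PGMROMPROT values alongside PGMROMPROT_READ_ROM_WRITE_IGNORE used above.
 */
#if 0
static int myChipsetShadowExample(PVM pVM, bool fShadowWritable)
{
    PGMROMPROT const enmProt = fShadowWritable
                             ? PGMROMPROT_READ_RAM_WRITE_RAM      /* reads and writes go to the shadow RAM */
                             : PGMROMPROT_READ_ROM_WRITE_IGNORE;  /* reads hit the ROM, writes are dropped */
    /* Pages that aren't part of a shadowed ROM are silently ignored, so the
       range doesn't have to match the ROM boundaries exactly. */
    return PGMR3PhysRomProtect(pVM, UINT32_C(0x000e0000), UINT32_C(0x00020000), enmProt);
}
#endif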
5302
5303
5304
5305/*********************************************************************************************************************************
5306* Ballooning *
5307*********************************************************************************************************************************/
5308
5309#if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))
5310
5311/**
 5312 * Rendezvous callback used by PGMR3PhysChangeMemBalloon that changes the memory balloon size
5313 *
5314 * This is only called on one of the EMTs while the other ones are waiting for
5315 * it to complete this function.
5316 *
5317 * @returns VINF_SUCCESS (VBox strict status code).
5318 * @param pVM The cross context VM structure.
5319 * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
5320 * @param pvUser User parameter
5321 */
5322static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysChangeMemBalloonRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
5323{
5324 uintptr_t *paUser = (uintptr_t *)pvUser;
5325 bool fInflate = !!paUser[0];
5326 unsigned cPages = paUser[1];
5327 RTGCPHYS *paPhysPage = (RTGCPHYS *)paUser[2];
5328 uint32_t cPendingPages = 0;
5329 PGMMFREEPAGESREQ pReq;
5330 int rc;
5331
5332 Log(("pgmR3PhysChangeMemBalloonRendezvous: %s %x pages\n", (fInflate) ? "inflate" : "deflate", cPages));
5333 PGM_LOCK_VOID(pVM);
5334
5335 if (fInflate)
5336 {
5337 /* Flush the PGM pool cache as we might have stale references to pages that we just freed. */
5338 pgmR3PoolClearAllRendezvous(pVM, pVCpu, NULL);
5339
5340 /* Replace pages with ZERO pages. */
5341 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
5342 if (RT_FAILURE(rc))
5343 {
5344 PGM_UNLOCK(pVM);
5345 AssertLogRelRC(rc);
5346 return rc;
5347 }
5348
5349 /* Iterate the pages. */
5350 for (unsigned i = 0; i < cPages; i++)
5351 {
5352 PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
5353 if ( pPage == NULL
5354 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM)
5355 {
5356 Log(("pgmR3PhysChangeMemBalloonRendezvous: invalid physical page %RGp pPage->u3Type=%d\n", paPhysPage[i], pPage ? PGM_PAGE_GET_TYPE(pPage) : 0));
5357 break;
5358 }
5359
5360 LogFlow(("balloon page: %RGp\n", paPhysPage[i]));
5361
5362 /* Flush the shadow PT if this page was previously used as a guest page table. */
5363 pgmPoolFlushPageByGCPhys(pVM, paPhysPage[i]);
5364
5365 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, paPhysPage[i], (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
5366 if (RT_FAILURE(rc))
5367 {
5368 PGM_UNLOCK(pVM);
5369 AssertLogRelRC(rc);
5370 return rc;
5371 }
5372 Assert(PGM_PAGE_IS_ZERO(pPage));
5373 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
5374 }
5375
5376 if (cPendingPages)
5377 {
5378 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
5379 if (RT_FAILURE(rc))
5380 {
5381 PGM_UNLOCK(pVM);
5382 AssertLogRelRC(rc);
5383 return rc;
5384 }
5385 }
5386 GMMR3FreePagesCleanup(pReq);
5387 }
5388 else
5389 {
5390 /* Iterate the pages. */
5391 for (unsigned i = 0; i < cPages; i++)
5392 {
5393 PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
5394 AssertBreak(pPage && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
5395
5396 LogFlow(("Free ballooned page: %RGp\n", paPhysPage[i]));
5397
5398 Assert(PGM_PAGE_IS_BALLOONED(pPage));
5399
5400 /* Change back to zero page. (NEM does not need to be informed.) */
5401 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
5402 }
5403
5404 /* Note that we currently do not map any ballooned pages in our shadow page tables, so no need to flush the pgm pool. */
5405 }
5406
5407 /* Notify GMM about the balloon change. */
5408 rc = GMMR3BalloonedPages(pVM, (fInflate) ? GMMBALLOONACTION_INFLATE : GMMBALLOONACTION_DEFLATE, cPages);
5409 if (RT_SUCCESS(rc))
5410 {
5411 if (!fInflate)
5412 {
5413 Assert(pVM->pgm.s.cBalloonedPages >= cPages);
5414 pVM->pgm.s.cBalloonedPages -= cPages;
5415 }
5416 else
5417 pVM->pgm.s.cBalloonedPages += cPages;
5418 }
5419
5420 PGM_UNLOCK(pVM);
5421
5422 /* Flush the recompiler's TLB as well. */
5423 for (VMCPUID i = 0; i < pVM->cCpus; i++)
5424 CPUMSetChangedFlags(pVM->apCpusR3[i], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
5425
5426 AssertLogRelRC(rc);
5427 return rc;
5428}
5429
5430
5431/**
 5432 * Inflates or deflates the memory balloon; request packet handler for PGMR3PhysChangeMemBalloon (SMP case).
5433 *
5434 * @param pVM The cross context VM structure.
5435 * @param fInflate Inflate or deflate memory balloon
5436 * @param cPages Number of pages to free
5437 * @param paPhysPage Array of guest physical addresses
5438 */
5439static DECLCALLBACK(void) pgmR3PhysChangeMemBalloonHelper(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
5440{
5441 uintptr_t paUser[3];
5442
5443 paUser[0] = fInflate;
5444 paUser[1] = cPages;
5445 paUser[2] = (uintptr_t)paPhysPage;
5446 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
5447 AssertRC(rc);
5448
 5449 /* Made a copy in PGMR3PhysChangeMemBalloon; free it here. */
5450 RTMemFree(paPhysPage);
5451}
5452
5453#endif /* 64-bit host && (Windows || Solaris || Linux || FreeBSD) */
5454
5455/**
5456 * Inflate or deflate a memory balloon
5457 *
5458 * @returns VBox status code.
5459 * @param pVM The cross context VM structure.
5460 * @param fInflate Inflate or deflate memory balloon
5461 * @param cPages Number of pages to free
5462 * @param paPhysPage Array of guest physical addresses
5463 */
5464VMMR3DECL(int) PGMR3PhysChangeMemBalloon(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
5465{
5466 /* This must match GMMR0Init; currently we only support memory ballooning on all 64-bit hosts except Mac OS X */
5467#if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))
5468 int rc;
5469
5470 /* Older additions (ancient non-functioning balloon code) pass wrong physical addresses. */
5471 AssertReturn(!(paPhysPage[0] & 0xfff), VERR_INVALID_PARAMETER);
5472
5473 /* We own the IOM lock here and could cause a deadlock by waiting for another VCPU that is blocking on the IOM lock.
5474 * In the SMP case we post a request packet to postpone the job.
5475 */
5476 if (pVM->cCpus > 1)
5477 {
5478 unsigned cbPhysPage = cPages * sizeof(paPhysPage[0]);
5479 RTGCPHYS *paPhysPageCopy = (RTGCPHYS *)RTMemAlloc(cbPhysPage);
5480 AssertReturn(paPhysPageCopy, VERR_NO_MEMORY);
5481
5482 memcpy(paPhysPageCopy, paPhysPage, cbPhysPage);
5483
5484 rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysChangeMemBalloonHelper, 4, pVM, fInflate, cPages, paPhysPageCopy);
5485 AssertRC(rc);
5486 }
5487 else
5488 {
5489 uintptr_t paUser[3];
5490
5491 paUser[0] = fInflate;
5492 paUser[1] = cPages;
5493 paUser[2] = (uintptr_t)paPhysPage;
5494 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
5495 AssertRC(rc);
5496 }
5497 return rc;
5498
5499#else
5500 NOREF(pVM); NOREF(fInflate); NOREF(cPages); NOREF(paPhysPage);
5501 return VERR_NOT_IMPLEMENTED;
5502#endif
5503}
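
/*
 * Illustrative sketch only (not part of the original file): inflating the
 * balloon with a page list handed up by the guest balloon service.  The
 * addresses and the function name are made up; the addresses only need to
 * be page aligned, as asserted above.
 */
#if 0
static int myBalloonInflateExample(PVM pVM)
{
    static RTGCPHYS s_aPages[2] = { UINT32_C(0x00200000), UINT32_C(0x00201000) };
    /* On SMP configurations the work is posted as a request packet and
       completes asynchronously; on UP it runs in an EMT rendezvous. */
    return PGMR3PhysChangeMemBalloon(pVM, true /*fInflate*/, RT_ELEMENTS(s_aPages), s_aPages);
}
#endif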
5504
5505
5506
5507/*********************************************************************************************************************************
5508* Write Monitoring *
5509*********************************************************************************************************************************/
5510
5511/**
5512 * Rendezvous callback used by PGMR3WriteProtectRAM that write protects all
5513 * physical RAM.
5514 *
5515 * This is only called on one of the EMTs while the other ones are waiting for
5516 * it to complete this function.
5517 *
5518 * @returns VINF_SUCCESS (VBox strict status code).
5519 * @param pVM The cross context VM structure.
5520 * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
5521 * @param pvUser User parameter, unused.
5522 */
5523static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysWriteProtectRAMRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
5524{
5525 int rc = VINF_SUCCESS;
5526 NOREF(pvUser); NOREF(pVCpu);
5527
5528 PGM_LOCK_VOID(pVM);
5529#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
5530 pgmPoolResetDirtyPages(pVM);
5531#endif
5532
5533 uint32_t const cLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
5534 for (uint32_t idxLookup = 0; idxLookup < cLookupEntries; idxLookup++)
5535 {
5536 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
5537 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
5538 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange];
5539 AssertContinue(pRam);
5540
5541 uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
5542 for (uint32_t iPage = 0; iPage < cPages; iPage++)
5543 {
5544 PPGMPAGE const pPage = &pRam->aPages[iPage];
5545 PGMPAGETYPE const enmPageType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
5546
5547 if ( RT_LIKELY(enmPageType == PGMPAGETYPE_RAM)
5548 || enmPageType == PGMPAGETYPE_MMIO2)
5549 {
5550 /*
5551 * A RAM page.
5552 */
5553 switch (PGM_PAGE_GET_STATE(pPage))
5554 {
5555 case PGM_PAGE_STATE_ALLOCATED:
5556 /** @todo Optimize this: Don't always re-enable write
5557 * monitoring if the page is known to be very busy. */
5558 if (PGM_PAGE_IS_WRITTEN_TO(pPage))
5559 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
5560
5561 pgmPhysPageWriteMonitor(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT));
5562 break;
5563
5564 case PGM_PAGE_STATE_SHARED:
5565 AssertFailed();
5566 break;
5567
5568 case PGM_PAGE_STATE_WRITE_MONITORED: /* nothing to change. */
5569 default:
5570 break;
5571 }
5572 }
5573 }
5574 }
5575 pgmR3PoolWriteProtectPages(pVM);
5576 PGM_INVL_ALL_VCPU_TLBS(pVM);
5577 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
5578 CPUMSetChangedFlags(pVM->apCpusR3[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
5579
5580 PGM_UNLOCK(pVM);
5581 return rc;
5582}
5583
5584/**
5585 * Protect all physical RAM to monitor writes
5586 *
5587 * @returns VBox status code.
5588 * @param pVM The cross context VM structure.
5589 */
5590VMMR3DECL(int) PGMR3PhysWriteProtectRAM(PVM pVM)
5591{
5592 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
5593
5594 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysWriteProtectRAMRendezvous, NULL);
5595 AssertRC(rc);
5596 return rc;
5597}
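
/*
 * Illustrative sketch only (not part of the original file): a dirty-page
 * tracking pass (live snapshot style) would arm write monitoring from an
 * EMT like this and later inspect which pages got written to again.  The
 * function name is an assumption for the example.
 */
#if 0
static int myDirtyTrackingStartExample(PVM pVM)
{
    /* Must be called on an EMT; all ALLOCATED RAM and MMIO2 pages are
       switched to WRITE_MONITORED by the rendezvous above. */
    return PGMR3PhysWriteProtectRAM(pVM);
}
#endif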
5598
5599
5600/*********************************************************************************************************************************
5601* Stats. *
5602*********************************************************************************************************************************/
5603
5604/**
5605 * Query the amount of free memory inside VMMR0
5606 *
5607 * @returns VBox status code.
5608 * @param pUVM The user mode VM handle.
5609 * @param pcbAllocMem Where to return the amount of memory allocated
5610 * by VMs.
5611 * @param pcbFreeMem Where to return the amount of memory that is
5612 * allocated from the host but not currently used
5613 * by any VMs.
5614 * @param pcbBallonedMem Where to return the sum of memory that is
5615 * currently ballooned by the VMs.
5616 * @param pcbSharedMem Where to return the amount of memory that is
5617 * currently shared.
5618 */
5619VMMR3DECL(int) PGMR3QueryGlobalMemoryStats(PUVM pUVM, uint64_t *pcbAllocMem, uint64_t *pcbFreeMem,
5620 uint64_t *pcbBallonedMem, uint64_t *pcbSharedMem)
5621{
5622 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
5623 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
5624
5625 uint64_t cAllocPages = 0;
5626 uint64_t cFreePages = 0;
5627 uint64_t cBalloonPages = 0;
5628 uint64_t cSharedPages = 0;
5629 if (!SUPR3IsDriverless())
5630 {
5631 int rc = GMMR3QueryHypervisorMemoryStats(pUVM->pVM, &cAllocPages, &cFreePages, &cBalloonPages, &cSharedPages);
5632 AssertRCReturn(rc, rc);
5633 }
5634
5635 if (pcbAllocMem)
5636 *pcbAllocMem = cAllocPages * _4K;
5637
5638 if (pcbFreeMem)
5639 *pcbFreeMem = cFreePages * _4K;
5640
5641 if (pcbBallonedMem)
5642 *pcbBallonedMem = cBalloonPages * _4K;
5643
5644 if (pcbSharedMem)
5645 *pcbSharedMem = cSharedPages * _4K;
5646
5647 Log(("PGMR3QueryVMMMemoryStats: all=%llx free=%llx ballooned=%llx shared=%llx\n",
5648 cAllocPages, cFreePages, cBalloonPages, cSharedPages));
5649 return VINF_SUCCESS;
5650}
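
/*
 * Illustrative sketch only (not part of the original file): querying the
 * host-wide GMM statistics and logging them in megabytes.  In a driverless
 * setup the call returns all zeros, which this example reports as-is.
 */
#if 0
static void myLogGlobalMemStatsExample(PUVM pUVM)
{
    uint64_t cbAlloc = 0, cbFree = 0, cbBalloon = 0, cbShared = 0;
    int rc = PGMR3QueryGlobalMemoryStats(pUVM, &cbAlloc, &cbFree, &cbBalloon, &cbShared);
    if (RT_SUCCESS(rc))
        LogRel(("Global mem: alloc=%RU64MB free=%RU64MB ballooned=%RU64MB shared=%RU64MB\n",
                cbAlloc / _1M, cbFree / _1M, cbBalloon / _1M, cbShared / _1M));
}
#endif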
5651
5652
5653/**
5654 * Query memory stats for the VM.
5655 *
5656 * @returns VBox status code.
5657 * @param pUVM The user mode VM handle.
 5658 * @param pcbTotalMem Where to return the total amount of memory the VM may
5659 * possibly use.
5660 * @param pcbPrivateMem Where to return the amount of private memory
5661 * currently allocated.
5662 * @param pcbSharedMem Where to return the amount of actually shared
5663 * memory currently used by the VM.
5664 * @param pcbZeroMem Where to return the amount of memory backed by
5665 * zero pages.
5666 *
 5667 * @remarks The total memory is normally larger than the sum of the three
 5668 * components. There are two reasons for this: first, the amount of
 5669 * shared memory is what we're sure is shared, not what could
 5670 * possibly be shared with someone. Second, the total may
 5671 * include some pure MMIO pages that don't go into any of the three
 5672 * sub-counts.
5673 *
5674 * @todo Why do we return reused shared pages instead of anything that could
5675 * potentially be shared? Doesn't this mean the first VM gets a much
5676 * lower number of shared pages?
5677 */
5678VMMR3DECL(int) PGMR3QueryMemoryStats(PUVM pUVM, uint64_t *pcbTotalMem, uint64_t *pcbPrivateMem,
5679 uint64_t *pcbSharedMem, uint64_t *pcbZeroMem)
5680{
5681 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
5682 PVM pVM = pUVM->pVM;
5683 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
5684
5685 if (pcbTotalMem)
5686 *pcbTotalMem = (uint64_t)pVM->pgm.s.cAllPages * GUEST_PAGE_SIZE;
5687
5688 if (pcbPrivateMem)
5689 *pcbPrivateMem = (uint64_t)pVM->pgm.s.cPrivatePages * GUEST_PAGE_SIZE;
5690
5691 if (pcbSharedMem)
5692 *pcbSharedMem = (uint64_t)pVM->pgm.s.cReusedSharedPages * GUEST_PAGE_SIZE;
5693
5694 if (pcbZeroMem)
5695 *pcbZeroMem = (uint64_t)pVM->pgm.s.cZeroPages * GUEST_PAGE_SIZE;
5696
5697 Log(("PGMR3QueryMemoryStats: all=%x private=%x reused=%x zero=%x\n", pVM->pgm.s.cAllPages, pVM->pgm.s.cPrivatePages, pVM->pgm.s.cReusedSharedPages, pVM->pgm.s.cZeroPages));
5698 return VINF_SUCCESS;
5699}
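
/*
 * Illustrative sketch only (not part of the original file): per-VM counters
 * in bytes.  As the remarks above note, the total is usually larger than
 * private + shared + zero, so the example also reports the unaccounted
 * remainder (MMIO pages etc.).
 */
#if 0
static void myLogVmMemStatsExample(PUVM pUVM)
{
    uint64_t cbTotal = 0, cbPrivate = 0, cbShared = 0, cbZero = 0;
    if (RT_SUCCESS(PGMR3QueryMemoryStats(pUVM, &cbTotal, &cbPrivate, &cbShared, &cbZero)))
    {
        uint64_t const cbOther = cbTotal - RT_MIN(cbTotal, cbPrivate + cbShared + cbZero);
        LogRel(("VM mem: total=%RU64MB private=%RU64MB shared=%RU64MB zero=%RU64MB other=%RU64MB\n",
                cbTotal / _1M, cbPrivate / _1M, cbShared / _1M, cbZero / _1M, cbOther / _1M));
    }
}
#endif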
5700
5701
5702
5703/*********************************************************************************************************************************
5704* Chunk Mappings and Page Allocation *
5705*********************************************************************************************************************************/
5706
5707/**
5708 * Tree enumeration callback for dealing with age rollover.
5709 * It will perform a simple compression of the current age.
5710 */
5711static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
5712{
5713 /* Age compression - ASSUMES iNow == 4. */
5714 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
5715 if (pChunk->iLastUsed >= UINT32_C(0xffffff00))
5716 pChunk->iLastUsed = 3;
5717 else if (pChunk->iLastUsed >= UINT32_C(0xfffff000))
5718 pChunk->iLastUsed = 2;
5719 else if (pChunk->iLastUsed)
5720 pChunk->iLastUsed = 1;
5721 else /* iLastUsed = 0 */
5722 pChunk->iLastUsed = 4;
5723
5724 NOREF(pvUser);
5725 return 0;
5726}
5727
5728
5729/**
5730 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
5731 */
5732typedef struct PGMR3PHYSCHUNKUNMAPCB
5733{
5734 PVM pVM; /**< Pointer to the VM. */
5735 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
5736} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
5737
5738
5739/**
5740 * Callback used to find the mapping that's been unused for
5741 * the longest time.
5742 */
5743static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLU32NODECORE pNode, void *pvUser)
5744{
5745 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
5746 PPGMR3PHYSCHUNKUNMAPCB pArg = (PPGMR3PHYSCHUNKUNMAPCB)pvUser;
5747
5748 /*
5749 * Check for locks and compare when last used.
5750 */
5751 if (pChunk->cRefs)
5752 return 0;
5753 if (pChunk->cPermRefs)
5754 return 0;
5755 if ( pArg->pChunk
5756 && pChunk->iLastUsed >= pArg->pChunk->iLastUsed)
5757 return 0;
5758
5759 /*
5760 * Check that it's not in any of the TLBs.
5761 */
5762 PVM pVM = pArg->pVM;
5763 if ( pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(pChunk->Core.Key)].idChunk
5764 == pChunk->Core.Key)
5765 {
5766 pChunk = NULL;
5767 return 0;
5768 }
5769#ifdef VBOX_STRICT
5770 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
5771 {
5772 Assert(pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk != pChunk);
5773 Assert(pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk != pChunk->Core.Key);
5774 }
5775#endif
5776
5777 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
5778 if (pVM->pgm.s.PhysTlbR3.aEntries[i].pMap == pChunk)
5779 return 0;
5780
5781 pArg->pChunk = pChunk;
5782 return 0;
5783}
5784
5785
5786/**
5787 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
5788 *
5789 * The candidate will not be part of any TLBs, so no need to flush
5790 * anything afterwards.
5791 *
5792 * @returns Chunk id.
5793 * @param pVM The cross context VM structure.
5794 */
5795static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
5796{
5797 PGM_LOCK_ASSERT_OWNER(pVM);
5798
5799 /*
5800 * Enumerate the age tree starting with the left most node.
5801 */
5802 STAM_PROFILE_START(&pVM->pgm.s.Stats.StatChunkFindCandidate, a);
5803 PGMR3PHYSCHUNKUNMAPCB Args;
5804 Args.pVM = pVM;
5805 Args.pChunk = NULL;
5806 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args);
5807 Assert(Args.pChunk);
5808 if (Args.pChunk)
5809 {
5810 Assert(Args.pChunk->cRefs == 0);
5811 Assert(Args.pChunk->cPermRefs == 0);
5812 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatChunkFindCandidate, a);
5813 return Args.pChunk->Core.Key;
5814 }
5815
5816 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatChunkFindCandidate, a);
5817 return INT32_MAX;
5818}
5819
5820
5821/**
5822 * Rendezvous callback used by pgmR3PhysUnmapChunk that unmaps a chunk
5823 *
5824 * This is only called on one of the EMTs while the other ones are waiting for
5825 * it to complete this function.
5826 *
5827 * @returns VINF_SUCCESS (VBox strict status code).
5828 * @param pVM The cross context VM structure.
5829 * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
5830 * @param pvUser User pointer. Unused
5831 *
5832 */
5833static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysUnmapChunkRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
5834{
5835 int rc = VINF_SUCCESS;
5836 PGM_LOCK_VOID(pVM);
5837 NOREF(pVCpu); NOREF(pvUser);
5838
5839 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
5840 {
5841 /* Flush the pgm pool cache; call the internal rendezvous handler as we're already in a rendezvous handler here. */
5842 /** @todo also not really efficient to unmap a chunk that contains PD
5843 * or PT pages. */
5844 pgmR3PoolClearAllRendezvous(pVM, pVM->apCpusR3[0], NULL /* no need to flush the REM TLB as we already did that above */);
5845
5846 /*
5847 * Request the ring-0 part to unmap a chunk to make space in the mapping cache.
5848 */
5849 GMMMAPUNMAPCHUNKREQ Req;
5850 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
5851 Req.Hdr.cbReq = sizeof(Req);
5852 Req.pvR3 = NULL;
5853 Req.idChunkMap = NIL_GMM_CHUNKID;
5854 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
5855 if (Req.idChunkUnmap != INT32_MAX)
5856 {
5857 STAM_PROFILE_START(&pVM->pgm.s.Stats.StatChunkUnmap, a);
5858 rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
5859 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatChunkUnmap, a);
5860 if (RT_SUCCESS(rc))
5861 {
5862 /*
5863 * Remove the unmapped one.
5864 */
5865 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
5866 AssertRelease(pUnmappedChunk);
5867 AssertRelease(!pUnmappedChunk->cRefs);
5868 AssertRelease(!pUnmappedChunk->cPermRefs);
5869 pUnmappedChunk->pv = NULL;
5870 pUnmappedChunk->Core.Key = UINT32_MAX;
5871 MMR3HeapFree(pUnmappedChunk);
5872 pVM->pgm.s.ChunkR3Map.c--;
5873 pVM->pgm.s.cUnmappedChunks++;
5874
5875 /*
5876 * Flush dangling PGM pointers (R3 & R0 ptrs to GC physical addresses).
5877 */
5878 /** @todo We should not flush chunks which include cr3 mappings. */
5879 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
5880 {
5881 PPGMCPU pPGM = &pVM->apCpusR3[idCpu]->pgm.s;
5882
5883 pPGM->pGst32BitPdR3 = NULL;
5884 pPGM->pGstPaePdptR3 = NULL;
5885 pPGM->pGstAmd64Pml4R3 = NULL;
5886 pPGM->pGstEptPml4R3 = NULL;
5887 pPGM->pGst32BitPdR0 = NIL_RTR0PTR;
5888 pPGM->pGstPaePdptR0 = NIL_RTR0PTR;
5889 pPGM->pGstAmd64Pml4R0 = NIL_RTR0PTR;
5890 pPGM->pGstEptPml4R0 = NIL_RTR0PTR;
5891 for (unsigned i = 0; i < RT_ELEMENTS(pPGM->apGstPaePDsR3); i++)
5892 {
5893 pPGM->apGstPaePDsR3[i] = NULL;
5894 pPGM->apGstPaePDsR0[i] = NIL_RTR0PTR;
5895 }
5896
5897 /* Flush REM TLBs. */
5898 CPUMSetChangedFlags(pVM->apCpusR3[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
5899 }
5900 }
5901 }
5902 }
5903 PGM_UNLOCK(pVM);
5904 return rc;
5905}
5906
5907/**
5908 * Unmap a chunk to free up virtual address space (request packet handler for pgmR3PhysChunkMap)
5909 *
5910 * @param pVM The cross context VM structure.
5911 */
5912static DECLCALLBACK(void) pgmR3PhysUnmapChunk(PVM pVM)
5913{
5914 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysUnmapChunkRendezvous, NULL);
5915 AssertRC(rc);
5916}
5917
5918
5919/**
5920 * Maps the given chunk into the ring-3 mapping cache.
5921 *
5922 * This will call ring-0.
5923 *
5924 * @returns VBox status code.
5925 * @param pVM The cross context VM structure.
5926 * @param idChunk The chunk in question.
5927 * @param ppChunk Where to store the chunk tracking structure.
5928 *
5929 * @remarks Called from within the PGM critical section.
5930 * @remarks Can be called from any thread!
5931 */
5932int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
5933{
5934 int rc;
5935
5936 PGM_LOCK_ASSERT_OWNER(pVM);
5937
5938 /*
5939 * Move the chunk time forward.
5940 */
5941 pVM->pgm.s.ChunkR3Map.iNow++;
5942 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
5943 {
5944 pVM->pgm.s.ChunkR3Map.iNow = 4;
5945 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, NULL);
5946 }
5947
5948 /*
5949 * Allocate a new tracking structure first.
5950 */
5951 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAllocZ(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
5952 AssertReturn(pChunk, VERR_NO_MEMORY);
5953 pChunk->Core.Key = idChunk;
5954 pChunk->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
5955
5956 /*
5957 * Request the ring-0 part to map the chunk in question.
5958 */
5959 GMMMAPUNMAPCHUNKREQ Req;
5960 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
5961 Req.Hdr.cbReq = sizeof(Req);
5962 Req.pvR3 = NULL;
5963 Req.idChunkMap = idChunk;
5964 Req.idChunkUnmap = NIL_GMM_CHUNKID;
5965
5966 /* Must be callable from any thread, so can't use VMMR3CallR0. */
5967 STAM_PROFILE_START(&pVM->pgm.s.Stats.StatChunkMap, a);
5968 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
5969 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatChunkMap, a);
5970 if (RT_SUCCESS(rc))
5971 {
5972 pChunk->pv = Req.pvR3;
5973
5974 /*
5975 * If we're running out of virtual address space, then we should
5976 * unmap another chunk.
5977 *
5978 * Currently, an unmap operation requires that all other virtual CPUs
5979 * are idling and not by chance making use of the memory we're
5980 * unmapping. So, we create an async unmap operation here.
5981 *
 5982 * Now, when creating or restoring a saved state this won't work very
5983 * well since we may want to restore all guest RAM + a little something.
5984 * So, we have to do the unmap synchronously. Fortunately for us
5985 * though, during these operations the other virtual CPUs are inactive
5986 * and it should be safe to do this.
5987 */
5988 /** @todo Eventually we should lock all memory when used and do
5989 * map+unmap as one kernel call without any rendezvous or
5990 * other precautions. */
5991 if (pVM->pgm.s.ChunkR3Map.c + 1 >= pVM->pgm.s.ChunkR3Map.cMax)
5992 {
5993 switch (VMR3GetState(pVM))
5994 {
5995 case VMSTATE_LOADING:
5996 case VMSTATE_SAVING:
5997 {
5998 PVMCPU pVCpu = VMMGetCpu(pVM);
5999 if ( pVCpu
6000 && pVM->pgm.s.cDeprecatedPageLocks == 0)
6001 {
6002 pgmR3PhysUnmapChunkRendezvous(pVM, pVCpu, NULL);
6003 break;
6004 }
6005 }
6006 RT_FALL_THRU();
6007 default:
6008 rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysUnmapChunk, 1, pVM);
6009 AssertRC(rc);
6010 break;
6011 }
6012 }
6013
6014 /*
6015 * Update the tree. We must do this after any unmapping to make sure
6016 * the chunk we're going to return isn't unmapped by accident.
6017 */
6018 AssertPtr(Req.pvR3);
6019 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
6020 AssertRelease(fRc);
6021 pVM->pgm.s.ChunkR3Map.c++;
6022 pVM->pgm.s.cMappedChunks++;
6023 }
6024 else
6025 {
6026 /** @todo this may fail because of /proc/sys/vm/max_map_count, so we
 6027 * should probably restrict ourselves on Linux. */
6028 AssertRC(rc);
6029 MMR3HeapFree(pChunk);
6030 pChunk = NULL;
6031 }
6032
6033 *ppChunk = pChunk;
6034 return rc;
6035}
6036
6037
6038/**
6039 * Invalidates the TLB for the ring-3 mapping cache.
6040 *
6041 * @param pVM The cross context VM structure.
6042 */
6043VMMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
6044{
6045 PGM_LOCK_VOID(pVM);
6046 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
6047 {
6048 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
6049 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
6050 }
6051 /* The page map TLB references chunks, so invalidate that one too. */
6052 pgmPhysInvalidatePageMapTLB(pVM);
6053 PGM_UNLOCK(pVM);
6054}
6055
6056
6057/**
6058 * Response to VM_FF_PGM_NEED_HANDY_PAGES and helper for pgmPhysEnsureHandyPage.
6059 *
6060 * This function will also work the VM_FF_PGM_NO_MEMORY force action flag, to
 6061 * signal and clear the out-of-memory condition. When called, this API is used
 6062 * to try to clear the condition when the user wants to resume.
6063 *
6064 * @returns The following VBox status codes.
6065 * @retval VINF_SUCCESS on success. FFs cleared.
6066 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in
6067 * this case and it gets accompanied by VM_FF_PGM_NO_MEMORY.
6068 *
6069 * @param pVM The cross context VM structure.
6070 *
6071 * @remarks The VINF_EM_NO_MEMORY status is for the benefit of the FF processing
6072 * in EM.cpp and shouldn't be propagated outside TRPM, HM, EM and
6073 * pgmPhysEnsureHandyPage. There is one exception to this in the \#PF
6074 * handler.
6075 */
6076VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
6077{
6078 PGM_LOCK_VOID(pVM);
6079
6080 /*
6081 * Allocate more pages, noting down the index of the first new page.
6082 */
6083 uint32_t iClear = pVM->pgm.s.cHandyPages;
6084 AssertMsgReturn(iClear <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d", iClear), VERR_PGM_HANDY_PAGE_IPE);
6085 Log(("PGMR3PhysAllocateHandyPages: %d -> %d\n", iClear, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
6086 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
6087 /** @todo we should split this up into an allocate and flush operation. sometimes you want to flush and not allocate more (which will trigger the vm account limit error) */
6088 if ( rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT
6089 && pVM->pgm.s.cHandyPages > 0)
6090 {
6091 /* Still handy pages left, so don't panic. */
6092 rc = VINF_SUCCESS;
6093 }
6094
6095 if (RT_SUCCESS(rc))
6096 {
6097 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
6098 Assert(pVM->pgm.s.cHandyPages > 0);
6099#ifdef VBOX_STRICT
6100 uint32_t i;
6101 for (i = iClear; i < pVM->pgm.s.cHandyPages; i++)
6102 if ( pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID
6103 || pVM->pgm.s.aHandyPages[i].idSharedPage != NIL_GMM_PAGEID
6104 || (pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & GUEST_PAGE_OFFSET_MASK))
6105 break;
6106 if (i != pVM->pgm.s.cHandyPages)
6107 {
6108 RTAssertMsg1Weak(NULL, __LINE__, __FILE__, __FUNCTION__);
6109 RTAssertMsg2Weak("i=%d iClear=%d cHandyPages=%d\n", i, iClear, pVM->pgm.s.cHandyPages);
6110 for (uint32_t j = iClear; j < pVM->pgm.s.cHandyPages; j++)
6111 RTAssertMsg2Add("%03d: idPage=%d HCPhysGCPhys=%RHp idSharedPage=%d%s\n", j,
6112 pVM->pgm.s.aHandyPages[j].idPage,
6113 pVM->pgm.s.aHandyPages[j].HCPhysGCPhys,
6114 pVM->pgm.s.aHandyPages[j].idSharedPage,
6115 j == i ? " <---" : "");
6116 RTAssertPanic();
6117 }
6118#endif
6119 }
6120 else
6121 {
6122 /*
6123 * We should never get here unless there is a genuine shortage of
6124 * memory (or some internal error). Flag the error so the VM can be
6125 * suspended ASAP and the user informed. If we're totally out of
6126 * handy pages we will return failure.
6127 */
6128 /* Report the failure. */
6129 LogRel(("PGM: Failed to procure handy pages; rc=%Rrc cHandyPages=%#x\n"
6130 " cAllPages=%#x cPrivatePages=%#x cSharedPages=%#x cZeroPages=%#x\n",
6131 rc, pVM->pgm.s.cHandyPages,
6132 pVM->pgm.s.cAllPages, pVM->pgm.s.cPrivatePages, pVM->pgm.s.cSharedPages, pVM->pgm.s.cZeroPages));
6133
6134 if ( rc != VERR_NO_MEMORY
6135 && rc != VERR_NO_PHYS_MEMORY
6136 && rc != VERR_LOCK_FAILED)
6137 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
6138 {
6139 LogRel(("PGM: aHandyPages[#%#04x] = {.HCPhysGCPhys=%RHp, .idPage=%#08x, .idSharedPage=%#08x}\n",
6140 i, pVM->pgm.s.aHandyPages[i].HCPhysGCPhys, pVM->pgm.s.aHandyPages[i].idPage,
6141 pVM->pgm.s.aHandyPages[i].idSharedPage));
6142 uint32_t const idPage = pVM->pgm.s.aHandyPages[i].idPage;
6143 if (idPage != NIL_GMM_PAGEID)
6144 {
6145 uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U);
6146 for (uint32_t idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++)
6147 {
6148 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange];
6149 Assert(pRam || idRamRange == 0);
6150 if (!pRam) continue;
6151 Assert(pRam->idRange == idRamRange);
6152
6153 uint32_t const cPages = pRam->cb >> GUEST_PAGE_SHIFT;
6154 for (uint32_t iPage = 0; iPage < cPages; iPage++)
6155 if (PGM_PAGE_GET_PAGEID(&pRam->aPages[iPage]) == idPage)
6156 LogRel(("PGM: Used by %RGp %R[pgmpage] (%s)\n",
6157 pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pRam->aPages[iPage], pRam->pszDesc));
6158 }
6159 }
6160 }
6161
6162 if (rc == VERR_NO_MEMORY)
6163 {
6164 uint64_t cbHostRamAvail = 0;
6165 int rc2 = RTSystemQueryAvailableRam(&cbHostRamAvail);
6166 if (RT_SUCCESS(rc2))
6167 LogRel(("Host RAM: %RU64MB available\n", cbHostRamAvail / _1M));
6168 else
6169 LogRel(("Cannot determine the amount of available host memory\n"));
6170 }
6171
6172 /* Set the FFs and adjust rc. */
6173 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
6174 VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
6175 if ( rc == VERR_NO_MEMORY
6176 || rc == VERR_NO_PHYS_MEMORY
6177 || rc == VERR_LOCK_FAILED)
6178 rc = VINF_EM_NO_MEMORY;
6179 }
6180
6181 PGM_UNLOCK(pVM);
6182 return rc;
6183}
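
/*
 * Illustrative sketch only (not part of the original file): how a force-flag
 * processing loop might react to VM_FF_PGM_NEED_HANDY_PAGES by replenishing
 * the handy page array.  The helper name is an assumption; the real handling
 * lives in the EM force-flag processing.
 */
#if 0
static int myHandleHandyPagesFFExample(PVM pVM)
{
    if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM);
        if (rc == VINF_EM_NO_MEMORY)
            return rc; /* still out of memory - suspend the VM and inform the user */
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}
#endif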
6184
6185
6186/*********************************************************************************************************************************
6187* Other Stuff *
6188*********************************************************************************************************************************/
6189
6190#if !defined(VBOX_VMM_TARGET_ARMV8)
6191/**
6192 * Sets the Address Gate 20 state.
6193 *
6194 * @param pVCpu The cross context virtual CPU structure.
6195 * @param fEnable True if the gate should be enabled.
6196 * False if the gate should be disabled.
6197 */
6198VMMDECL(void) PGMR3PhysSetA20(PVMCPU pVCpu, bool fEnable)
6199{
6200 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVCpu->pgm.s.fA20Enabled));
6201 if (pVCpu->pgm.s.fA20Enabled != fEnable)
6202 {
6203#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6204 PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
6205 if ( CPUMIsGuestInVmxRootMode(pCtx)
6206 && !fEnable)
6207 {
6208 Log(("Cannot enter A20M mode while in VMX root mode\n"));
6209 return;
6210 }
6211#endif
6212 pVCpu->pgm.s.fA20Enabled = fEnable;
6213 pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!fEnable << 20);
6214 if (VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)))
6215 NEMR3NotifySetA20(pVCpu, fEnable);
6216#ifdef PGM_WITH_A20
6217 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
6218 pgmR3RefreshShadowModeAfterA20Change(pVCpu);
6219 HMFlushTlb(pVCpu);
6220#endif
6221#if 0 /* PGMGetPage will apply the A20 mask to the GCPhys it returns, so we must invalid both sides of the TLB. */
6222 IEMTlbInvalidateAllPhysical(pVCpu);
6223#else
6224 IEMTlbInvalidateAll(pVCpu);
6225#endif
6226 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cA20Changes);
6227 }
6228}
6229#endif
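
/*
 * Worked example (not part of the original file): the mask computed above,
 * GCPhysA20Mask = ~((RTGCPHYS)!fEnable << 20), is all ones when the gate is
 * enabled and clears only bit 20 when it is disabled:
 *   fEnable = true  -> ~((RTGCPHYS)0 << 20) = 0xffffffffffffffff  (no masking)
 *   fEnable = false -> ~((RTGCPHYS)1 << 20) = 0xffffffffffefffff  (bit 20 forced to 0)
 * i.e. with A20 off, accesses with bit 20 set wrap back below the 1 MiB
 * boundary just as on the original wired-down hardware.
 */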
6230