VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp@39414

Last change on this file since 39414 was 39402, checked in by vboxsync, 13 years ago

VMM: don't use generic IPE status codes, use specific ones. Part 1.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 164.0 KB
/* $Id: PGMPhys.cpp 39402 2011-11-23 16:25:04Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, Physical Memory Addressing.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM_PHYS
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/rem.h>
#include <VBox/vmm/pdmdev.h>
#include "PGMInternal.h"
#include <VBox/vmm/vm.h>
#include "PGMInline.h"
#include <VBox/sup.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/alloc.h>
#include <iprt/asm.h>
#include <iprt/thread.h>
#include <iprt/string.h>
#include <iprt/system.h>


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of pages to free in one batch. */
#define PGMPHYS_FREE_PAGE_BATCH_SIZE 128


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);


/*
 * PGMR3PhysReadU8-64
 * PGMR3PhysWriteU8-64
 */
#define PGMPHYSFN_READNAME  PGMR3PhysReadU8
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU8
#define PGMPHYS_DATASIZE    1
#define PGMPHYS_DATATYPE    uint8_t
#include "PGMPhysRWTmpl.h"

#define PGMPHYSFN_READNAME  PGMR3PhysReadU16
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU16
#define PGMPHYS_DATASIZE    2
#define PGMPHYS_DATATYPE    uint16_t
#include "PGMPhysRWTmpl.h"

#define PGMPHYSFN_READNAME  PGMR3PhysReadU32
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU32
#define PGMPHYS_DATASIZE    4
#define PGMPHYS_DATATYPE    uint32_t
#include "PGMPhysRWTmpl.h"

#define PGMPHYSFN_READNAME  PGMR3PhysReadU64
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU64
#define PGMPHYS_DATASIZE    8
#define PGMPHYS_DATATYPE    uint64_t
#include "PGMPhysRWTmpl.h"

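/*
 * Illustrative sketch (not part of the original file): PGMPhysRWTmpl.h is not
 * shown on this page, but a size-parameterized template included this way
 * typically expands to one read and one write wrapper per PGMPHYS_DATATYPE.
 * The shape below is an assumption for illustration only, kept under #if 0.
 */
#if 0
VMMR3DECL(PGMPHYS_DATATYPE) PGMPHYSFN_READNAME(PVM pVM, RTGCPHYS GCPhys)
{
    PGMPHYS_DATATYPE val;
    PGMPhysRead(pVM, GCPhys, &val, PGMPHYS_DATASIZE); /* hypothetical body */
    return val;
}
#undef PGMPHYSFN_READNAME
#undef PGMPHYSFN_WRITENAME
#undef PGMPHYS_DATASIZE
#undef PGMPHYS_DATATYPE
#endif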

/**
 * EMT worker for PGMR3PhysReadExternal.
 */
static DECLCALLBACK(int) pgmR3PhysReadExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, void *pvBuf, size_t cbRead)
{
    PGMPhysRead(pVM, *pGCPhys, pvBuf, cbRead);
    return VINF_SUCCESS;
}


/**
 * Read from physical memory, external users.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 *
 * @param   pVM             VM Handle.
 * @param   GCPhys          Physical address to read from.
 * @param   pvBuf           Where to read into.
 * @param   cbRead          How many bytes to read.
 *
 * @thread  Any but EMTs.
 */
VMMR3DECL(int) PGMR3PhysReadExternal(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
{
    VM_ASSERT_OTHER_THREAD(pVM);

    AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
    LogFlow(("PGMR3PhysReadExternal: %RGp %d\n", GCPhys, cbRead));

    pgmLock(pVM);

    /*
     * Copy loop on ram ranges.
     */
    PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
    for (;;)
    {
        /* Inside range or not? */
        if (pRam && GCPhys >= pRam->GCPhys)
        {
            /*
             * Must work our way thru this page by page.
             */
            RTGCPHYS off = GCPhys - pRam->GCPhys;
            while (off < pRam->cb)
            {
                unsigned iPage = off >> PAGE_SHIFT;
                PPGMPAGE pPage = &pRam->aPages[iPage];

                /*
                 * If the page has an ALL access handler, we'll have to
                 * delegate the job to EMT.
                 */
                if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
                {
                    pgmUnlock(pVM);

                    return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysReadExternalEMT, 4,
                                                   pVM, &GCPhys, pvBuf, cbRead);
                }
                Assert(!PGM_PAGE_IS_MMIO(pPage));

                /*
                 * Simple stuff, go ahead.
                 */
                size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
                if (cb > cbRead)
                    cb = cbRead;
                PGMPAGEMAPLOCK PgMpLck;
                const void    *pvSrc;
                int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
                if (RT_SUCCESS(rc))
                {
                    memcpy(pvBuf, pvSrc, cb);
                    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                }
                else
                {
                    AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
                                           pRam->GCPhys + off, pPage, rc));
                    memset(pvBuf, 0xff, cb);
                }

                /* next page */
                if (cb >= cbRead)
                {
                    pgmUnlock(pVM);
                    return VINF_SUCCESS;
                }
                cbRead -= cb;
                off    += cb;
                GCPhys += cb;
                pvBuf   = (char *)pvBuf + cb;
            } /* walk pages in ram range. */
        }
        else
        {
            LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));

            /*
             * Unassigned address space.
             */
            size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
            if (cb >= cbRead)
            {
                memset(pvBuf, 0xff, cbRead);
                break;
            }
            memset(pvBuf, 0xff, cb);

            cbRead -= cb;
            pvBuf   = (char *)pvBuf + cb;
            GCPhys += cb;
        }

        /* Advance range if necessary. */
        while (pRam && GCPhys > pRam->GCPhysLast)
            pRam = pRam->CTX_SUFF(pNext);
    } /* Ram range walk */

    pgmUnlock(pVM);

    return VINF_SUCCESS;
}


/**
 * EMT worker for PGMR3PhysWriteExternal.
 */
static DECLCALLBACK(int) pgmR3PhysWriteExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, const void *pvBuf, size_t cbWrite)
{
    /** @todo VERR_EM_NO_MEMORY */
    PGMPhysWrite(pVM, *pGCPhys, pvBuf, cbWrite);
    return VINF_SUCCESS;
}


/**
 * Write to physical memory, external users.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 * @retval  VERR_EM_NO_MEMORY.
 *
 * @param   pVM             VM Handle.
 * @param   GCPhys          Physical address to write to.
 * @param   pvBuf           What to write.
 * @param   cbWrite         How many bytes to write.
 * @param   pszWho          Who is writing.  For tracking down who is writing
 *                          after we've saved the state.
 *
 * @thread  Any but EMTs.
 */
VMMDECL(int) PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, const char *pszWho)
{
    VM_ASSERT_OTHER_THREAD(pVM);

    AssertMsg(!pVM->pgm.s.fNoMorePhysWrites,
              ("Calling PGMR3PhysWriteExternal after pgmR3Save()! GCPhys=%RGp cbWrite=%#x pszWho=%s\n",
               GCPhys, cbWrite, pszWho));
    AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
    LogFlow(("PGMR3PhysWriteExternal: %RGp %d\n", GCPhys, cbWrite));

    pgmLock(pVM);

    /*
     * Copy loop on ram ranges, stop when we hit something difficult.
     */
    PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
    for (;;)
    {
        /* Inside range or not? */
        if (pRam && GCPhys >= pRam->GCPhys)
        {
            /*
             * Must work our way thru this page by page.
             */
            RTGCPTR off = GCPhys - pRam->GCPhys;
            while (off < pRam->cb)
            {
                RTGCPTR iPage = off >> PAGE_SHIFT;
                PPGMPAGE pPage = &pRam->aPages[iPage];

                /*
                 * If the page is problematic, we have to do the work on the EMT.
                 *
                 * Allocating writable pages and access handlers are
                 * problematic; write monitored pages are simple and can be
                 * dealt with here.
                 */
                if (    PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
                    ||  PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
                {
                    if (    PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
                        && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
                        pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
                    else
                    {
                        pgmUnlock(pVM);

                        return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysWriteExternalEMT, 4,
                                                       pVM, &GCPhys, pvBuf, cbWrite);
                    }
                }
                Assert(!PGM_PAGE_IS_MMIO(pPage));

                /*
                 * Simple stuff, go ahead.
                 */
                size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
                if (cb > cbWrite)
                    cb = cbWrite;
                PGMPAGEMAPLOCK PgMpLck;
                void          *pvDst;
                int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
                if (RT_SUCCESS(rc))
                {
                    memcpy(pvDst, pvBuf, cb);
                    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                }
                else
                    AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
                                           pRam->GCPhys + off, pPage, rc));

                /* next page */
                if (cb >= cbWrite)
                {
                    pgmUnlock(pVM);
                    return VINF_SUCCESS;
                }

                cbWrite -= cb;
                off     += cb;
                GCPhys  += cb;
                pvBuf    = (const char *)pvBuf + cb;
            } /* walk pages in ram range */
        }
        else
        {
            /*
             * Unassigned address space, skip it.
             */
            if (!pRam)
                break;
            size_t cb = pRam->GCPhys - GCPhys;
            if (cb >= cbWrite)
                break;
            cbWrite -= cb;
            pvBuf    = (const char *)pvBuf + cb;
            GCPhys  += cb;
        }

        /* Advance range if necessary. */
        while (pRam && GCPhys > pRam->GCPhysLast)
            pRam = pRam->CTX_SUFF(pNext);
    } /* Ram range walk */

    pgmUnlock(pVM);
    return VINF_SUCCESS;
}

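/*
 * Illustrative usage sketch (not part of the original file): how a non-EMT
 * thread, e.g. a device's asynchronous I/O thread, might use the two external
 * accessors above.  The buffer contents and the address are made up.
 */
#if 0
static int exampleExternalAccess(PVM pVM)
{
    uint8_t abSector[512];

    /* Read 512 bytes from guest physical address 0x1000. */
    int rc = PGMR3PhysReadExternal(pVM, 0x1000, abSector, sizeof(abSector));
    if (RT_FAILURE(rc))
        return rc;

    /* Patch a byte and write it back; the string identifies the caller. */
    abSector[0] = 0x42;
    return PGMR3PhysWriteExternal(pVM, 0x1000, abSector, sizeof(abSector), "exampleExternalAccess");
}
#endif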

/**
 * VMR3ReqCall worker for PGMR3PhysGCPhys2CCPtrExternal to make pages writable.
 *
 * @returns see PGMR3PhysGCPhys2CCPtrExternal
 * @param   pVM         The VM handle.
 * @param   pGCPhys     Pointer to the guest physical address.
 * @param   ppv         Where to store the mapping address.
 * @param   pLock       Where to store the lock.
 */
static DECLCALLBACK(int) pgmR3PhysGCPhys2CCPtrDelegated(PVM pVM, PRTGCPHYS pGCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
{
    /*
     * Just hand it to PGMPhysGCPhys2CCPtr and check that it's not a page with
     * an access handler after it succeeds.
     */
    int rc = pgmLock(pVM);
    AssertRCReturn(rc, rc);

    rc = PGMPhysGCPhys2CCPtr(pVM, *pGCPhys, ppv, pLock);
    if (RT_SUCCESS(rc))
    {
        PPGMPAGEMAPTLBE pTlbe;
        int rc2 = pgmPhysPageQueryTlbe(pVM, *pGCPhys, &pTlbe);
        AssertFatalRC(rc2);
        PPGMPAGE pPage = pTlbe->pPage;
        if (PGM_PAGE_IS_MMIO(pPage))
        {
            PGMPhysReleasePageMappingLock(pVM, pLock);
            rc = VERR_PGM_PHYS_PAGE_RESERVED;
        }
        else if (   PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
                 || pgmPoolIsDirtyPage(pVM, *pGCPhys)
#endif
                )
        {
            /* We *must* flush any corresponding pgm pool page here, otherwise we'll
             * not be informed about writes and keep bogus gst->shw mappings around.
             */
            pgmPoolFlushPageByGCPhys(pVM, *pGCPhys);
            Assert(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage));
            /** @todo r=bird: return VERR_PGM_PHYS_PAGE_RESERVED here if it still has
             *        active handlers, see the PGMR3PhysGCPhys2CCPtrExternal docs. */
        }
    }

    pgmUnlock(pVM);
    return rc;
}


/**
 * Requests the mapping of a guest page into ring-3, external threads.
 *
 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
 * release it.
 *
 * This API will assume your intention is to write to the page, and will
 * therefore replace shared and zero pages. If you do not intend to modify the
 * page, use the PGMR3PhysGCPhys2CCPtrReadOnlyExternal() API.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
 *          backing or if the page has any active access handlers. The caller
 *          must fall back on using PGMR3PhysWriteExternal.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 *
 * @param   pVM         The VM handle.
 * @param   GCPhys      The guest physical address of the page that should be mapped.
 * @param   ppv         Where to store the address corresponding to GCPhys.
 * @param   pLock       Where to store the lock information that PGMPhysReleasePageMappingLock needs.
 *
 * @remark  Avoid calling this API from within critical sections (other than the
 *          PGM one) because of the deadlock risk when we have to delegate the
 *          task to an EMT.
 * @thread  Any.
 */
VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrExternal(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
{
    AssertPtr(ppv);
    AssertPtr(pLock);

    Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));

    int rc = pgmLock(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Query the Physical TLB entry for the page (may fail).
     */
    PPGMPAGEMAPTLBE pTlbe;
    rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
    if (RT_SUCCESS(rc))
    {
        PPGMPAGE pPage = pTlbe->pPage;
        if (PGM_PAGE_IS_MMIO(pPage))
            rc = VERR_PGM_PHYS_PAGE_RESERVED;
        else
        {
            /*
             * If the page is shared, the zero page, or being write monitored
             * it must be converted to a page that's writable if possible.
             * We can only deal with write monitored pages here, the rest have
             * to be on an EMT.
             */
            if (    PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
                ||  PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
                ||  pgmPoolIsDirtyPage(pVM, GCPhys)
#endif
               )
            {
                if (    PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
                    && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
                    && !pgmPoolIsDirtyPage(pVM, GCPhys)
#endif
                   )
                    pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
                else
                {
                    pgmUnlock(pVM);

                    return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
                                                   pVM, &GCPhys, ppv, pLock);
                }
            }

            /*
             * Now, just perform the locking and calculate the return address.
             */
            PPGMPAGEMAP pMap = pTlbe->pMap;
            if (pMap)
                pMap->cRefs++;

            unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
            if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
            {
                if (cLocks == 0)
                    pVM->pgm.s.cWriteLockedPages++;
                PGM_PAGE_INC_WRITE_LOCKS(pPage);
            }
            else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
            {
                PGM_PAGE_INC_WRITE_LOCKS(pPage);
                AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
                if (pMap)
                    pMap->cRefs++; /* Extra ref to prevent it from going away. */
            }

            *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
            pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
            pLock->pvMap = pMap;
        }
    }

    pgmUnlock(pVM);
    return rc;
}
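
/*
 * Illustrative usage sketch (not part of the original file): the intended
 * map / modify / release pattern for the API above.  The address, the value
 * and the fallback path are made up for the example.
 */
#if 0
static int exampleMapAndPatch(PVM pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    void          *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMR3PhysGCPhys2CCPtrExternal(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;                    /* direct access while the lock is held */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release ASAP, as the docs require */
    }
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        /* No writable backing; fall back on the buffered write API. */
        rc = PGMR3PhysWriteExternal(pVM, GCPhys, &bValue, sizeof(bValue), "exampleMapAndPatch");
    return rc;
}
#endif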

/**
 * Requests the mapping of a guest page into ring-3, external threads.
 *
 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
 * release it.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
 *          backing or if the page has an active ALL access handler. The caller
 *          must fall back on using PGMPhysRead.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 *
 * @param   pVM         The VM handle.
 * @param   GCPhys      The guest physical address of the page that should be mapped.
 * @param   ppv         Where to store the address corresponding to GCPhys.
 * @param   pLock       Where to store the lock information that PGMPhysReleasePageMappingLock needs.
 *
 * @remark  Avoid calling this API from within critical sections (other than
 *          the PGM one) because of the deadlock risk.
 * @thread  Any.
 */
VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
{
    int rc = pgmLock(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Query the Physical TLB entry for the page (may fail).
     */
    PPGMPAGEMAPTLBE pTlbe;
    rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
    if (RT_SUCCESS(rc))
    {
        PPGMPAGE pPage = pTlbe->pPage;
#if 1
        /* MMIO pages don't have any readable backing. */
        if (PGM_PAGE_IS_MMIO(pPage))
            rc = VERR_PGM_PHYS_PAGE_RESERVED;
#else
        if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
            rc = VERR_PGM_PHYS_PAGE_RESERVED;
#endif
        else
        {
            /*
             * Now, just perform the locking and calculate the return address.
             */
            PPGMPAGEMAP pMap = pTlbe->pMap;
            if (pMap)
                pMap->cRefs++;

            unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
            if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
            {
                if (cLocks == 0)
                    pVM->pgm.s.cReadLockedPages++;
                PGM_PAGE_INC_READ_LOCKS(pPage);
            }
            else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
            {
                PGM_PAGE_INC_READ_LOCKS(pPage);
                AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
                if (pMap)
                    pMap->cRefs++; /* Extra ref to prevent it from going away. */
            }

            *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
            pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
            pLock->pvMap = pMap;
        }
    }

    pgmUnlock(pVM);
    return rc;
}


#define MAKE_LEAF(a_pNode) \
    do { \
        (a_pNode)->pLeftR3  = NIL_RTR3PTR; \
        (a_pNode)->pRightR3 = NIL_RTR3PTR; \
        (a_pNode)->pLeftR0  = NIL_RTR0PTR; \
        (a_pNode)->pRightR0 = NIL_RTR0PTR; \
        (a_pNode)->pLeftRC  = NIL_RTRCPTR; \
        (a_pNode)->pRightRC = NIL_RTRCPTR; \
    } while (0)

#define INSERT_LEFT(a_pParent, a_pNode) \
    do { \
        (a_pParent)->pLeftR3 = (a_pNode); \
        (a_pParent)->pLeftR0 = (a_pNode)->pSelfR0; \
        (a_pParent)->pLeftRC = (a_pNode)->pSelfRC; \
    } while (0)
#define INSERT_RIGHT(a_pParent, a_pNode) \
    do { \
        (a_pParent)->pRightR3 = (a_pNode); \
        (a_pParent)->pRightR0 = (a_pNode)->pSelfR0; \
        (a_pParent)->pRightRC = (a_pNode)->pSelfRC; \
    } while (0)


/**
 * Recursive tree builder.
 *
 * @returns Pointer to the root node of the subtree that was built.
 * @param   ppRam       Pointer to the iterator variable.
 * @param   iDepth      The height above normal leaf nodes.  Inserts a leaf
 *                      node if 0.
 */
static PPGMRAMRANGE pgmR3PhysRebuildRamRangeSearchTreesRecursively(PPGMRAMRANGE *ppRam, int iDepth)
{
    PPGMRAMRANGE pRam;
    if (iDepth <= 0)
    {
        /*
         * Leaf node.
         */
        pRam = *ppRam;
        if (pRam)
        {
            *ppRam = pRam->pNextR3;
            MAKE_LEAF(pRam);
        }
    }
    else
    {
        /*
         * Intermediate node.
         */
        PPGMRAMRANGE pLeft = pgmR3PhysRebuildRamRangeSearchTreesRecursively(ppRam, iDepth - 1);

        pRam = *ppRam;
        if (!pRam)
            return pLeft;
        *ppRam = pRam->pNextR3;
        MAKE_LEAF(pRam);
        INSERT_LEFT(pRam, pLeft);

        PPGMRAMRANGE pRight = pgmR3PhysRebuildRamRangeSearchTreesRecursively(ppRam, iDepth - 1);
        if (pRight)
            INSERT_RIGHT(pRam, pRight);
    }
    return pRam;
}


/**
 * Rebuilds the RAM range search trees.
 *
 * @param   pVM         The VM handle.
 */
static void pgmR3PhysRebuildRamRangeSearchTrees(PVM pVM)
{
    /*
     * Create the reasonably balanced tree in a sequential fashion.
     * For simplicity (laziness) we use standard recursion here.
     */
    int          iDepth = 0;
    PPGMRAMRANGE pRam   = pVM->pgm.s.pRamRangesXR3;
    PPGMRAMRANGE pRoot  = pgmR3PhysRebuildRamRangeSearchTreesRecursively(&pRam, 0);
    while (pRam)
    {
        PPGMRAMRANGE pLeft = pRoot;

        pRoot = pRam;
        pRam  = pRam->pNextR3;
        MAKE_LEAF(pRoot);
        INSERT_LEFT(pRoot, pLeft);

        PPGMRAMRANGE pRight = pgmR3PhysRebuildRamRangeSearchTreesRecursively(&pRam, iDepth);
        if (pRight)
            INSERT_RIGHT(pRoot, pRight);
        /** @todo else: rotate the tree. */

        iDepth++;
    }

    pVM->pgm.s.pRamRangeTreeR3 = pRoot;
    pVM->pgm.s.pRamRangeTreeR0 = pRoot ? pRoot->pSelfR0 : NIL_RTR0PTR;
    pVM->pgm.s.pRamRangeTreeRC = pRoot ? pRoot->pSelfRC : NIL_RTRCPTR;

#ifdef VBOX_STRICT
    /*
     * Verify that the above code works.
     */
    unsigned cRanges = 0;
    for (pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
        cRanges++;
    Assert(cRanges > 0);

    unsigned cMaxDepth = ASMBitLastSetU32(cRanges);
    if ((1U << cMaxDepth) < cRanges)
        cMaxDepth++;

    for (pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
    {
        unsigned     cDepth = 0;
        PPGMRAMRANGE pRam2  = pVM->pgm.s.pRamRangeTreeR3;
        for (;;)
        {
            if (pRam == pRam2)
                break;
            Assert(pRam2);
            cDepth++; /* was missing; without it the depth assertion below is vacuous */
            if (pRam->GCPhys < pRam2->GCPhys)
                pRam2 = pRam2->pLeftR3;
            else
                pRam2 = pRam2->pRightR3;
        }
        AssertMsg(cDepth <= cMaxDepth, ("cDepth=%d cMaxDepth=%d\n", cDepth, cMaxDepth));
    }
#endif /* VBOX_STRICT */
}

#undef MAKE_LEAF
#undef INSERT_LEFT
#undef INSERT_RIGHT

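/*
 * Illustrative sketch (not part of the original file): the same
 * sorted-list-to-balanced-tree technique on a minimal node type, without the
 * R0/RC self pointers.  Every pass consumes one node as the new root, with a
 * complete subtree of height iDepth on its left and at most one more on its
 * right, so the result stays close to balanced.
 */
#if 0
typedef struct EXAMPLENODE
{
    int                 iKey;
    struct EXAMPLENODE *pNext;   /* sorted singly linked list */
    struct EXAMPLENODE *pLeft;
    struct EXAMPLENODE *pRight;
} EXAMPLENODE;

static EXAMPLENODE *exampleBuildSubtree(EXAMPLENODE **ppList, int iDepth)
{
    if (iDepth <= 0)
    {
        EXAMPLENODE *pLeaf = *ppList;
        if (pLeaf)
        {
            *ppList = pLeaf->pNext;
            pLeaf->pLeft = pLeaf->pRight = NULL;
        }
        return pLeaf;
    }
    EXAMPLENODE *pLeft = exampleBuildSubtree(ppList, iDepth - 1);
    EXAMPLENODE *pRoot = *ppList;
    if (!pRoot)
        return pLeft;
    *ppList = pRoot->pNext;
    pRoot->pLeft  = pLeft;
    pRoot->pRight = exampleBuildSubtree(ppList, iDepth - 1);
    return pRoot;
}

static EXAMPLENODE *exampleBuildTree(EXAMPLENODE *pList)
{
    EXAMPLENODE *pRoot = exampleBuildSubtree(&pList, 0);
    for (int iDepth = 0; pList; iDepth++)
    {
        EXAMPLENODE *pNewRoot = pList;       /* next list node becomes the new root */
        pList = pList->pNext;
        pNewRoot->pLeft  = pRoot;            /* everything built so far goes left */
        pNewRoot->pRight = exampleBuildSubtree(&pList, iDepth);
        pRoot = pNewRoot;
    }
    return pRoot;
}
#endif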
/**
 * Relinks the RAM ranges using the pSelfRC and pSelfR0 pointers.
 *
 * Called when anything was relocated.
 *
 * @param   pVM         Pointer to the shared VM structure.
 */
void pgmR3PhysRelinkRamRanges(PVM pVM)
{
    PPGMRAMRANGE pCur;

#ifdef VBOX_STRICT
    for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
    {
        Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfR0 == MMHyperCCToR0(pVM, pCur));
        Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfRC == MMHyperCCToRC(pVM, pCur));
        Assert((pCur->GCPhys     & PAGE_OFFSET_MASK) == 0);
        Assert((pCur->GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
        Assert((pCur->cb         & PAGE_OFFSET_MASK) == 0);
        Assert(pCur->cb == pCur->GCPhysLast - pCur->GCPhys + 1);
        for (PPGMRAMRANGE pCur2 = pVM->pgm.s.pRamRangesXR3; pCur2; pCur2 = pCur2->pNextR3)
            Assert(   pCur2 == pCur
                   || strcmp(pCur2->pszDesc, pCur->pszDesc)); /** @todo fix MMIO ranges!! */
    }
#endif

    pCur = pVM->pgm.s.pRamRangesXR3;
    if (pCur)
    {
        pVM->pgm.s.pRamRangesXR0 = pCur->pSelfR0;
        pVM->pgm.s.pRamRangesXRC = pCur->pSelfRC;

        for (; pCur->pNextR3; pCur = pCur->pNextR3)
        {
            pCur->pNextR0 = pCur->pNextR3->pSelfR0;
            pCur->pNextRC = pCur->pNextR3->pSelfRC;
        }

        Assert(pCur->pNextR0 == NIL_RTR0PTR);
        Assert(pCur->pNextRC == NIL_RTRCPTR);
    }
    else
    {
        Assert(pVM->pgm.s.pRamRangesXR0 == NIL_RTR0PTR);
        Assert(pVM->pgm.s.pRamRangesXRC == NIL_RTRCPTR);
    }
    ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);

    pgmR3PhysRebuildRamRangeSearchTrees(pVM);
}


/**
 * Links a new RAM range into the list.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pNew        Pointer to the new list entry.
 * @param   pPrev       Pointer to the previous list entry. If NULL, insert as head.
 */
static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
{
    AssertMsg(pNew->pszDesc, ("%RGp-%RGp\n", pNew->GCPhys, pNew->GCPhysLast));
    Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfR0 == MMHyperCCToR0(pVM, pNew));
    Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfRC == MMHyperCCToRC(pVM, pNew));

    pgmLock(pVM);

    PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesXR3;
    pNew->pNextR3 = pRam;
    pNew->pNextR0 = pRam ? pRam->pSelfR0 : NIL_RTR0PTR;
    pNew->pNextRC = pRam ? pRam->pSelfRC : NIL_RTRCPTR;

    if (pPrev)
    {
        pPrev->pNextR3 = pNew;
        pPrev->pNextR0 = pNew->pSelfR0;
        pPrev->pNextRC = pNew->pSelfRC;
    }
    else
    {
        pVM->pgm.s.pRamRangesXR3 = pNew;
        pVM->pgm.s.pRamRangesXR0 = pNew->pSelfR0;
        pVM->pgm.s.pRamRangesXRC = pNew->pSelfRC;
    }
    ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);

    pgmR3PhysRebuildRamRangeSearchTrees(pVM);
    pgmUnlock(pVM);
}


/**
 * Unlink an existing RAM range from the list.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pRam        Pointer to the list entry to remove.
 * @param   pPrev       Pointer to the previous list entry. If NULL, the entry is the head.
 */
static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
{
    Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesXR3 == pRam);
    Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfR0 == MMHyperCCToR0(pVM, pRam));
    Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfRC == MMHyperCCToRC(pVM, pRam));

    pgmLock(pVM);

    PPGMRAMRANGE pNext = pRam->pNextR3;
    if (pPrev)
    {
        pPrev->pNextR3 = pNext;
        pPrev->pNextR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
        pPrev->pNextRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
    }
    else
    {
        Assert(pVM->pgm.s.pRamRangesXR3 == pRam);
        pVM->pgm.s.pRamRangesXR3 = pNext;
        pVM->pgm.s.pRamRangesXR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
        pVM->pgm.s.pRamRangesXRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
    }
    ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);

    pgmR3PhysRebuildRamRangeSearchTrees(pVM);
    pgmUnlock(pVM);
}


/**
 * Unlink an existing RAM range from the list.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pRam        Pointer to the list entry to remove.
 */
static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
{
    pgmLock(pVM);

    /* find prev. */
    PPGMRAMRANGE pPrev = NULL;
    PPGMRAMRANGE pCur  = pVM->pgm.s.pRamRangesXR3;
    while (pCur != pRam)
    {
        pPrev = pCur;
        pCur  = pCur->pNextR3;
    }
    AssertFatal(pCur);

    pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
    pgmUnlock(pVM);
}


/**
 * Frees a range of pages, replacing them with ZERO pages of the specified type.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pRam        The RAM range in which the pages reside.
 * @param   GCPhys      The address of the first page.
 * @param   GCPhysLast  The address of the last page.
 * @param   uType       The page type to replace them with.
 */
static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, uint8_t uType)
{
    PGM_LOCK_ASSERT_OWNER(pVM);
    uint32_t            cPendingPages = 0;
    PGMMFREEPAGESREQ    pReq;
    int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    AssertLogRelRCReturn(rc, rc);

    /* Iterate the pages. */
    PPGMPAGE pPageDst   = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
    uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> PAGE_SHIFT) + 1;
    while (cPagesLeft-- > 0)
    {
        rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
        AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */

        PGM_PAGE_SET_TYPE(pVM, pPageDst, uType);

        GCPhys += PAGE_SIZE;
        pPageDst++;
    }

    if (cPendingPages)
    {
        rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
        AssertLogRelRCReturn(rc, rc);
    }
    GMMR3FreePagesCleanup(pReq);

    return rc;
}

#if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))

/**
 * Rendezvous callback used by PGMR3PhysChangeMemBalloon that changes the memory balloon size.
 *
 * This is only called on one of the EMTs while the other ones are waiting for
 * it to complete this function.
 *
 * @returns VINF_SUCCESS (VBox strict status code).
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU for the EMT we're being called on. Unused.
 * @param   pvUser      User parameter
 */
static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysChangeMemBalloonRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    uintptr_t          *paUser        = (uintptr_t *)pvUser;
    bool                fInflate      = !!paUser[0];
    unsigned            cPages        = paUser[1];
    RTGCPHYS           *paPhysPage    = (RTGCPHYS *)paUser[2];
    uint32_t            cPendingPages = 0;
    PGMMFREEPAGESREQ    pReq;
    int                 rc;

    Log(("pgmR3PhysChangeMemBalloonRendezvous: %s %x pages\n", (fInflate) ? "inflate" : "deflate", cPages));
    pgmLock(pVM);

    if (fInflate)
    {
        /* Flush the PGM pool cache as we might have stale references to pages that we just freed. */
        pgmR3PoolClearAllRendezvous(pVM, pVCpu, NULL);

        /* Replace pages with ZERO pages. */
        rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
        if (RT_FAILURE(rc))
        {
            pgmUnlock(pVM);
            AssertLogRelRC(rc);
            return rc;
        }

        /* Iterate the pages. */
        for (unsigned i = 0; i < cPages; i++)
        {
            PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
            if (    pPage == NULL
                ||  PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM)
            {
                Log(("pgmR3PhysChangeMemBalloonRendezvous: invalid physical page %RGp pPage->u3Type=%d\n", paPhysPage[i], pPage ? PGM_PAGE_GET_TYPE(pPage) : 0));
                break;
            }

            LogFlow(("balloon page: %RGp\n", paPhysPage[i]));

            /* Flush the shadow PT if this page was previously used as a guest page table. */
            pgmPoolFlushPageByGCPhys(pVM, paPhysPage[i]);

            rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, paPhysPage[i]);
            if (RT_FAILURE(rc))
            {
                pgmUnlock(pVM);
                AssertLogRelRC(rc);
                return rc;
            }
            Assert(PGM_PAGE_IS_ZERO(pPage));
            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
        }

        if (cPendingPages)
        {
            rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
            if (RT_FAILURE(rc))
            {
                pgmUnlock(pVM);
                AssertLogRelRC(rc);
                return rc;
            }
        }
        GMMR3FreePagesCleanup(pReq);
    }
    else
    {
        /* Iterate the pages. */
        for (unsigned i = 0; i < cPages; i++)
        {
            PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
            AssertBreak(pPage && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);

            LogFlow(("Free ballooned page: %RGp\n", paPhysPage[i]));

            Assert(PGM_PAGE_IS_BALLOONED(pPage));

            /* Change back to zero page. */
            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
        }

        /* Note that we currently do not map any ballooned pages in our shadow page tables, so no need to flush the pgm pool. */
    }

    /* Notify GMM about the balloon change. */
    rc = GMMR3BalloonedPages(pVM, (fInflate) ? GMMBALLOONACTION_INFLATE : GMMBALLOONACTION_DEFLATE, cPages);
    if (RT_SUCCESS(rc))
    {
        if (!fInflate)
        {
            Assert(pVM->pgm.s.cBalloonedPages >= cPages);
            pVM->pgm.s.cBalloonedPages -= cPages;
        }
        else
            pVM->pgm.s.cBalloonedPages += cPages;
    }

    pgmUnlock(pVM);

    /* Flush the recompiler's TLB as well. */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        CPUMSetChangedFlags(&pVM->aCpus[i], CPUM_CHANGED_GLOBAL_TLB_FLUSH);

    AssertLogRelRC(rc);
    return rc;
}


/**
 * Worker for PGMR3PhysChangeMemBalloon that runs the rendezvous from a
 * request packet; used to postpone the job in the SMP case.
 *
 * @param   pVM         The VM handle.
 * @param   fInflate    Inflate or deflate memory balloon
 * @param   cPages      Number of pages to free
 * @param   paPhysPage  Array of guest physical addresses
 */
static DECLCALLBACK(void) pgmR3PhysChangeMemBalloonHelper(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
{
    uintptr_t paUser[3];

    paUser[0] = fInflate;
    paUser[1] = cPages;
    paUser[2] = (uintptr_t)paPhysPage;
    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
    AssertRC(rc);

    /* Made a copy in PGMR3PhysChangeMemBalloon; free it here. */
    RTMemFree(paPhysPage);
}

#endif /* 64-bit host && (Windows || Solaris || Linux || FreeBSD) */

/**
 * Inflate or deflate a memory balloon
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fInflate    Inflate or deflate memory balloon
 * @param   cPages      Number of pages to free
 * @param   paPhysPage  Array of guest physical addresses
 */
VMMR3DECL(int) PGMR3PhysChangeMemBalloon(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
{
    /* This must match GMMR0Init; currently we only support memory ballooning on all 64-bit hosts except Mac OS X */
#if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))
    int rc;

    /* Older additions (ancient non-functioning balloon code) pass wrong physical addresses. */
    AssertReturn(!(paPhysPage[0] & 0xfff), VERR_INVALID_PARAMETER);

    /* We own the IOM lock here and could cause a deadlock by waiting for another VCPU that is blocking on the IOM lock.
     * In the SMP case we post a request packet to postpone the job.
     */
    if (pVM->cCpus > 1)
    {
        unsigned cbPhysPage = cPages * sizeof(paPhysPage[0]);
        RTGCPHYS *paPhysPageCopy = (RTGCPHYS *)RTMemAlloc(cbPhysPage);
        AssertReturn(paPhysPageCopy, VERR_NO_MEMORY);

        memcpy(paPhysPageCopy, paPhysPage, cbPhysPage);

        rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysChangeMemBalloonHelper, 4, pVM, fInflate, cPages, paPhysPageCopy);
        AssertRC(rc);
    }
    else
    {
        uintptr_t paUser[3];

        paUser[0] = fInflate;
        paUser[1] = cPages;
        paUser[2] = (uintptr_t)paPhysPage;
        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
        AssertRC(rc);
    }
    return rc;

#else
    NOREF(pVM); NOREF(fInflate); NOREF(cPages); NOREF(paPhysPage);
    return VERR_NOT_IMPLEMENTED;
#endif
}
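
/*
 * Illustrative usage sketch (not part of the original file): inflating the
 * balloon with a caller-supplied array of page-aligned guest physical
 * addresses.  The addresses are made up; in the SMP case the API copies the
 * array, in the UP case it is consumed during the rendezvous.
 */
#if 0
static int exampleInflateBalloon(PVM pVM)
{
    static RTGCPHYS s_aPhysPages[2] = { 0x00100000, 0x00101000 }; /* must be page aligned */
    return PGMR3PhysChangeMemBalloon(pVM, true /*fInflate*/, RT_ELEMENTS(s_aPhysPages), s_aPhysPages);
}
#endif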

/**
 * Rendezvous callback used by PGMR3WriteProtectRAM that write protects all
 * physical RAM.
 *
 * This is only called on one of the EMTs while the other ones are waiting for
 * it to complete this function.
 *
 * @returns VINF_SUCCESS (VBox strict status code).
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU for the EMT we're being called on. Unused.
 * @param   pvUser      User parameter, unused.
 */
static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysWriteProtectRAMRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    int rc = VINF_SUCCESS;
    NOREF(pvUser); NOREF(pVCpu);

    pgmLock(pVM);
#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
    pgmPoolResetDirtyPages(pVM);
#endif

    /** @todo pointless to write protect the physical page pointed to by RSP. */

    for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
         pRam;
         pRam = pRam->CTX_SUFF(pNext))
    {
        uint32_t cPages = pRam->cb >> PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            PPGMPAGE    pPage = &pRam->aPages[iPage];
            PGMPAGETYPE enmPageType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);

            if (    RT_LIKELY(enmPageType == PGMPAGETYPE_RAM)
                ||  enmPageType == PGMPAGETYPE_MMIO2)
            {
                /*
                 * A RAM page.
                 */
                switch (PGM_PAGE_GET_STATE(pPage))
                {
                    case PGM_PAGE_STATE_ALLOCATED:
                        /** @todo Optimize this: Don't always re-enable write
                         *        monitoring if the page is known to be very busy. */
                        if (PGM_PAGE_IS_WRITTEN_TO(pPage))
                        {
                            PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
                            /* Remember this dirty page for the next (memory) sync. */
                            PGM_PAGE_SET_FT_DIRTY(pPage);
                        }

                        pgmPhysPageWriteMonitor(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
                        break;

                    case PGM_PAGE_STATE_SHARED:
                        AssertFailed();
                        break;

                    case PGM_PAGE_STATE_WRITE_MONITORED:    /* nothing to change. */
                    default:
                        break;
                }
            }
        }
    }
    pgmR3PoolWriteProtectPages(pVM);
    PGM_INVL_ALL_VCPU_TLBS(pVM);
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        CPUMSetChangedFlags(&pVM->aCpus[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);

    pgmUnlock(pVM);
    return rc;
}

/**
 * Protect all physical RAM to monitor writes
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 */
VMMR3DECL(int) PGMR3PhysWriteProtectRAM(PVM pVM)
{
    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);

    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysWriteProtectRAMRendezvous, NULL);
    AssertRC(rc);
    return rc;
}

/**
 * Enumerate all dirty FT pages.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pfnEnum     Enumerate callback handler.
 * @param   pvUser      Enumerate callback handler parameter.
 */
VMMR3DECL(int) PGMR3PhysEnumDirtyFTPages(PVM pVM, PFNPGMENUMDIRTYFTPAGES pfnEnum, void *pvUser)
{
    int rc = VINF_SUCCESS;

    pgmLock(pVM);
    for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
         pRam;
         pRam = pRam->CTX_SUFF(pNext))
    {
        uint32_t cPages = pRam->cb >> PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            PPGMPAGE    pPage = &pRam->aPages[iPage];
            PGMPAGETYPE enmPageType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);

            if (    RT_LIKELY(enmPageType == PGMPAGETYPE_RAM)
                ||  enmPageType == PGMPAGETYPE_MMIO2)
            {
                /*
                 * A RAM page.
                 */
                switch (PGM_PAGE_GET_STATE(pPage))
                {
                    case PGM_PAGE_STATE_ALLOCATED:
                    case PGM_PAGE_STATE_WRITE_MONITORED:
                        if (    !PGM_PAGE_IS_WRITTEN_TO(pPage)  /* not very recently updated? */
                            &&  PGM_PAGE_IS_FT_DIRTY(pPage))
                        {
                            unsigned       cbPageRange = PAGE_SIZE;
                            unsigned       iPageClean  = iPage + 1;
                            RTGCPHYS       GCPhysPage  = pRam->GCPhys + iPage * PAGE_SIZE;
                            uint8_t       *pu8Page     = NULL;
                            PGMPAGEMAPLOCK Lock;

                            /* Find the next clean page, so we can merge adjacent dirty pages. */
                            for (; iPageClean < cPages; iPageClean++)
                            {
                                PPGMPAGE pPageNext = &pRam->aPages[iPageClean];
                                if (    RT_UNLIKELY(PGM_PAGE_GET_TYPE(pPageNext) != PGMPAGETYPE_RAM)
                                    ||  PGM_PAGE_GET_STATE(pPageNext) != PGM_PAGE_STATE_ALLOCATED
                                    ||  PGM_PAGE_IS_WRITTEN_TO(pPageNext)
                                    ||  !PGM_PAGE_IS_FT_DIRTY(pPageNext)
                                    /* Crossing a chunk boundary? */
                                    ||  (GCPhysPage & GMM_PAGEID_IDX_MASK) != ((GCPhysPage + cbPageRange) & GMM_PAGEID_IDX_MASK)
                                   )
                                    break;

                                cbPageRange += PAGE_SIZE;
                            }

                            rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysPage, (const void **)&pu8Page, &Lock);
                            if (RT_SUCCESS(rc))
                            {
                                /** @todo this is risky; the range might be changed, but little choice as the sync
                                 *        costs a lot of time. */
                                pgmUnlock(pVM);
                                pfnEnum(pVM, GCPhysPage, pu8Page, cbPageRange, pvUser);
                                pgmLock(pVM);
                                PGMPhysReleasePageMappingLock(pVM, &Lock);
                            }

                            for (; iPage < iPageClean; iPage++)
                                PGM_PAGE_CLEAR_FT_DIRTY(&pRam->aPages[iPage]);

                            iPage = iPageClean - 1;
                        }
                        break;
                }
            }
        }
    }
    pgmUnlock(pVM);
    return rc;
}

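/*
 * Illustrative sketch (not part of the original file): a dirty page
 * enumeration callback for the API above.  The callback signature is an
 * assumption inferred from the pfnEnum call site; the authoritative
 * prototype is PFNPGMENUMDIRTYFTPAGES in the PGM headers.
 */
#if 0
static DECLCALLBACK(int) exampleDirtyPageEnum(PVM pVM, RTGCPHYS GCPhys, uint8_t *pu8Page, unsigned cbRange, void *pvUser)
{
    /* E.g. a fault tolerance sync would transmit [pu8Page, pu8Page + cbRange) here. */
    LogFlow(("exampleDirtyPageEnum: %RGp cb=%#x\n", GCPhys, cbRange));
    NOREF(pVM); NOREF(pu8Page); NOREF(pvUser);
    return VINF_SUCCESS;
}
/* ... and the call: PGMR3PhysEnumDirtyFTPages(pVM, exampleDirtyPageEnum, NULL); */
#endif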

/**
 * Gets the number of ram ranges.
 *
 * @returns Number of ram ranges.  Returns UINT32_MAX if @a pVM is invalid.
 * @param   pVM         The VM handle.
 */
VMMR3DECL(uint32_t) PGMR3PhysGetRamRangeCount(PVM pVM)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, UINT32_MAX);

    pgmLock(pVM);
    uint32_t cRamRanges = 0;
    for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRangesX); pCur; pCur = pCur->CTX_SUFF(pNext))
        cRamRanges++;
    pgmUnlock(pVM);
    return cRamRanges;
}


/**
 * Get information about a range.
 *
 * @returns VINF_SUCCESS or VERR_OUT_OF_RANGE.
 * @param   pVM             The VM handle
 * @param   iRange          The ordinal of the range.
 * @param   pGCPhysStart    Where to return the start of the range. Optional.
 * @param   pGCPhysLast     Where to return the address of the last byte in the
 *                          range. Optional.
 * @param   ppszDesc        Where to return the range description. Optional.
 * @param   pfIsMmio        Where to indicate that this is a pure MMIO range.
 *                          Optional.
 */
VMMR3DECL(int) PGMR3PhysGetRange(PVM pVM, uint32_t iRange, PRTGCPHYS pGCPhysStart, PRTGCPHYS pGCPhysLast,
                                 const char **ppszDesc, bool *pfIsMmio)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    pgmLock(pVM);
    uint32_t iCurRange = 0;
    for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRangesX); pCur; pCur = pCur->CTX_SUFF(pNext), iCurRange++)
        if (iCurRange == iRange)
        {
            if (pGCPhysStart)
                *pGCPhysStart = pCur->GCPhys;
            if (pGCPhysLast)
                *pGCPhysLast = pCur->GCPhysLast;
            if (ppszDesc)
                *ppszDesc = pCur->pszDesc;
            if (pfIsMmio)
                *pfIsMmio = !!(pCur->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO);

            pgmUnlock(pVM);
            return VINF_SUCCESS;
        }
    pgmUnlock(pVM);
    return VERR_OUT_OF_RANGE;
}

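/*
 * Illustrative usage sketch (not part of the original file): walking all RAM
 * ranges by ordinal with the two query APIs above, e.g. for an info handler.
 */
#if 0
static void exampleDumpRamRanges(PVM pVM)
{
    uint32_t const cRanges = PGMR3PhysGetRamRangeCount(pVM);
    for (uint32_t i = 0; i < cRanges; i++)
    {
        RTGCPHYS    GCPhysStart, GCPhysLast;
        const char *pszDesc;
        bool        fIsMmio;
        if (RT_SUCCESS(PGMR3PhysGetRange(pVM, i, &GCPhysStart, &GCPhysLast, &pszDesc, &fIsMmio)))
            LogRel(("#%02u: %RGp-%RGp %s%s\n", i, GCPhysStart, GCPhysLast, pszDesc, fIsMmio ? " (MMIO)" : ""));
    }
}
#endif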

/**
 * Query the amount of free memory inside VMMR0
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pcbAllocMem     Where to return the amount of memory allocated
 *                          by VMs.
 * @param   pcbFreeMem      Where to return the amount of memory that is
 *                          allocated from the host but not currently used
 *                          by any VMs.
 * @param   pcbBallonedMem  Where to return the sum of memory that is
 *                          currently ballooned by the VMs.
 * @param   pcbSharedMem    Where to return the amount of memory that is
 *                          currently shared.
 */
VMMR3DECL(int) PGMR3QueryGlobalMemoryStats(PVM pVM, uint64_t *pcbAllocMem, uint64_t *pcbFreeMem,
                                           uint64_t *pcbBallonedMem, uint64_t *pcbSharedMem)
{
    uint64_t cAllocPages   = 0;
    uint64_t cFreePages    = 0;
    uint64_t cBalloonPages = 0;
    uint64_t cSharedPages  = 0;
    int rc = GMMR3QueryHypervisorMemoryStats(pVM, &cAllocPages, &cFreePages, &cBalloonPages, &cSharedPages);
    AssertRCReturn(rc, rc);

    if (pcbAllocMem)
        *pcbAllocMem = cAllocPages * _4K;

    if (pcbFreeMem)
        *pcbFreeMem = cFreePages * _4K;

    if (pcbBallonedMem)
        *pcbBallonedMem = cBalloonPages * _4K;

    if (pcbSharedMem)
        *pcbSharedMem = cSharedPages * _4K;

    Log(("PGMR3QueryVMMMemoryStats: all=%llx free=%llx ballooned=%llx shared=%llx\n",
         cAllocPages, cFreePages, cBalloonPages, cSharedPages));
    return VINF_SUCCESS;
}


/**
 * Query memory stats for the VM.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pcbTotalMem     Where to return the total amount of memory the VM
 *                          may possibly use.
 * @param   pcbPrivateMem   Where to return the amount of private memory
 *                          currently allocated.
 * @param   pcbSharedMem    Where to return the amount of actually shared
 *                          memory currently used by the VM.
 * @param   pcbZeroMem      Where to return the amount of memory backed by
 *                          zero pages.
 *
 * @remarks The total mem is normally larger than the sum of the three
 *          components.  There are two reasons for this, first the amount of
 *          shared memory is what we're sure is shared instead of what could
 *          possibly be shared with someone.  Secondly, because the total may
 *          include some pure MMIO pages that don't go into any of the three
 *          sub-counts.
 *
 * @todo Why do we return reused shared pages instead of anything that could
 *       potentially be shared?  Doesn't this mean the first VM gets a much
 *       lower number of shared pages?
 */
VMMR3DECL(int) PGMR3QueryMemoryStats(PVM pVM, uint64_t *pcbTotalMem, uint64_t *pcbPrivateMem,
                                     uint64_t *pcbSharedMem, uint64_t *pcbZeroMem)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    if (pcbTotalMem)
        *pcbTotalMem = (uint64_t)pVM->pgm.s.cAllPages * PAGE_SIZE;

    if (pcbPrivateMem)
        *pcbPrivateMem = (uint64_t)pVM->pgm.s.cPrivatePages * PAGE_SIZE;

    if (pcbSharedMem)
        *pcbSharedMem = (uint64_t)pVM->pgm.s.cReusedSharedPages * PAGE_SIZE;

    if (pcbZeroMem)
        *pcbZeroMem = (uint64_t)pVM->pgm.s.cZeroPages * PAGE_SIZE;

    Log(("PGMR3QueryMemoryStats: all=%x private=%x reused=%x zero=%x\n", pVM->pgm.s.cAllPages, pVM->pgm.s.cPrivatePages, pVM->pgm.s.cReusedSharedPages, pVM->pgm.s.cZeroPages));
    return VINF_SUCCESS;
}

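/*
 * Illustrative usage sketch (not part of the original file): querying both
 * stat sets, e.g. for a metrics collector.
 */
#if 0
static void exampleLogMemoryStats(PVM pVM)
{
    uint64_t cbAlloc, cbFree, cbBallooned, cbShared;
    if (RT_SUCCESS(PGMR3QueryGlobalMemoryStats(pVM, &cbAlloc, &cbFree, &cbBallooned, &cbShared)))
        LogRel(("global: alloc=%llu free=%llu ballooned=%llu shared=%llu (bytes)\n",
                cbAlloc, cbFree, cbBallooned, cbShared));

    uint64_t cbTotal, cbPrivate, cbVmShared, cbZero;
    if (RT_SUCCESS(PGMR3QueryMemoryStats(pVM, &cbTotal, &cbPrivate, &cbVmShared, &cbZero)))
        LogRel(("vm:     total=%llu private=%llu shared=%llu zero=%llu (bytes)\n",
                cbTotal, cbPrivate, cbVmShared, cbZero));
}
#endif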

/**
 * PGMR3PhysRegisterRam worker that initializes and links a RAM range.
 *
 * @param   pVM             The VM handle.
 * @param   pNew            The new RAM range.
 * @param   GCPhys          The address of the RAM range.
 * @param   GCPhysLast      The last address of the RAM range.
 * @param   RCPtrNew        The RC address if the range is floating.  NIL_RTRCPTR
 *                          if in HMA.
 * @param   R0PtrNew        Ditto for R0.
 * @param   pszDesc         The description.
 * @param   pPrev           The previous RAM range (for linking).
 */
static void pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
                                         RTRCPTR RCPtrNew, RTR0PTR R0PtrNew, const char *pszDesc, PPGMRAMRANGE pPrev)
{
    /*
     * Initialize the range.
     */
    pNew->pSelfR0       = R0PtrNew != NIL_RTR0PTR ? R0PtrNew : MMHyperCCToR0(pVM, pNew);
    pNew->pSelfRC       = RCPtrNew != NIL_RTRCPTR ? RCPtrNew : MMHyperCCToRC(pVM, pNew);
    pNew->GCPhys        = GCPhys;
    pNew->GCPhysLast    = GCPhysLast;
    pNew->cb            = GCPhysLast - GCPhys + 1;
    pNew->pszDesc       = pszDesc;
    pNew->fFlags        = RCPtrNew != NIL_RTRCPTR ? PGM_RAM_RANGE_FLAGS_FLOATING : 0;
    pNew->pvR3          = NULL;
    pNew->paLSPages     = NULL;

    uint32_t const cPages = pNew->cb >> PAGE_SHIFT;
    RTGCPHYS iPage = cPages;
    while (iPage-- > 0)
        PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);

    /* Update the page count stats. */
    pVM->pgm.s.cZeroPages += cPages;
    pVM->pgm.s.cAllPages  += cPages;

    /*
     * Link it.
     */
    pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
}


/**
 * Relocate a floating RAM range.
 *
 * @copydoc FNPGMRELOCATE.
 */
static DECLCALLBACK(bool) pgmR3PhysRamRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
{
    PPGMRAMRANGE pRam = (PPGMRAMRANGE)pvUser;
    Assert(pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
    Assert(pRam->pSelfRC == GCPtrOld + PAGE_SIZE);

    switch (enmMode)
    {
        case PGMRELOCATECALL_SUGGEST:
            return true;

        case PGMRELOCATECALL_RELOCATE:
        {
            /*
             * Update myself, then relink all the ranges and flush the RC TLB.
             */
            pgmLock(pVM);

            pRam->pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE);

            pgmR3PhysRelinkRamRanges(pVM);
            for (unsigned i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
                pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;

            pgmUnlock(pVM);
            return true;
        }

        default:
            AssertFailedReturn(false);
    }
}


/**
 * PGMR3PhysRegisterRam worker that registers a high chunk.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   GCPhys          The address of the RAM.
 * @param   cRamPages       The number of RAM pages to register.
 * @param   cbChunk         The size of the PGMRAMRANGE guest mapping.
 * @param   iChunk          The chunk number.
 * @param   pszDesc         The RAM range description.
 * @param   ppPrev          Previous RAM range pointer. In/Out.
 */
static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages,
                                         uint32_t cbChunk, uint32_t iChunk, const char *pszDesc,
                                         PPGMRAMRANGE *ppPrev)
{
    const char *pszDescChunk = iChunk == 0
                             ? pszDesc
                             : MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s (#%u)", pszDesc, iChunk + 1);
    AssertReturn(pszDescChunk, VERR_NO_MEMORY);

    /*
     * Allocate memory for the new chunk.
     */
    size_t const cChunkPages  = RT_ALIGN_Z(RT_UOFFSETOF(PGMRAMRANGE, aPages[cRamPages]), PAGE_SIZE) >> PAGE_SHIFT;
    PSUPPAGE     paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages);
    AssertReturn(paChunkPages, VERR_NO_TMP_MEMORY);
    RTR0PTR      R0PtrChunk   = NIL_RTR0PTR;
    void        *pvChunk      = NULL;
    int rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk,
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                              VMMIsHwVirtExtForced(pVM) ? &R0PtrChunk : NULL,
#else
                              NULL,
#endif
                              paChunkPages);
    if (RT_SUCCESS(rc))
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        if (!VMMIsHwVirtExtForced(pVM))
            R0PtrChunk = NIL_RTR0PTR;
#else
        R0PtrChunk = (uintptr_t)pvChunk;
#endif
        memset(pvChunk, 0, cChunkPages << PAGE_SHIFT);

        PPGMRAMRANGE pNew = (PPGMRAMRANGE)pvChunk;

        /*
         * Create a mapping and map the pages into it.
         * We push these in below the HMA.
         */
        RTGCPTR GCPtrChunkMap = pVM->pgm.s.GCPtrPrevRamRangeMapping - cbChunk;
        rc = PGMR3MapPT(pVM, GCPtrChunkMap, cbChunk, 0 /*fFlags*/, pgmR3PhysRamRangeRelocate, pNew, pszDescChunk);
        if (RT_SUCCESS(rc))
        {
            pVM->pgm.s.GCPtrPrevRamRangeMapping = GCPtrChunkMap;

            RTGCPTR const GCPtrChunk = GCPtrChunkMap + PAGE_SIZE;
            RTGCPTR       GCPtrPage  = GCPtrChunk;
            for (uint32_t iPage = 0; iPage < cChunkPages && RT_SUCCESS(rc); iPage++, GCPtrPage += PAGE_SIZE)
                rc = PGMMap(pVM, GCPtrPage, paChunkPages[iPage].Phys, PAGE_SIZE, 0);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Ok, init and link the range.
                 */
                pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << PAGE_SHIFT) - 1,
                                             (RTRCPTR)GCPtrChunk, R0PtrChunk, pszDescChunk, *ppPrev);
                *ppPrev = pNew;
            }
        }

        if (RT_FAILURE(rc))
            SUPR3PageFreeEx(pvChunk, cChunkPages);
    }

    RTMemTmpFree(paChunkPages);
    return rc;
}


/**
 * Sets up a RAM range.
 *
 * This will check for conflicting registrations, make a resource
 * reservation for the memory (with GMM), and setup the per-page
 * tracking structures (PGMPAGE).
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the shared VM structure.
 * @param   GCPhys          The physical address of the RAM.
 * @param   cb              The size of the RAM.
 * @param   pszDesc         The description - not copied, so, don't free or change it.
 */
VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
{
    /*
     * Validate input.
     */
    Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
    AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
    AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);

    pgmLock(pVM);

    /*
     * Find range location and check for conflicts.
     * (We don't lock here because the locking by EMT is only required on update.)
     */
    PPGMRAMRANGE pPrev = NULL;
    PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
    while (pRam && GCPhysLast >= pRam->GCPhys)
    {
        if (    GCPhysLast >= pRam->GCPhys
            &&  GCPhys     <= pRam->GCPhysLast)
            AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
                                         GCPhys, GCPhysLast, pszDesc,
                                         pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
                                        VERR_PGM_RAM_CONFLICT);

        /* next */
        pPrev = pRam;
        pRam = pRam->pNextR3;
    }

    /*
     * Register it with GMM (the API bitches).
     */
    const RTGCPHYS cPages = cb >> PAGE_SHIFT;
    int rc = MMR3IncreaseBaseReservation(pVM, cPages);
    if (RT_FAILURE(rc))
    {
        pgmUnlock(pVM);
        return rc;
    }

    if (    GCPhys >= _4G
        &&  cPages > 256)
    {
        /*
         * The PGMRAMRANGE structures for the high memory can get very big.
         * In order to avoid SUPR3PageAllocEx allocation failures due to the
         * allocation size limit there and also to avoid being unable to find
         * guest mapping space for them, we split this memory up into 4MB in
         * (potential) raw-mode configs and 16MB chunks in forced AMD-V/VT-x
         * mode.
         *
         * The first and last page of each mapping are guard pages and marked
         * not-present. So, we've got 4186112 and 16769024 bytes available for
         * the PGMRAMRANGE structure.
         *
         * Note! The sizes used here will influence the saved state.
         */
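        /*
         * (Editorial note, not in the original file: the byte counts above
         * check out: 4U*_1M - 2*PAGE_SIZE = 4194304 - 8192 = 4186112, and
         * 16U*_1M - 2*PAGE_SIZE = 16777216 - 8192 = 16769024.)
         */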
        uint32_t cbChunk;
        uint32_t cPagesPerChunk;
        if (VMMIsHwVirtExtForced(pVM))
        {
            cbChunk = 16U*_1M;
            cPagesPerChunk = 1048048; /* max ~1048059 */
            AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 1048048 < 16U*_1M - PAGE_SIZE * 2);
        }
        else
        {
            cbChunk = 4U*_1M;
            cPagesPerChunk = 261616; /* max ~261627 */
            AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 261616 < 4U*_1M - PAGE_SIZE * 2);
        }
        AssertRelease(RT_UOFFSETOF(PGMRAMRANGE, aPages[cPagesPerChunk]) + PAGE_SIZE * 2 <= cbChunk);

        RTGCPHYS cPagesLeft  = cPages;
        RTGCPHYS GCPhysChunk = GCPhys;
        uint32_t iChunk      = 0;
        while (cPagesLeft > 0)
        {
            uint32_t cPagesInChunk = cPagesLeft;
            if (cPagesInChunk > cPagesPerChunk)
                cPagesInChunk = cPagesPerChunk;

            rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, cbChunk, iChunk, pszDesc, &pPrev);
            AssertRCReturn(rc, rc);

            /* advance */
            GCPhysChunk += (RTGCPHYS)cPagesInChunk << PAGE_SHIFT;
            cPagesLeft  -= cPagesInChunk;
            iChunk++;
        }
    }
    else
    {
        /*
         * Allocate, initialize and link the new RAM range.
         */
        const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
        PPGMRAMRANGE pNew;
        rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
        AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);

        pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, NIL_RTRCPTR, NIL_RTR0PTR, pszDesc, pPrev);
    }
    pgmPhysInvalidatePageMapTLB(pVM);
    pgmUnlock(pVM);

    /*
     * Notify REM.
     */
    REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_RAM);

    return VINF_SUCCESS;
}
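
/*
 * Illustrative usage sketch (not part of the original file): how a machine
 * init path might register a RAM region below 4G.  The address, size and
 * description string are made up; the description must stay valid for the
 * lifetime of the range since it is not copied.
 */
#if 0
static int exampleRegisterLowRam(PVM pVM)
{
    return PGMR3PhysRegisterRam(pVM, 0 /*GCPhys*/, 512 * _1M /*cb*/, "Example Base RAM");
}
#endif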
1743
1744
1745/**
1746 * Worker called by PGMR3InitFinalize if we're configured to pre-allocate RAM.
1747 *
1748 * We do this late in the init process so that all the ROM and MMIO ranges have
1749 * been registered already and we don't go wasting memory on them.
1750 *
1751 * @returns VBox status code.
1752 *
1753 * @param pVM Pointer to the shared VM structure.
1754 */
1755int pgmR3PhysRamPreAllocate(PVM pVM)
1756{
1757 Assert(pVM->pgm.s.fRamPreAlloc);
1758 Log(("pgmR3PhysRamPreAllocate: enter\n"));
1759
1760 /*
1761 * Walk the RAM ranges and allocate all RAM pages, halt at
1762 * the first allocation error.
1763 */
1764 uint64_t cPages = 0;
1765 uint64_t NanoTS = RTTimeNanoTS();
1766 pgmLock(pVM);
1767 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
1768 {
1769 PPGMPAGE pPage = &pRam->aPages[0];
1770 RTGCPHYS GCPhys = pRam->GCPhys;
1771 uint32_t cLeft = pRam->cb >> PAGE_SHIFT;
1772 while (cLeft-- > 0)
1773 {
1774 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
1775 {
1776 switch (PGM_PAGE_GET_STATE(pPage))
1777 {
1778 case PGM_PAGE_STATE_ZERO:
1779 {
1780 int rc = pgmPhysAllocPage(pVM, pPage, GCPhys);
1781 if (RT_FAILURE(rc))
1782 {
1783 LogRel(("PGM: RAM Pre-allocation failed at %RGp (in %s) with rc=%Rrc\n", GCPhys, pRam->pszDesc, rc));
1784 pgmUnlock(pVM);
1785 return rc;
1786 }
1787 cPages++;
1788 break;
1789 }
1790
1791 case PGM_PAGE_STATE_BALLOONED:
1792 case PGM_PAGE_STATE_ALLOCATED:
1793 case PGM_PAGE_STATE_WRITE_MONITORED:
1794 case PGM_PAGE_STATE_SHARED:
1795 /* nothing to do here. */
1796 break;
1797 }
1798 }
1799
1800 /* next */
1801 pPage++;
1802 GCPhys += PAGE_SIZE;
1803 }
1804 }
1805 pgmUnlock(pVM);
1806 NanoTS = RTTimeNanoTS() - NanoTS;
1807
1808 LogRel(("PGM: Pre-allocated %llu pages in %llu ms\n", cPages, NanoTS / 1000000));
1809 Log(("pgmR3PhysRamPreAllocate: returns VINF_SUCCESS\n"));
1810 return VINF_SUCCESS;
1811}
1812
1813
1814/**
1815 * Resets (zeros) the RAM.
1816 *
1817 * ASSUMES that the caller owns the PGM lock.
1818 *
1819 * @returns VBox status code.
1820 * @param pVM Pointer to the shared VM structure.
1821 */
1822int pgmR3PhysRamReset(PVM pVM)
1823{
1824 PGM_LOCK_ASSERT_OWNER(pVM);
1825
1826 /* Reset the memory balloon. */
1827 int rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_RESET, 0);
1828 AssertRC(rc);
1829
1830#ifdef VBOX_WITH_PAGE_SHARING
1831 /* Clear all registered shared modules. */
1832 rc = GMMR3ResetSharedModules(pVM);
1833 AssertRC(rc);
1834#endif
1835 /* Reset counters. */
1836 pVM->pgm.s.cReusedSharedPages = 0;
1837 pVM->pgm.s.cBalloonedPages = 0;
1838
1839 /*
1840 * We batch up pages that should be freed instead of calling GMM for
1841 * each and every one of them.
1842 */
1843 uint32_t cPendingPages = 0;
1844 PGMMFREEPAGESREQ pReq;
1845 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1846 AssertLogRelRCReturn(rc, rc);
1847
1848 /*
1849 * Walk the ram ranges.
1850 */
1851 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
1852 {
1853 uint32_t iPage = pRam->cb >> PAGE_SHIFT;
1854 AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
1855
1856 if (!pVM->pgm.s.fRamPreAlloc)
1857 {
1858 /* Replace all RAM pages by ZERO pages. */
1859 while (iPage-- > 0)
1860 {
1861 PPGMPAGE pPage = &pRam->aPages[iPage];
1862 switch (PGM_PAGE_GET_TYPE(pPage))
1863 {
1864 case PGMPAGETYPE_RAM:
1865                    /* Do not replace pages that are part of a 2 MB contiguous range
1866                       with zero pages; zero them instead. */
1867 if ( PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE
1868 || PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED)
1869 {
1870 void *pvPage;
1871 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pvPage);
1872 AssertLogRelRCReturn(rc, rc);
1873 ASMMemZeroPage(pvPage);
1874 }
1875 else if (PGM_PAGE_IS_BALLOONED(pPage))
1876 {
1877 /* Turn into a zero page; the balloon status is lost when the VM reboots. */
1878 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
1879 }
1880 else if (!PGM_PAGE_IS_ZERO(pPage))
1881 {
1882 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1883 AssertLogRelRCReturn(rc, rc);
1884 }
1885 break;
1886
1887 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
1888 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT),
1889 true /*fDoAccounting*/);
1890 break;
1891
1892 case PGMPAGETYPE_MMIO2:
1893 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
1894 case PGMPAGETYPE_ROM:
1895 case PGMPAGETYPE_MMIO:
1896 break;
1897 default:
1898 AssertFailed();
1899 }
1900 } /* for each page */
1901 }
1902 else
1903 {
1904 /* Zero the memory. */
1905 while (iPage-- > 0)
1906 {
1907 PPGMPAGE pPage = &pRam->aPages[iPage];
1908 switch (PGM_PAGE_GET_TYPE(pPage))
1909 {
1910 case PGMPAGETYPE_RAM:
1911 switch (PGM_PAGE_GET_STATE(pPage))
1912 {
1913 case PGM_PAGE_STATE_ZERO:
1914 break;
1915
1916 case PGM_PAGE_STATE_BALLOONED:
1917 /* Turn into a zero page; the balloon status is lost when the VM reboots. */
1918 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
1919 break;
1920
1921 case PGM_PAGE_STATE_SHARED:
1922 case PGM_PAGE_STATE_WRITE_MONITORED:
1923 rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1924 AssertLogRelRCReturn(rc, rc);
1925 /* no break */
1926
1927 case PGM_PAGE_STATE_ALLOCATED:
1928 {
1929 void *pvPage;
1930 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pvPage);
1931 AssertLogRelRCReturn(rc, rc);
1932 ASMMemZeroPage(pvPage);
1933 break;
1934 }
1935 }
1936 break;
1937
1938 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
1939 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT),
1940 true /*fDoAccounting*/);
1941 break;
1942
1943 case PGMPAGETYPE_MMIO2:
1944 case PGMPAGETYPE_ROM_SHADOW:
1945 case PGMPAGETYPE_ROM:
1946 case PGMPAGETYPE_MMIO:
1947 break;
1948 default:
1949 AssertFailed();
1950
1951 }
1952 } /* for each page */
1953 }
1954
1955 }
1956
1957 /*
1958 * Finish off any pages pending freeing.
1959 */
1960 if (cPendingPages)
1961 {
1962 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
1963 AssertLogRelRCReturn(rc, rc);
1964 }
1965 GMMR3FreePagesCleanup(pReq);
1966
1967 return VINF_SUCCESS;
1968}
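
/*
 * The prepare/batch/perform/cleanup sequence above is the standard idiom for
 * handing pages back to GMM in batches of PGMPHYS_FREE_PAGE_BATCH_SIZE.
 * Distilled into a minimal sketch (cPagesToFree, paPages and GCPhysFirst are
 * hypothetical; error handling trimmed):
 *
 *      uint32_t         cPendingPages = 0;
 *      PGMMFREEPAGESREQ pReq;
 *      int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
 *      AssertLogRelRCReturn(rc, rc);
 *      for (uint32_t iPage = 0; iPage < cPagesToFree && RT_SUCCESS(rc); iPage++)
 *          rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &paPages[iPage],
 *                               GCPhysFirst + ((RTGCPHYS)iPage << PAGE_SHIFT)); // flushes full batches itself
 *      if (cPendingPages)
 *          rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);                // flush the final partial batch
 *      GMMR3FreePagesCleanup(pReq);
 */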
1969
1970/**
1971 * Frees all RAM during VM termination.
1972 *
1973 * ASSUMES that the caller owns the PGM lock.
1974 *
1975 * @returns VBox status code.
1976 * @param pVM Pointer to the shared VM structure.
1977 */
1978int pgmR3PhysRamTerm(PVM pVM)
1979{
1980 PGM_LOCK_ASSERT_OWNER(pVM);
1981
1982 /* Reset the memory balloon. */
1983 int rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_RESET, 0);
1984 AssertRC(rc);
1985
1986#ifdef VBOX_WITH_PAGE_SHARING
1987 /* Clear all registered shared modules. */
1988 rc = GMMR3ResetSharedModules(pVM);
1989 AssertRC(rc);
1990#endif
1991
1992 /*
1993 * We batch up pages that should be freed instead of calling GMM for
1994 * each and every one of them.
1995 */
1996 uint32_t cPendingPages = 0;
1997 PGMMFREEPAGESREQ pReq;
1998 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1999 AssertLogRelRCReturn(rc, rc);
2000
2001 /*
2002 * Walk the ram ranges.
2003 */
2004 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
2005 {
2006 uint32_t iPage = pRam->cb >> PAGE_SHIFT;
2007 AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
2008
2009 /* Replace all RAM pages by ZERO pages. */
2010 while (iPage-- > 0)
2011 {
2012 PPGMPAGE pPage = &pRam->aPages[iPage];
2013 switch (PGM_PAGE_GET_TYPE(pPage))
2014 {
2015 case PGMPAGETYPE_RAM:
2016 /* Free all shared pages. Private pages are automatically freed during GMM VM cleanup. */
2017 if (PGM_PAGE_IS_SHARED(pPage))
2018 {
2019 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
2020 AssertLogRelRCReturn(rc, rc);
2021 }
2022 break;
2023
2024 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
2025 case PGMPAGETYPE_MMIO2:
2026 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
2027 case PGMPAGETYPE_ROM:
2028 case PGMPAGETYPE_MMIO:
2029 break;
2030 default:
2031 AssertFailed();
2032 }
2033 } /* for each page */
2034 }
2035
2036 /*
2037 * Finish off any pages pending freeing.
2038 */
2039 if (cPendingPages)
2040 {
2041 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2042 AssertLogRelRCReturn(rc, rc);
2043 }
2044 GMMR3FreePagesCleanup(pReq);
2045 return VINF_SUCCESS;
2046}
2047
2048/**
2049 * This is the interface IOM is using to register an MMIO region.
2050 *
2051 * It will check for conflicts and ensure that a RAM range structure
2052 * is present before calling the PGMHandlerPhysicalRegisterEx API to
2053 * register the callbacks.
2054 *
2055 * @returns VBox status code.
2056 *
2057 * @param pVM Pointer to the shared VM structure.
2058 * @param GCPhys The start of the MMIO region.
2059 * @param cb The size of the MMIO region.
2060 * @param pfnHandlerR3 The address of the ring-3 handler. (IOMR3MMIOHandler)
2061 * @param pvUserR3 The user argument for R3.
2062 * @param pfnHandlerR0 The address of the ring-0 handler. (IOMMMIOHandler)
2063 * @param pvUserR0 The user argument for R0.
2064 * @param pfnHandlerRC The address of the RC handler. (IOMMMIOHandler)
2065 * @param pvUserRC The user argument for RC.
2066 * @param pszDesc The description of the MMIO region.
2067 */
2068VMMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb,
2069 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
2070 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
2071 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
2072 R3PTRTYPE(const char *) pszDesc)
2073{
2074 /*
2075     * Assert on some assumptions.
2076 */
2077 VM_ASSERT_EMT(pVM);
2078 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2079 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2080 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
2081 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
2082
2083 int rc = pgmLock(pVM);
2084 AssertRCReturn(rc, rc);
2085
2086 /*
2087 * Make sure there's a RAM range structure for the region.
2088 */
2089 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2090 bool fRamExists = false;
2091 PPGMRAMRANGE pRamPrev = NULL;
2092 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
2093 while (pRam && GCPhysLast >= pRam->GCPhys)
2094 {
2095 if ( GCPhysLast >= pRam->GCPhys
2096 && GCPhys <= pRam->GCPhysLast)
2097 {
2098 /* Simplification: all within the same range. */
2099 AssertLogRelMsgReturnStmt( GCPhys >= pRam->GCPhys
2100 && GCPhysLast <= pRam->GCPhysLast,
2101 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
2102 GCPhys, GCPhysLast, pszDesc,
2103 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
2104 pgmUnlock(pVM),
2105 VERR_PGM_RAM_CONFLICT);
2106
2107 /* Check that it's all RAM or MMIO pages. */
2108 PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2109 uint32_t cLeft = cb >> PAGE_SHIFT;
2110 while (cLeft-- > 0)
2111 {
2112 AssertLogRelMsgReturnStmt( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
2113 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
2114 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
2115 GCPhys, GCPhysLast, pszDesc, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
2116 pgmUnlock(pVM),
2117 VERR_PGM_RAM_CONFLICT);
2118 pPage++;
2119 }
2120
2121 /* Looks good. */
2122 fRamExists = true;
2123 break;
2124 }
2125
2126 /* next */
2127 pRamPrev = pRam;
2128 pRam = pRam->pNextR3;
2129 }
2130 PPGMRAMRANGE pNew;
2131 if (fRamExists)
2132 {
2133 pNew = NULL;
2134
2135 /*
2136 * Make all the pages in the range MMIO/ZERO pages, freeing any
2137 * RAM pages currently mapped here. This might not be 100% correct
2138 * for PCI memory, but we're doing the same thing for MMIO2 pages.
2139 */
2140 rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, PGMPAGETYPE_MMIO);
2141 AssertRCReturnStmt(rc, pgmUnlock(pVM), rc);
2142
2143 /* Force a PGM pool flush as guest ram references have been changed. */
2144 /** @todo not entirely SMP safe; assuming for now the guest takes
2145 * care of this internally (not touch mapped mmio while changing the
2146         * care of this internally (not touching mapped MMIO while changing the
2147 PVMCPU pVCpu = VMMGetCpu(pVM);
2148 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2149 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2150 }
2151 else
2152 {
2153
2154 /*
2155 * No RAM range, insert an ad hoc one.
2156 *
2157 * Note that we don't have to tell REM about this range because
2158 * PGMHandlerPhysicalRegisterEx will do that for us.
2159 */
2160 Log(("PGMR3PhysMMIORegister: Adding ad hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));
2161
2162 const uint32_t cPages = cb >> PAGE_SHIFT;
2163 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
2164        rc = MMHyperAlloc(pVM, cbRamRange, 16, MM_TAG_PGM_PHYS, (void **)&pNew);
2165 AssertLogRelMsgRCReturnStmt(rc, ("cbRamRange=%zu\n", cbRamRange), pgmUnlock(pVM), rc);
2166
2167 /* Initialize the range. */
2168 pNew->pSelfR0 = MMHyperCCToR0(pVM, pNew);
2169 pNew->pSelfRC = MMHyperCCToRC(pVM, pNew);
2170 pNew->GCPhys = GCPhys;
2171 pNew->GCPhysLast = GCPhysLast;
2172 pNew->cb = cb;
2173 pNew->pszDesc = pszDesc;
2174 pNew->fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO;
2175 pNew->pvR3 = NULL;
2176 pNew->paLSPages = NULL;
2177
2178 uint32_t iPage = cPages;
2179 while (iPage-- > 0)
2180 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
2181 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);
2182
2183 /* update the page count stats. */
2184 pVM->pgm.s.cPureMmioPages += cPages;
2185 pVM->pgm.s.cAllPages += cPages;
2186
2187 /* link it */
2188 pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
2189 }
2190
2191 /*
2192 * Register the access handler.
2193 */
2194 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_MMIO, GCPhys, GCPhysLast,
2195 pfnHandlerR3, pvUserR3,
2196 pfnHandlerR0, pvUserR0,
2197 pfnHandlerRC, pvUserRC, pszDesc);
2198 if ( RT_FAILURE(rc)
2199 && !fRamExists)
2200 {
2201 pVM->pgm.s.cPureMmioPages -= cb >> PAGE_SHIFT;
2202 pVM->pgm.s.cAllPages -= cb >> PAGE_SHIFT;
2203
2204 /* remove the ad hoc range. */
2205 pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
2206 pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
2207        MMHyperFree(pVM, pNew);
2208 }
2209 pgmPhysInvalidatePageMapTLB(pVM);
2210
2211 pgmUnlock(pVM);
2212 return rc;
2213}
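
/*
 * Sketch of a call to the API above as IOM-side code might issue it for a
 * single-page MMIO range (the address, user argument and description are
 * invented for illustration; only IOMR3MMIOHandler is named by the parameter
 * docs above):
 *
 *      rc = PGMR3PhysMMIORegister(pVM, UINT32_C(0xfee00000), PAGE_SIZE,
 *                                 IOMR3MMIOHandler, pRange,   // hypothetical R3 user argument
 *                                 NIL_RTR0PTR, NIL_RTR0PTR,   // no R0 handler in this sketch
 *                                 NIL_RTRCPTR, NIL_RTRCPTR,   // no RC handler in this sketch
 *                                 "Example MMIO range");
 */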
2214
2215
2216/**
2217 * This is the interface IOM is using to deregister an MMIO region.
2218 *
2219 * It will take care of calling PGMHandlerPhysicalDeregister and clean up
2220 * any ad hoc PGMRAMRANGE left behind.
2221 *
2222 * @returns VBox status code.
2223 * @param pVM Pointer to the shared VM structure.
2224 * @param GCPhys The start of the MMIO region.
2225 * @param cb The size of the MMIO region.
2226 */
2227VMMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
2228{
2229 VM_ASSERT_EMT(pVM);
2230
2231 int rc = pgmLock(pVM);
2232 AssertRCReturn(rc, rc);
2233
2234 /*
2235 * First deregister the handler, then check if we should remove the ram range.
2236 */
2237 rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
2238 if (RT_SUCCESS(rc))
2239 {
2240 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2241 PPGMRAMRANGE pRamPrev = NULL;
2242 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
2243 while (pRam && GCPhysLast >= pRam->GCPhys)
2244 {
2245 /** @todo We're being a bit too careful here. rewrite. */
2246 if ( GCPhysLast == pRam->GCPhysLast
2247 && GCPhys == pRam->GCPhys)
2248 {
2249 Assert(pRam->cb == cb);
2250
2251 /*
2252 * See if all the pages are dead MMIO pages.
2253 */
2254 uint32_t const cPages = cb >> PAGE_SHIFT;
2255 bool fAllMMIO = true;
2256 uint32_t iPage = 0;
2257 uint32_t cLeft = cPages;
2258 while (cLeft-- > 0)
2259 {
2260 PPGMPAGE pPage = &pRam->aPages[iPage];
2261 if ( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO
2262 /*|| not-out-of-action later */)
2263 {
2264 fAllMMIO = false;
2265 Assert(PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO2_ALIAS_MMIO);
2266 AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
2267 break;
2268 }
2269 Assert(PGM_PAGE_IS_ZERO(pPage));
2270 pPage++;
2271 }
2272 if (fAllMMIO)
2273 {
2274 /*
2275 * Ad-hoc range, unlink and free it.
2276 */
2277 Log(("PGMR3PhysMMIODeregister: Freeing ad hoc MMIO range for %RGp-%RGp %s\n",
2278 GCPhys, GCPhysLast, pRam->pszDesc));
2279
2280 pVM->pgm.s.cAllPages -= cPages;
2281 pVM->pgm.s.cPureMmioPages -= cPages;
2282
2283 pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
2284 pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
2285 MMHyperFree(pVM, pRam);
2286 break;
2287 }
2288 }
2289
2290 /*
2291 * Range match? It will all be within one range (see PGMAllHandler.cpp).
2292 */
2293 if ( GCPhysLast >= pRam->GCPhys
2294 && GCPhys <= pRam->GCPhysLast)
2295 {
2296 Assert(GCPhys >= pRam->GCPhys);
2297 Assert(GCPhysLast <= pRam->GCPhysLast);
2298
2299 /*
2300 * Turn the pages back into RAM pages.
2301 */
2302 uint32_t iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2303 uint32_t cLeft = cb >> PAGE_SHIFT;
2304 while (cLeft--)
2305 {
2306 PPGMPAGE pPage = &pRam->aPages[iPage];
2307 AssertMsg(PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
2308 AssertMsg(PGM_PAGE_IS_ZERO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
2309 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
2310 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_RAM);
2311 }
2312 break;
2313 }
2314
2315 /* next */
2316 pRamPrev = pRam;
2317 pRam = pRam->pNextR3;
2318 }
2319 }
2320
2321 /* Force a PGM pool flush as guest ram references have been changed. */
2322    /** @todo Not entirely SMP safe; assuming for now the guest takes care of this internally (not touching mapped MMIO while changing the mapping). */
2323 PVMCPU pVCpu = VMMGetCpu(pVM);
2324 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2325 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2326
2327 pgmPhysInvalidatePageMapTLB(pVM);
2328 pgmPhysInvalidRamRangeTlbs(pVM);
2329 pgmUnlock(pVM);
2330 return rc;
2331}
2332
2333
2334/**
2335 * Locate a MMIO2 range.
2336 *
2337 * @returns Pointer to the MMIO2 range.
2338 * @param pVM Pointer to the shared VM structure.
2339 * @param pDevIns The device instance owning the region.
2340 * @param iRegion The region.
2341 */
2342DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
2343{
2344 /*
2345 * Search the list.
2346 */
2347 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
2348 if ( pCur->pDevInsR3 == pDevIns
2349 && pCur->iRegion == iRegion)
2350 return pCur;
2351 return NULL;
2352}
2353
2354
2355/**
2356 * Allocate and register an MMIO2 region.
2357 *
2358 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's RAM
2359 * associated with a device. It is also non-shared memory with a permanent
2360 * ring-3 mapping and page backing (presently).
2361 *
2362 * A MMIO2 range may overlap with base memory if a lot of RAM is configured for
2363 * the VM, in which case we'll drop the base memory pages. Presently we will
2364 * make no attempt to preserve anything that happens to be present in the base
2365 * memory that is replaced; this is of course incorrect, but it's too much
2366 * effort.
2367 *
2368 * @returns VBox status code.
2369 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the
2370 * memory.
2371 * @retval VERR_ALREADY_EXISTS if the region already exists.
2372 *
2373 * @param pVM Pointer to the shared VM structure.
2374 * @param pDevIns The device instance owning the region.
2375 * @param iRegion The region number. If the MMIO2 memory is a PCI
2376 * I/O region this number has to be the number of that
2377 *                          region. Otherwise it can be any number up to
2378 *                          UINT8_MAX.
2379 * @param cb The size of the region. Must be page aligned.
2380 * @param fFlags Reserved for future use, must be zero.
2381 * @param ppv Where to store the pointer to the ring-3 mapping of
2382 * the memory.
2383 * @param pszDesc The description.
2384 */
2385VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc)
2386{
2387 /*
2388 * Validate input.
2389 */
2390 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2391 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2392 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2393 AssertPtrReturn(ppv, VERR_INVALID_POINTER);
2394 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
2395 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
2396 AssertReturn(pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
2397 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2398 AssertReturn(cb, VERR_INVALID_PARAMETER);
2399 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2400
2401 const uint32_t cPages = cb >> PAGE_SHIFT;
2402 AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
2403 AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY);
2404
2405 /*
2406 * For the 2nd+ instance, mangle the description string so it's unique.
2407 */
2408 if (pDevIns->iInstance > 0) /** @todo Move to PDMDevHlp.cpp and use a real string cache. */
2409 {
2410 pszDesc = MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s [%u]", pszDesc, pDevIns->iInstance);
2411 if (!pszDesc)
2412 return VERR_NO_MEMORY;
2413 }
2414
2415 /*
2416 * Try reserve and allocate the backing memory first as this is what is
2417 * most likely to fail.
2418 */
2419 int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc);
2420 if (RT_SUCCESS(rc))
2421 {
2422 void *pvPages;
2423 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE));
2424        if (RT_LIKELY(paPages))
2425            rc = SUPR3PageAllocEx(cPages, 0 /*fFlags*/, &pvPages, NULL /*pR0Ptr*/, paPages);
            else
                rc = VERR_NO_TMP_MEMORY; /* RTMemTmpAlloc failed above; don't pass SUPR3PageAllocEx a NULL array. */
2426 if (RT_SUCCESS(rc))
2427 {
2428 memset(pvPages, 0, cPages * PAGE_SIZE);
2429
2430 /*
2431 * Create the MMIO2 range record for it.
2432 */
2433 const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]);
2434 PPGMMMIO2RANGE pNew;
2435 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
2436 AssertLogRelMsgRC(rc, ("cbRamRange=%zu\n", cbRange));
2437 if (RT_SUCCESS(rc))
2438 {
2439 pNew->pDevInsR3 = pDevIns;
2440 pNew->pvR3 = pvPages;
2441 //pNew->pNext = NULL;
2442 //pNew->fMapped = false;
2443 //pNew->fOverlapping = false;
2444 pNew->iRegion = iRegion;
2445 pNew->idSavedState = UINT8_MAX;
2446 pNew->RamRange.pSelfR0 = MMHyperCCToR0(pVM, &pNew->RamRange);
2447 pNew->RamRange.pSelfRC = MMHyperCCToRC(pVM, &pNew->RamRange);
2448 pNew->RamRange.GCPhys = NIL_RTGCPHYS;
2449 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
2450 pNew->RamRange.pszDesc = pszDesc;
2451 pNew->RamRange.cb = cb;
2452 pNew->RamRange.fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2;
2453 pNew->RamRange.pvR3 = pvPages;
2454 //pNew->RamRange.paLSPages = NULL;
2455
2456 uint32_t iPage = cPages;
2457 while (iPage-- > 0)
2458 {
2459 PGM_PAGE_INIT(&pNew->RamRange.aPages[iPage],
2460 paPages[iPage].Phys, NIL_GMM_PAGEID,
2461 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
2462 }
2463
2464 /* update page count stats */
2465 pVM->pgm.s.cAllPages += cPages;
2466 pVM->pgm.s.cPrivatePages += cPages;
2467
2468 /*
2469 * Link it into the list.
2470 * Since there is no particular order, just push it.
2471 */
2472 pgmLock(pVM);
2473 pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3;
2474 pVM->pgm.s.pMmio2RangesR3 = pNew;
2475 pgmUnlock(pVM);
2476
2477 *ppv = pvPages;
2478 RTMemTmpFree(paPages);
2479 pgmPhysInvalidatePageMapTLB(pVM);
2480 return VINF_SUCCESS;
2481 }
2482
2483 SUPR3PageFreeEx(pvPages, cPages);
2484 }
2485 RTMemTmpFree(paPages);
2486 MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pszDesc);
2487 }
2488 if (pDevIns->iInstance > 0)
2489 MMR3HeapFree((void *)pszDesc);
2490 return rc;
2491}
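
/*
 * Construction-time sketch for the API above: a hypothetical device
 * allocating a 2 MB MMIO2 region as its PCI region 0 (sizes and names are
 * illustrative; devices normally go through the PDM device helpers):
 *
 *      void *pvVRam;
 *      rc = PGMR3PhysMMIO2Register(pVM, pDevIns, 0, 2 * _1M,    // iRegion=0, cb=2 MB
 *                                  0, &pvVRam, "Example-VRAM"); // fFlags must be zero
 *      if (RT_SUCCESS(rc))
 *          memset(pvVRam, 0xff, 2 * _1M);                       // the ring-3 mapping is live at once
 */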
2492
2493
2494/**
2495 * Deregisters and frees an MMIO2 region.
2496 *
2497 * Any physical (and virtual) access handlers registered for the region must
2498 * be deregistered before calling this function.
2499 *
2500 * @returns VBox status code.
2501 * @param pVM Pointer to the shared VM structure.
2502 * @param pDevIns The device instance owning the region.
2503 * @param iRegion The region. If it's UINT32_MAX it'll be a wildcard match.
2504 */
2505VMMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
2506{
2507 /*
2508 * Validate input.
2509 */
2510 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2511 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2512 AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
2513
2514 pgmLock(pVM);
2515 int rc = VINF_SUCCESS;
2516 unsigned cFound = 0;
2517 PPGMMMIO2RANGE pPrev = NULL;
2518 PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3;
2519 while (pCur)
2520 {
2521 if ( pCur->pDevInsR3 == pDevIns
2522 && ( iRegion == UINT32_MAX
2523 || pCur->iRegion == iRegion))
2524 {
2525 cFound++;
2526
2527 /*
2528 * Unmap it if it's mapped.
2529 */
2530 if (pCur->fMapped)
2531 {
2532 int rc2 = PGMR3PhysMMIO2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);
2533 AssertRC(rc2);
2534 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
2535 rc = rc2;
2536 }
2537
2538 /*
2539 * Unlink it
2540 */
2541 PPGMMMIO2RANGE pNext = pCur->pNextR3;
2542 if (pPrev)
2543 pPrev->pNextR3 = pNext;
2544 else
2545 pVM->pgm.s.pMmio2RangesR3 = pNext;
2546 pCur->pNextR3 = NULL;
2547
2548 /*
2549 * Free the memory.
2550 */
2551 int rc2 = SUPR3PageFreeEx(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT);
2552 AssertRC(rc2);
2553 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
2554 rc = rc2;
2555
2556 uint32_t const cPages = pCur->RamRange.cb >> PAGE_SHIFT;
2557 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pCur->RamRange.pszDesc);
2558 AssertRC(rc2);
2559 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
2560 rc = rc2;
2561
2562 /* we're leaking hyper memory here if done at runtime. */
2563#ifdef VBOX_STRICT
2564 VMSTATE const enmState = VMR3GetState(pVM);
2565 AssertMsg( enmState == VMSTATE_POWERING_OFF
2566 || enmState == VMSTATE_POWERING_OFF_LS
2567 || enmState == VMSTATE_OFF
2568 || enmState == VMSTATE_OFF_LS
2569 || enmState == VMSTATE_DESTROYING
2570 || enmState == VMSTATE_TERMINATED
2571 || enmState == VMSTATE_CREATING
2572 , ("%s\n", VMR3GetStateName(enmState)));
2573#endif
2574 /*rc = MMHyperFree(pVM, pCur);
2575 AssertRCReturn(rc, rc); - not safe, see the alloc call. */
2576
2577
2578 /* update page count stats */
2579 pVM->pgm.s.cAllPages -= cPages;
2580 pVM->pgm.s.cPrivatePages -= cPages;
2581
2582 /* next */
2583 pCur = pNext;
2584 }
2585 else
2586 {
2587 pPrev = pCur;
2588 pCur = pCur->pNextR3;
2589 }
2590 }
2591 pgmPhysInvalidatePageMapTLB(pVM);
2592 pgmUnlock(pVM);
2593 return !cFound && iRegion != UINT32_MAX ? VERR_NOT_FOUND : rc;
2594}
2595
2596
2597/**
2598 * Maps a MMIO2 region.
2599 *
2600 * This is done when the guest / the BIOS / state loading changes the
2601 * PCI config. Replacing base memory has the same restrictions
2602 * as during registration, of course.
2603 *
2604 * @returns VBox status code.
2605 *
2606 * @param pVM Pointer to the shared VM structure.
2607 * @param pDevIns The device instance owning the region.
2608 * @param iRegion The region number.
     * @param GCPhys The guest physical address to map the region at.
     */
2609VMMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
2610{
2611 /*
2612 * Validate input
2613 */
2614 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2615 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2616 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2617 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
2618 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
2619 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2620
2621 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2622 AssertReturn(pCur, VERR_NOT_FOUND);
2623 AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER);
2624 Assert(pCur->RamRange.GCPhys == NIL_RTGCPHYS);
2625 Assert(pCur->RamRange.GCPhysLast == NIL_RTGCPHYS);
2626
2627 const RTGCPHYS GCPhysLast = GCPhys + pCur->RamRange.cb - 1;
2628 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2629
2630 /*
2631 * Find our location in the ram range list, checking for
2632     * restrictions we don't bother implementing yet (partial overlaps).
2633 */
2634 bool fRamExists = false;
2635 PPGMRAMRANGE pRamPrev = NULL;
2636 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
2637 while (pRam && GCPhysLast >= pRam->GCPhys)
2638 {
2639 if ( GCPhys <= pRam->GCPhysLast
2640 && GCPhysLast >= pRam->GCPhys)
2641 {
2642 /* completely within? */
2643 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
2644 && GCPhysLast <= pRam->GCPhysLast,
2645 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
2646 GCPhys, GCPhysLast, pCur->RamRange.pszDesc,
2647 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
2648 VERR_PGM_RAM_CONFLICT);
2649 fRamExists = true;
2650 break;
2651 }
2652
2653 /* next */
2654 pRamPrev = pRam;
2655 pRam = pRam->pNextR3;
2656 }
2657 if (fRamExists)
2658 {
2659 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2660 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
2661 while (cPagesLeft-- > 0)
2662 {
2663 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
2664 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
2665 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc),
2666 VERR_PGM_RAM_CONFLICT);
2667 pPage++;
2668 }
2669 }
2670 Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n",
2671 GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc));
2672
2673 /*
2674 * Make the changes.
2675 */
2676 pgmLock(pVM);
2677
2678 pCur->RamRange.GCPhys = GCPhys;
2679 pCur->RamRange.GCPhysLast = GCPhysLast;
2680 pCur->fMapped = true;
2681 pCur->fOverlapping = fRamExists;
2682
2683 if (fRamExists)
2684 {
2685/** @todo use pgmR3PhysFreePageRange here. */
2686 uint32_t cPendingPages = 0;
2687 PGMMFREEPAGESREQ pReq;
2688 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2689 AssertLogRelRCReturn(rc, rc);
2690
2691 /* replace the pages, freeing all present RAM pages. */
2692 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
2693 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2694 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
2695 while (cPagesLeft-- > 0)
2696 {
2697 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
2698 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
2699
2700 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
2701 PGM_PAGE_SET_HCPHYS(pVM, pPageDst, HCPhys);
2702 PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO2);
2703 PGM_PAGE_SET_STATE(pVM, pPageDst, PGM_PAGE_STATE_ALLOCATED);
2704 PGM_PAGE_SET_PDE_TYPE(pVM, pPageDst, PGM_PAGE_PDE_TYPE_DONTCARE);
2705 PGM_PAGE_SET_PTE_INDEX(pVM, pPageDst, 0);
2706 PGM_PAGE_SET_TRACKING(pVM, pPageDst, 0);
2707
2708 pVM->pgm.s.cZeroPages--;
2709 GCPhys += PAGE_SIZE;
2710 pPageSrc++;
2711 pPageDst++;
2712 }
2713
2714 /* Flush physical page map TLB. */
2715 pgmPhysInvalidatePageMapTLB(pVM);
2716
2717 if (cPendingPages)
2718 {
2719 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2720 AssertLogRelRCReturn(rc, rc);
2721 }
2722 GMMR3FreePagesCleanup(pReq);
2723
2724 /* Force a PGM pool flush as guest ram references have been changed. */
2725        /** @todo Not entirely SMP safe; assuming for now the guest takes care of this internally (not touching mapped MMIO while changing the mapping). */
2726 PVMCPU pVCpu = VMMGetCpu(pVM);
2727 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2728 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2729
2730 pgmUnlock(pVM);
2731 }
2732 else
2733 {
2734 RTGCPHYS cb = pCur->RamRange.cb;
2735
2736 /* Clear the tracking data of pages we're going to reactivate. */
2737 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
2738 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
2739 while (cPagesLeft-- > 0)
2740 {
2741 PGM_PAGE_SET_TRACKING(pVM, pPageSrc, 0);
2742 PGM_PAGE_SET_PTE_INDEX(pVM, pPageSrc, 0);
2743 pPageSrc++;
2744 }
2745
2746 /* link in the ram range */
2747 pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev);
2748 pgmUnlock(pVM);
2749
2750 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2);
2751 }
2752
2753 pgmPhysInvalidatePageMapTLB(pVM);
2754 return VINF_SUCCESS;
2755}
2756
2757
2758/**
2759 * Unmaps a MMIO2 region.
2760 *
2761 * This is done when the guest / the BIOS / state loading changes the
2762 * PCI config. Replacing base memory has the same restrictions
2763 * as during registration, of course.
2764 *
     * @returns VBox status code.
     * @param pVM Pointer to the shared VM structure.
     * @param pDevIns The device instance owning the region.
     * @param iRegion The region number.
     * @param GCPhys The guest physical address the region is mapped at.
     */
2765VMMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
2766{
2767 /*
2768 * Validate input
2769 */
2770 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2771 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2772 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2773 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
2774 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
2775 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2776
2777 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2778 AssertReturn(pCur, VERR_NOT_FOUND);
2779 AssertReturn(pCur->fMapped, VERR_WRONG_ORDER);
2780 AssertReturn(pCur->RamRange.GCPhys == GCPhys, VERR_INVALID_PARAMETER);
2781 Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS);
2782
2783 Log(("PGMR3PhysMMIO2Unmap: %RGp-%RGp %s\n",
2784 pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc));
2785
2786 /*
2787 * Unmap it.
2788 */
2789 pgmLock(pVM);
2790
2791 RTGCPHYS GCPhysRangeREM;
2792 RTGCPHYS cbRangeREM;
2793 bool fInformREM;
2794 if (pCur->fOverlapping)
2795 {
2796 /* Restore the RAM pages we've replaced. */
2797 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
2798 while (pRam->GCPhys > pCur->RamRange.GCPhysLast)
2799 pRam = pRam->pNextR3;
2800
2801 PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2802 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
2803 while (cPagesLeft-- > 0)
2804 {
2805 PGM_PAGE_INIT_ZERO(pPageDst, pVM, PGMPAGETYPE_RAM);
2806 pVM->pgm.s.cZeroPages++;
2807 pPageDst++;
2808 }
2809
2810 /* Flush physical page map TLB. */
2811 pgmPhysInvalidatePageMapTLB(pVM);
2812
2813 GCPhysRangeREM = NIL_RTGCPHYS; /* shuts up gcc */
2814 cbRangeREM = RTGCPHYS_MAX; /* ditto */
2815 fInformREM = false;
2816 }
2817 else
2818 {
2819 GCPhysRangeREM = pCur->RamRange.GCPhys;
2820 cbRangeREM = pCur->RamRange.cb;
2821 fInformREM = true;
2822
2823 pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange);
2824 }
2825
2826 pCur->RamRange.GCPhys = NIL_RTGCPHYS;
2827 pCur->RamRange.GCPhysLast = NIL_RTGCPHYS;
2828 pCur->fOverlapping = false;
2829 pCur->fMapped = false;
2830
2831 /* Force a PGM pool flush as guest ram references have been changed. */
2832 /** @todo not entirely SMP safe; assuming for now the guest takes care
2833     * of this internally (not touching mapped MMIO while changing the
2834 * mapping). */
2835 PVMCPU pVCpu = VMMGetCpu(pVM);
2836 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2837 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2838
2839 pgmPhysInvalidatePageMapTLB(pVM);
2840 pgmPhysInvalidRamRangeTlbs(pVM);
2841 pgmUnlock(pVM);
2842
2843 if (fInformREM)
2844 REMR3NotifyPhysRamDeregister(pVM, GCPhysRangeREM, cbRangeREM);
2845
2846 return VINF_SUCCESS;
2847}
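
/*
 * BAR-style lifecycle sketch for the two APIs above: map the region when the
 * guest programs the PCI base address, unmap on reprogramming (GCPhysBar is a
 * hypothetical value taken from the BAR; iRegion 0 matches the sketch above):
 *
 *      rc = PGMR3PhysMMIO2Map(pVM, pDevIns, 0, GCPhysBar);      // guest enabled the BAR
 *      // ... guest uses the region, then moves or disables the BAR ...
 *      rc = PGMR3PhysMMIO2Unmap(pVM, pDevIns, 0, GCPhysBar);    // tear the mapping down again
 */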
2848
2849
2850/**
2851 * Checks if the given address is an MMIO2 base address or not.
2852 *
2853 * @returns true/false accordingly.
2854 * @param pVM Pointer to the shared VM structure.
2855 * @param pDevIns The owner of the memory, optional.
2856 * @param GCPhys The address to check.
2857 */
2858VMMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
2859{
2860 /*
2861 * Validate input
2862 */
2863 VM_ASSERT_EMT_RETURN(pVM, false);
2864 AssertPtrReturn(pDevIns, false);
2865 AssertReturn(GCPhys != NIL_RTGCPHYS, false);
2866 AssertReturn(GCPhys != 0, false);
2867 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);
2868
2869 /*
2870 * Search the list.
2871 */
2872 pgmLock(pVM);
2873 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
2874 if (pCur->RamRange.GCPhys == GCPhys)
2875 {
2876 Assert(pCur->fMapped);
2877 pgmUnlock(pVM);
2878 return true;
2879 }
2880 pgmUnlock(pVM);
2881 return false;
2882}
2883
2884
2885/**
2886 * Gets the HC physical address of a page in the MMIO2 region.
2887 *
2888 * This API is intended for MMHyper and shouldn't be called
2889 * by anyone else...
2890 *
2891 * @returns VBox status code.
2892 * @param pVM Pointer to the shared VM structure.
2893 * @param pDevIns The owner of the memory, optional.
2894 * @param iRegion The region.
2895 * @param off The page, expressed as an offset into the MMIO2 region.
2896 * @param pHCPhys Where to store the result.
2897 */
2898VMMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
2899{
2900 /*
2901 * Validate input
2902 */
2903 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2904 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2905 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2906
2907 pgmLock(pVM);
2908 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2909 AssertReturn(pCur, VERR_NOT_FOUND);
2910 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2911
2912 PCPGMPAGE pPage = &pCur->RamRange.aPages[off >> PAGE_SHIFT];
2913 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
2914 pgmUnlock(pVM);
2915 return VINF_SUCCESS;
2916}
2917
2918
2919/**
2920 * Maps a portion of an MMIO2 region into kernel space (host).
2921 *
2922 * The kernel mapping will become invalid when the MMIO2 memory is deregistered
2923 * or the VM is terminated.
2924 *
2925 * @return VBox status code.
2926 *
2927 * @param pVM Pointer to the shared VM structure.
2928 * @param pDevIns The device owning the MMIO2 memory.
2929 * @param iRegion The region.
2930 * @param off The offset into the region. Must be page aligned.
2931 * @param cb The number of bytes to map. Must be page aligned.
2932 * @param pszDesc Mapping description.
2933 * @param pR0Ptr Where to store the R0 address.
2934 */
2935VMMR3DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
2936 const char *pszDesc, PRTR0PTR pR0Ptr)
2937{
2938 /*
2939 * Validate input.
2940 */
2941 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2942 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2943 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2944
2945 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2946 AssertReturn(pCur, VERR_NOT_FOUND);
2947 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2948 AssertReturn(cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2949 AssertReturn(off + cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2950 NOREF(pszDesc);
2951
2952 /*
2953 * Pass the request on to the support library/driver.
2954 */
2955 int rc = SUPR3PageMapKernel(pCur->pvR3, off, cb, 0, pR0Ptr);
2956
2957 return rc;
2958}
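
/*
 * Sketch: exposing the first page of an MMIO2 region to ring-0 code via the
 * API above (region number, size and description are illustrative):
 *
 *      RTR0PTR R0PtrFirstPage;
 *      rc = PGMR3PhysMMIO2MapKernel(pVM, pDevIns, 0, 0, PAGE_SIZE,       // iRegion=0, off=0
 *                                   "Example-VRAM-R0", &R0PtrFirstPage);
 */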
2959
2960
2961/**
2962 * Worker for PGMR3PhysRomRegister.
2963 *
2964 * This is here to simplify lock management, i.e. the caller does all the
2965 * locking and we can simply return without needing to remember to unlock
2966 * anything first.
2967 *
2968 * @returns VBox status.
2969 * @param pVM VM Handle.
2970 * @param pDevIns The device instance owning the ROM.
2971 * @param GCPhys First physical address in the range.
2972 * Must be page aligned!
2973 * @param cb The size of the range (in bytes).
2974 * Must be page aligned!
2975 * @param pvBinary Pointer to the binary data backing the ROM image.
2976 * @param cbBinary The size of the binary data pvBinary points to.
2977 * This must be less or equal to @a cb.
2978 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAGS_SHADOWED
2979 * and/or PGMPHYS_ROM_FLAGS_PERMANENT_BINARY.
2980 * @param pszDesc Pointer to description string. This must not be freed.
2981 */
2982static int pgmR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
2983 const void *pvBinary, uint32_t cbBinary, uint32_t fFlags, const char *pszDesc)
2984{
2985 /*
2986 * Validate input.
2987 */
2988 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2989 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
2990 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
2991 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2992 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2993 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
2994 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
2995 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAGS_SHADOWED | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
2996 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
2997
2998 const uint32_t cPages = cb >> PAGE_SHIFT;
2999
3000 /*
3001 * Find the ROM location in the ROM list first.
3002 */
3003 PPGMROMRANGE pRomPrev = NULL;
3004 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
3005 while (pRom && GCPhysLast >= pRom->GCPhys)
3006 {
3007 if ( GCPhys <= pRom->GCPhysLast
3008 && GCPhysLast >= pRom->GCPhys)
3009 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
3010 GCPhys, GCPhysLast, pszDesc,
3011 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
3012 VERR_PGM_RAM_CONFLICT);
3013 /* next */
3014 pRomPrev = pRom;
3015 pRom = pRom->pNextR3;
3016 }
3017
3018 /*
3019 * Find the RAM location and check for conflicts.
3020 *
3021 * Conflict detection is a bit different than for RAM
3022 * registration since a ROM can be located within a RAM
3023 * range. So, what we have to check for is other memory
3024 * types (other than RAM that is) and that we don't span
3025 * more than one RAM range (lazy).
3026 */
3027 bool fRamExists = false;
3028 PPGMRAMRANGE pRamPrev = NULL;
3029 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
3030 while (pRam && GCPhysLast >= pRam->GCPhys)
3031 {
3032 if ( GCPhys <= pRam->GCPhysLast
3033 && GCPhysLast >= pRam->GCPhys)
3034 {
3035 /* completely within? */
3036 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
3037 && GCPhysLast <= pRam->GCPhysLast,
3038 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
3039 GCPhys, GCPhysLast, pszDesc,
3040 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
3041 VERR_PGM_RAM_CONFLICT);
3042 fRamExists = true;
3043 break;
3044 }
3045
3046 /* next */
3047 pRamPrev = pRam;
3048 pRam = pRam->pNextR3;
3049 }
3050 if (fRamExists)
3051 {
3052 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
3053 uint32_t cPagesLeft = cPages;
3054 while (cPagesLeft-- > 0)
3055 {
3056 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
3057 ("%RGp (%R[pgmpage]) isn't a RAM page - registering %RGp-%RGp (%s).\n",
3058 pRam->GCPhys + ((RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << PAGE_SHIFT),
3059 pPage, GCPhys, GCPhysLast, pszDesc), VERR_PGM_RAM_CONFLICT);
3060 Assert(PGM_PAGE_IS_ZERO(pPage));
3061 pPage++;
3062 }
3063 }
3064
3065 /*
3066 * Update the base memory reservation if necessary.
3067 */
3068 uint32_t cExtraBaseCost = fRamExists ? 0 : cPages;
3069 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
3070 cExtraBaseCost += cPages;
3071 if (cExtraBaseCost)
3072 {
3073 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
3074 if (RT_FAILURE(rc))
3075 return rc;
3076 }
3077
3078 /*
3079 * Allocate memory for the virgin copy of the RAM.
3080 */
3081 PGMMALLOCATEPAGESREQ pReq;
3082 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
3083 AssertRCReturn(rc, rc);
3084
3085 for (uint32_t iPage = 0; iPage < cPages; iPage++)
3086 {
3087 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
3088 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
3089 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
3090 }
3091
3092 rc = GMMR3AllocatePagesPerform(pVM, pReq);
3093 if (RT_FAILURE(rc))
3094 {
3095 GMMR3AllocatePagesCleanup(pReq);
3096 return rc;
3097 }
3098
3099 /*
3100 * Allocate the new ROM range and RAM range (if necessary).
3101 */
3102 PPGMROMRANGE pRomNew;
3103 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
3104 if (RT_SUCCESS(rc))
3105 {
3106 PPGMRAMRANGE pRamNew = NULL;
3107 if (!fRamExists)
3108 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
3109 if (RT_SUCCESS(rc))
3110 {
3111 /*
3112 * Initialize and insert the RAM range (if required).
3113 */
3114 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
3115 if (!fRamExists)
3116 {
3117 pRamNew->pSelfR0 = MMHyperCCToR0(pVM, pRamNew);
3118 pRamNew->pSelfRC = MMHyperCCToRC(pVM, pRamNew);
3119 pRamNew->GCPhys = GCPhys;
3120 pRamNew->GCPhysLast = GCPhysLast;
3121 pRamNew->cb = cb;
3122 pRamNew->pszDesc = pszDesc;
3123 pRamNew->fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_ROM;
3124 pRamNew->pvR3 = NULL;
3125 pRamNew->paLSPages = NULL;
3126
3127 PPGMPAGE pPage = &pRamNew->aPages[0];
3128 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
3129 {
3130 PGM_PAGE_INIT(pPage,
3131 pReq->aPages[iPage].HCPhysGCPhys,
3132 pReq->aPages[iPage].idPage,
3133 PGMPAGETYPE_ROM,
3134 PGM_PAGE_STATE_ALLOCATED);
3135
3136 pRomPage->Virgin = *pPage;
3137 }
3138
3139 pVM->pgm.s.cAllPages += cPages;
3140 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
3141 }
3142 else
3143 {
3144 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
3145 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
3146 {
3147 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_ROM);
3148 PGM_PAGE_SET_HCPHYS(pVM, pPage, pReq->aPages[iPage].HCPhysGCPhys);
3149 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
3150 PGM_PAGE_SET_PAGEID(pVM, pPage, pReq->aPages[iPage].idPage);
3151 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_DONTCARE);
3152 PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
3153 PGM_PAGE_SET_TRACKING(pVM, pPage, 0);
3154
3155 pRomPage->Virgin = *pPage;
3156 }
3157
3158 pRamNew = pRam;
3159
3160 pVM->pgm.s.cZeroPages -= cPages;
3161 }
3162 pVM->pgm.s.cPrivatePages += cPages;
3163
3164 /* Flush physical page map TLB. */
3165 pgmPhysInvalidatePageMapTLB(pVM);
3166
3167
3168 /*
3169 * !HACK ALERT! REM + (Shadowed) ROM ==> mess.
3170 *
3171 * If it's shadowed we'll register the handler after the ROM notification
3172 * so we get the access handler callbacks that we should. If it isn't
3173 * shadowed we'll do it the other way around to make REM use the built-in
3174 * ROM behavior and not the handler behavior (which is to route all access
3175 * to PGM atm).
3176 */
3177 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
3178 {
3179 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, true /* fShadowed */);
3180 rc = PGMR3HandlerPhysicalRegister(pVM,
3181 fFlags & PGMPHYS_ROM_FLAGS_SHADOWED
3182 ? PGMPHYSHANDLERTYPE_PHYSICAL_ALL
3183 : PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
3184 GCPhys, GCPhysLast,
3185 pgmR3PhysRomWriteHandler, pRomNew,
3186 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
3187 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
3188 }
3189 else
3190 {
3191 rc = PGMR3HandlerPhysicalRegister(pVM,
3192 fFlags & PGMPHYS_ROM_FLAGS_SHADOWED
3193 ? PGMPHYSHANDLERTYPE_PHYSICAL_ALL
3194 : PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
3195 GCPhys, GCPhysLast,
3196 pgmR3PhysRomWriteHandler, pRomNew,
3197 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
3198 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
3199 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false /* fShadowed */);
3200 }
3201 if (RT_SUCCESS(rc))
3202 {
3203 /*
3204 * Copy the image over to the virgin pages.
3205 * This must be done after linking in the RAM range.
3206 */
3207 size_t cbBinaryLeft = cbBinary;
3208 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
3209 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
3210 {
3211 void *pvDstPage;
3212 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pvDstPage);
3213 if (RT_FAILURE(rc))
3214 {
3215 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
3216 break;
3217 }
3218 if (cbBinaryLeft >= PAGE_SIZE)
3219 {
3220 memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << PAGE_SHIFT), PAGE_SIZE);
3221 cbBinaryLeft -= PAGE_SIZE;
3222 }
3223 else
3224 {
3225 ASMMemZeroPage(pvDstPage); /* (shouldn't be necessary, but can't hurt either) */
3226 if (cbBinaryLeft > 0)
3227 {
3228 memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << PAGE_SHIFT), cbBinaryLeft);
3229 cbBinaryLeft = 0;
3230 }
3231 }
3232 }
3233 if (RT_SUCCESS(rc))
3234 {
3235 /*
3236 * Initialize the ROM range.
3237 * Note that the Virgin member of the pages has already been initialized above.
3238 */
3239 pRomNew->GCPhys = GCPhys;
3240 pRomNew->GCPhysLast = GCPhysLast;
3241 pRomNew->cb = cb;
3242 pRomNew->fFlags = fFlags;
3243 pRomNew->idSavedState = UINT8_MAX;
3244 pRomNew->cbOriginal = cbBinary;
3245#ifdef VBOX_STRICT
3246 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY
3247 ? pvBinary : RTMemDup(pvBinary, cbBinary);
3248#else
3249 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY ? pvBinary : NULL;
3250#endif
3251 pRomNew->pszDesc = pszDesc;
3252
3253 for (unsigned iPage = 0; iPage < cPages; iPage++)
3254 {
3255 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
3256 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
3257 PGM_PAGE_INIT_ZERO(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
3258 }
3259
3260 /* update the page count stats for the shadow pages. */
3261 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
3262 {
3263 pVM->pgm.s.cZeroPages += cPages;
3264 pVM->pgm.s.cAllPages += cPages;
3265 }
3266
3267 /*
3268 * Insert the ROM range, tell REM and return successfully.
3269 */
3270 pRomNew->pNextR3 = pRom;
3271 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
3272 pRomNew->pNextRC = pRom ? MMHyperCCToRC(pVM, pRom) : NIL_RTRCPTR;
3273
3274 if (pRomPrev)
3275 {
3276 pRomPrev->pNextR3 = pRomNew;
3277 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
3278 pRomPrev->pNextRC = MMHyperCCToRC(pVM, pRomNew);
3279 }
3280 else
3281 {
3282 pVM->pgm.s.pRomRangesR3 = pRomNew;
3283 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
3284 pVM->pgm.s.pRomRangesRC = MMHyperCCToRC(pVM, pRomNew);
3285 }
3286
3287 pgmPhysInvalidatePageMapTLB(pVM);
3288 GMMR3AllocatePagesCleanup(pReq);
3289 return VINF_SUCCESS;
3290 }
3291
3292 /* bail out */
3293
3294 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
3295 AssertRC(rc2);
3296 }
3297
3298 if (!fRamExists)
3299 {
3300 pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
3301 MMHyperFree(pVM, pRamNew);
3302 }
3303 }
3304 MMHyperFree(pVM, pRomNew);
3305 }
3306
3307 /** @todo Purge the mapping cache or something... */
3308 GMMR3FreeAllocatedPages(pVM, pReq);
3309 GMMR3AllocatePagesCleanup(pReq);
3310 return rc;
3311}
3312
3313
3314/**
3315 * Registers a ROM image.
3316 *
3317 * Shadowed ROM images require double the amount of backing memory, so
3318 * don't use that unless you have to. Shadowing of ROM images is a process
3319 * where we can select where the reads go and where the writes go. On real
3320 * hardware the chipset provides means to configure this. We provide
3321 * PGMR3PhysProtectROM() for this purpose.
3322 *
3323 * A read-only copy of the ROM image will always be kept around while we
3324 * will allocate RAM pages for the changes on demand (unless all memory
3325 * is configured to be preallocated).
3326 *
3327 * @returns VBox status.
3328 * @param pVM VM Handle.
3329 * @param pDevIns The device instance owning the ROM.
3330 * @param GCPhys First physical address in the range.
3331 * Must be page aligned!
3332 * @param cb The size of the range (in bytes).
3333 * Must be page aligned!
3334 * @param pvBinary Pointer to the binary data backing the ROM image.
3335 * @param cbBinary The size of the binary data pvBinary points to.
3336 * This must be less or equal to @a cb.
3337 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAGS_SHADOWED
3338 * and/or PGMPHYS_ROM_FLAGS_PERMANENT_BINARY.
3339 * @param pszDesc Pointer to description string. This must not be freed.
3340 *
3341 * @remark There is no way to remove the ROM yet, neither automatically on device
3342 * cleanup nor manually from the device. This isn't difficult in any way; it's
3343 * just not something we expect to be necessary for a while.
3344 */
3345VMMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
3346 const void *pvBinary, uint32_t cbBinary, uint32_t fFlags, const char *pszDesc)
3347{
3348 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p cbBinary=%#x fFlags=%#x pszDesc=%s\n",
3349 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, cbBinary, fFlags, pszDesc));
3350 pgmLock(pVM);
3351 int rc = pgmR3PhysRomRegister(pVM, pDevIns, GCPhys, cb, pvBinary, cbBinary, fFlags, pszDesc);
3352 pgmUnlock(pVM);
3353 return rc;
3354}
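
/*
 * Registration sketch for the API above: a hypothetical 128 KB shadowed
 * system BIOS mapped in the legacy area (the address, size and the
 * pvBiosImage/cbBiosImage variables are invented for illustration):
 *
 *      rc = PGMR3PhysRomRegister(pVM, pDevIns, UINT32_C(0x000e0000), 128 * _1K,
 *                                pvBiosImage, cbBiosImage,
 *                                PGMPHYS_ROM_FLAGS_SHADOWED, "Example BIOS");
 *
 * Note that the worker asserts VMSTATE_CREATING, so this can only be done
 * while the VM is being constructed.
 */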
3355
3356
3357/**
3358 * \#PF Handler callback for ROM write accesses.
3359 *
3360 * @returns VINF_SUCCESS if the handler has carried out the operation.
3361 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
3362 * @param pVM VM Handle.
3363 * @param GCPhys The physical address the guest is writing to.
3364 * @param pvPhys The HC mapping of that address.
3365 * @param pvBuf What the guest is reading/writing.
3366 * @param cbBuf How much it's reading/writing.
3367 * @param enmAccessType The access type.
3368 * @param pvUser User argument.
3369 */
3370static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
3371 PGMACCESSTYPE enmAccessType, void *pvUser)
3372{
3373 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
3374 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
3375 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
3376 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
3377 Log5(("pgmR3PhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
3378 NOREF(pvPhys);
3379
3380 if (enmAccessType == PGMACCESSTYPE_READ)
3381 {
3382 switch (pRomPage->enmProt)
3383 {
3384 /*
3385 * Take the default action.
3386 */
3387 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
3388 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
3389 case PGMROMPROT_READ_ROM_WRITE_RAM:
3390 case PGMROMPROT_READ_RAM_WRITE_RAM:
3391 return VINF_PGM_HANDLER_DO_DEFAULT;
3392
3393 default:
3394 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
3395 pRom->aPages[iPage].enmProt, iPage, GCPhys),
3396 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
3397 }
3398 }
3399 else
3400 {
3401 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
3402 switch (pRomPage->enmProt)
3403 {
3404 /*
3405 * Ignore writes.
3406 */
3407 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
3408 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
3409 return VINF_SUCCESS;
3410
3411 /*
3412 * Write to the RAM page.
3413 */
3414 case PGMROMPROT_READ_ROM_WRITE_RAM:
3415 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
3416 {
3417                /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
3418 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
3419
3420 /*
3421 * Take the lock, do lazy allocation, map the page and copy the data.
3422 *
3423 * Note that we have to bypass the mapping TLB since it works on
3424 * guest physical addresses and entering the shadow page would
3425 * kind of screw things up...
3426 */
3427 int rc = pgmLock(pVM);
3428 AssertRC(rc);
3429
3430 PPGMPAGE pShadowPage = &pRomPage->Shadow;
3431 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
3432 {
3433 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
3434 AssertLogRelReturn(pShadowPage, VERR_PGM_PHYS_PAGE_GET_IPE);
3435 }
3436
3437 void *pvDstPage;
3438 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
3439 if (RT_SUCCESS(rc))
3440 {
3441 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
3442 pRomPage->LiveSave.fWrittenTo = true;
3443 }
3444
3445 pgmUnlock(pVM);
3446 return rc;
3447 }
3448
3449 default:
3450 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
3451 pRom->aPages[iPage].enmProt, iPage, GCPhys),
3452 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
3453 }
3454 }
3455}
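
/*
 * For reference, the four protection modes the handler above dispatches on;
 * the enum names spell out the read source and the write destination:
 *
 *      PGMROMPROT_READ_ROM_WRITE_IGNORE    reads: virgin ROM    writes: dropped
 *      PGMROMPROT_READ_ROM_WRITE_RAM       reads: virgin ROM    writes: shadow page
 *      PGMROMPROT_READ_RAM_WRITE_IGNORE    reads: shadow page   writes: dropped
 *      PGMROMPROT_READ_RAM_WRITE_RAM       reads: shadow page   writes: shadow page
 */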
3456
3457
3458/**
3459 * Called by PGMR3Reset to reset the shadow, switch to the virgin,
3460 * and verify that the virgin part is untouched.
3461 *
3462 * This is done after the normal memory has been cleared.
3463 *
3464 * ASSUMES that the caller owns the PGM lock.
3465 *
3466 * @param pVM The VM handle.
3467 */
3468int pgmR3PhysRomReset(PVM pVM)
3469{
3470 PGM_LOCK_ASSERT_OWNER(pVM);
3471 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
3472 {
3473 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
3474
3475 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
3476 {
3477 /*
3478 * Reset the physical handler.
3479 */
3480 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
3481 AssertRCReturn(rc, rc);
3482
3483 /*
3484 * What we do with the shadow pages depends on the memory
3485 * preallocation option. If not enabled, we'll just throw
3486 * out all the dirty pages and replace them by the zero page.
3487 */
3488 if (!pVM->pgm.s.fRamPreAlloc)
3489 {
3490 /* Free the dirty pages. */
3491 uint32_t cPendingPages = 0;
3492 PGMMFREEPAGESREQ pReq;
3493 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
3494 AssertRCReturn(rc, rc);
3495
3496 for (uint32_t iPage = 0; iPage < cPages; iPage++)
3497 if ( !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow)
3498 && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow))
3499 {
3500 Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) == PGM_PAGE_STATE_ALLOCATED);
3501 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRom->aPages[iPage].Shadow,
3502 pRom->GCPhys + (iPage << PAGE_SHIFT));
3503 AssertLogRelRCReturn(rc, rc);
3504 }
3505
3506 if (cPendingPages)
3507 {
3508 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
3509 AssertLogRelRCReturn(rc, rc);
3510 }
3511 GMMR3FreePagesCleanup(pReq);
3512 }
3513 else
3514 {
3515 /* clear all the shadow pages. */
3516 for (uint32_t iPage = 0; iPage < cPages; iPage++)
3517 {
3518 if (PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow))
3519 continue;
3520 Assert(!PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow));
3521 void *pvDstPage;
3522 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
3523 rc = pgmPhysPageMakeWritableAndMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pvDstPage);
3524 if (RT_FAILURE(rc))
3525 break;
3526 ASMMemZeroPage(pvDstPage);
3527 }
3528 AssertRCReturn(rc, rc);
3529 }
3530 }
3531
3532#ifdef VBOX_STRICT
3533 /*
3534 * Verify that the virgin page is unchanged if possible.
3535 */
3536 if (pRom->pvOriginal)
3537 {
3538 size_t cbSrcLeft = pRom->cbOriginal;
3539 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
3540 for (uint32_t iPage = 0; iPage < cPages && cbSrcLeft > 0; iPage++, pbSrcPage += PAGE_SIZE)
3541 {
3542 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
3543 void const *pvDstPage;
3544 int rc = pgmPhysPageMapReadOnly(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pvDstPage);
3545 if (RT_FAILURE(rc))
3546 break;
3547
3548 if (memcmp(pvDstPage, pbSrcPage, RT_MIN(cbSrcLeft, PAGE_SIZE)))
3549 LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
3550 GCPhys, pRom->pszDesc));
3551 cbSrcLeft -= RT_MIN(cbSrcLeft, PAGE_SIZE);
3552 }
3553 }
3554#endif
3555 }
3556
3557 return VINF_SUCCESS;
3558}
3559
3560
3561/**
3562 * Called by PGMR3Term to free resources.
3563 *
3564 * ASSUMES that the caller owns the PGM lock.
3565 *
3566 * @param pVM The VM handle.
3567 */
3568void pgmR3PhysRomTerm(PVM pVM)
3569{
3570#ifdef RT_STRICT
3571 /*
3572 * Free the heap copy of the original bits.
3573 */
3574 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
3575 {
3576 if ( pRom->pvOriginal
3577 && !(pRom->fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY))
3578 {
3579 RTMemFree((void *)pRom->pvOriginal);
3580 pRom->pvOriginal = NULL;
3581 }
3582 }
3583#endif
3584}
3585
3586
3587/**
3588 * Change the shadowing of a range of ROM pages.
3589 *
3590 * This is intended for implementing chipset-specific memory registers
3591 * and will not be very strict about the input. It will silently ignore
3592 * any pages that are not part of a shadowed ROM.
3593 *
3594 * @returns VBox status code.
3595 * @retval VINF_PGM_SYNC_CR3
3596 *
3597 * @param pVM Pointer to the shared VM structure.
3598 * @param GCPhys Where to start. Page aligned.
3599 * @param cb How much to change. Page aligned.
3600 * @param enmProt The new ROM protection.
3601 */
3602VMMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
3603{
3604 /*
3605 * Check input
3606 */
3607 if (!cb)
3608 return VINF_SUCCESS;
3609 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3610 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3611 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
3612 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
3613 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
3614
3615 /*
3616 * Process the request.
3617 */
3618 pgmLock(pVM);
3619 int rc = VINF_SUCCESS;
3620 bool fFlushTLB = false;
3621 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
3622 {
3623 if ( GCPhys <= pRom->GCPhysLast
3624 && GCPhysLast >= pRom->GCPhys
3625 && (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
3626 {
3627 /*
3628             * Iterate the relevant pages and make the necessary changes.
3629 */
3630 bool fChanges = false;
3631 uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
3632 ? pRom->cb >> PAGE_SHIFT
3633 : (GCPhysLast - pRom->GCPhys + 1) >> PAGE_SHIFT;
3634 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
3635 iPage < cPages;
3636 iPage++)
3637 {
3638 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
3639 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
3640 {
3641 fChanges = true;
3642
3643 /* flush references to the page. */
3644 PPGMPAGE pRamPage = pgmPhysGetPage(pVM, pRom->GCPhys + (iPage << PAGE_SHIFT));
3645 int rc2 = pgmPoolTrackUpdateGCPhys(pVM, pRom->GCPhys + (iPage << PAGE_SHIFT), pRamPage,
3646 true /*fFlushPTEs*/, &fFlushTLB);
3647 if (rc2 != VINF_SUCCESS && (rc == VINF_SUCCESS || RT_FAILURE(rc2)))
3648 rc = rc2;
3649
3650 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
3651 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
3652
3653 *pOld = *pRamPage;
3654 *pRamPage = *pNew;
3655 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
3656 }
3657 pRomPage->enmProt = enmProt;
3658 }
3659
3660 /*
3661             * Reset the access handler if we made changes; no need
3662             * to optimize this.
3663 */
3664 if (fChanges)
3665 {
3666 int rc2 = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
3667 if (RT_FAILURE(rc2))
3668 {
3669 pgmUnlock(pVM);
3670 AssertRC(rc);
3671 return rc2;
3672 }
3673 }
3674
3675 /* Advance - cb isn't updated. */
3676 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
3677 }
3678 }
3679 pgmUnlock(pVM);
3680 if (fFlushTLB)
3681 PGM_INVL_ALL_VCPU_TLBS(pVM);
3682
3683 return rc;
3684}
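
/*
 * Illustrative sketch (not part of the build): how a chipset device could
 * use PGMR3PhysRomProtect above to emulate a PAM-style ROM shadowing
 * register.  The register layout, window address and size below are
 * hypothetical; only PGMR3PhysRomProtect and the two PGMROMPROT values
 * already used in this file are real.
 */
#if 0
static int examplePamRegisterWrite(PVM pVM, uint8_t bPamValue)
{
    /* Bit 0 of the hypothetical register enables RAM shadowing for a
       32 KB legacy ROM window at 0xC0000 (page aligned, as required). */
    PGMROMPROT const enmProt = (bPamValue & 1)
                             ? PGMROMPROT_READ_RAM_WRITE_RAM     /* reads and writes go to the shadow RAM */
                             : PGMROMPROT_READ_ROM_WRITE_IGNORE; /* reads hit the ROM, writes are dropped */
    return PGMR3PhysRomProtect(pVM, UINT32_C(0x000c0000), UINT32_C(0x00008000), enmProt);
}
#endif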
3685
3686
3687/**
3688 * Sets the Address Gate 20 state.
3689 *
3690 * @param pVCpu The VCPU to operate on.
3691 * @param fEnable True if the gate should be enabled.
3692 * False if the gate should be disabled.
3693 */
3694VMMDECL(void) PGMR3PhysSetA20(PVMCPU pVCpu, bool fEnable)
3695{
3696 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVCpu->pgm.s.fA20Enabled));
3697 if (pVCpu->pgm.s.fA20Enabled != fEnable)
3698 {
3699 pVCpu->pgm.s.fA20Enabled = fEnable;
3700 pVCpu->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
3701 REMR3A20Set(pVCpu->pVMR3, pVCpu, fEnable);
3702 /** @todo we're not handling this correctly for VT-x / AMD-V. See #2911 */
3703 }
3704}
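
/*
 * Non-compiled sketch spelling out the GCPhysA20Mask arithmetic above:
 * with the gate enabled the mask is all ones (no effect), with it disabled
 * bit 20 is cleared so guest physical addresses wrap at 1 MB, like real
 * hardware with the A20 line held low.
 */
#if 0
static void exampleA20Mask(void)
{
    RTGCPHYS const fEnabled  = ~(RTGCPHYS)(!true  << 20); /* !true == 0  -> mask is ~0, all address bits pass */
    RTGCPHYS const fDisabled = ~(RTGCPHYS)(!false << 20); /* !false == 1 -> mask clears bit 20 (0x00100000)   */
    Assert((UINT32_C(0x00100000) & fDisabled) == 0);                    /* 1 MB aliases back to 0          */
    Assert((UINT32_C(0x000fffff) & fDisabled) == UINT32_C(0x000fffff)); /* everything below 1 MB unchanged */
    NOREF(fEnabled);
}
#endif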
3705
3706
3707/**
3708 * Tree enumeration callback for dealing with age rollover.
3709 * It will perform a simple compression of the current age.
3710 */
3711static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
3712{
3713 /* Age compression - ASSUMES iNow == 4. */
3714 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
3715 if (pChunk->iLastUsed >= UINT32_C(0xffffff00))
3716 pChunk->iLastUsed = 3;
3717 else if (pChunk->iLastUsed >= UINT32_C(0xfffff000))
3718 pChunk->iLastUsed = 2;
3719 else if (pChunk->iLastUsed)
3720 pChunk->iLastUsed = 1;
3721 else /* iLastUsed = 0 */
3722 pChunk->iLastUsed = 4;
3723
3724 NOREF(pvUser);
3725 return 0;
3726}
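
/*
 * Worked example of the compression above (the caller has just reset iNow
 * to 4): an iLastUsed of 0xffffffff (used right before the rollover)
 * becomes 3, 0xfffff800 becomes 2, any older non-zero age becomes 1, and
 * an iLastUsed of 0 is set to 4, i.e. the new iNow.
 */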
3727
3728
3729/**
3730 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
3731 */
3732typedef struct PGMR3PHYSCHUNKUNMAPCB
3733{
3734 PVM pVM; /**< The VM handle. */
3735 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
3736} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
3737
3738
3739/**
3740 * Callback used to find the mapping that's been unused for
3741 * the longest time.
3742 */
3743static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLU32NODECORE pNode, void *pvUser)
3744{
3745 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
3746 PPGMR3PHYSCHUNKUNMAPCB pArg = (PPGMR3PHYSCHUNKUNMAPCB)pvUser;
3747
3748 /*
3749 * Check for locks and compare when last used.
3750 */
3751 if (pChunk->cRefs)
3752 return 0;
3753 if (pChunk->cPermRefs)
3754 return 0;
3755 if ( pArg->pChunk
3756 && pChunk->iLastUsed >= pArg->pChunk->iLastUsed)
3757 return 0;
3758
3759 /*
3760 * Check that it's not in any of the TLBs.
3761 */
3762 PVM pVM = pArg->pVM;
3763 if ( pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(pChunk->Core.Key)].idChunk
3764 == pChunk->Core.Key)
3765 {
3766 pChunk = NULL;
3767 return 0;
3768 }
3769#ifdef VBOX_STRICT
3770 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
3771 {
3772 Assert(pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk != pChunk);
3773 Assert(pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk != pChunk->Core.Key);
3774 }
3775#endif
3776
3777 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
3778 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
3779 return 0;
3780
3781 pArg->pChunk = pChunk;
3782 return 0;
3783}
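
/*
 * Example of the selection rule above: with three chunks aged 5, 9 and 12
 * (none referenced and none present in a TLB), the enumeration leaves
 * pArg->pChunk pointing at the chunk aged 5, the least recently used one.
 */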
3784
3785
3786/**
3787 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
3788 *
3789 * The candidate will not be part of any TLBs, so no need to flush
3790 * anything afterwards.
3791 *
3792 * @returns Chunk id, or INT32_MAX if no suitable candidate was found.
3793 * @param pVM The VM handle.
3794 */
3795static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
3796{
3797 PGM_LOCK_ASSERT_OWNER(pVM);
3798
3799 /*
3800 * Enumerate the age tree starting with the left most node.
3801 */
3802 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkFindCandidate, a);
3803 PGMR3PHYSCHUNKUNMAPCB Args;
3804 Args.pVM = pVM;
3805 Args.pChunk = NULL;
3806 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args);
3807 Assert(Args.pChunk);
3808 if (Args.pChunk)
3809 {
3810 Assert(Args.pChunk->cRefs == 0);
3811 Assert(Args.pChunk->cPermRefs == 0);
3812 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkFindCandidate, a);
3813 return Args.pChunk->Core.Key;
3814 }
3815
3816 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkFindCandidate, a);
3817 return INT32_MAX;
3818}
3819
3820
3821/**
3822 * Rendezvous callback used by pgmR3PhysUnmapChunk that unmaps a chunk.
3823 *
3824 * This is only called on one of the EMTs while the other ones are waiting for
3825 * it to complete this function.
3826 *
3827 * @returns VINF_SUCCESS (VBox strict status code).
3828 * @param pVM The VM handle.
3829 * @param pVCpu The VMCPU for the EMT we're being called on. Unused.
3830 * @param pvUser User pointer. Unused
3831 *
3832 */
3833static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysUnmapChunkRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
3834{
3835 int rc = VINF_SUCCESS;
3836 pgmLock(pVM);
3837 NOREF(pVCpu); NOREF(pvUser);
3838
3839 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
3840 {
3841 /* Flush the pgm pool cache; call the internal rendezvous handler as we're already in a rendezvous handler here. */
3842 /** @todo also not really efficient to unmap a chunk that contains PD
3843 * or PT pages. */
3844 pgmR3PoolClearAllRendezvous(pVM, &pVM->aCpus[0], NULL /* no need to flush the REM TLB as we already did that above */);
3845
3846 /*
3847 * Request the ring-0 part to unmap a chunk to make space in the mapping cache.
3848 */
3849 GMMMAPUNMAPCHUNKREQ Req;
3850 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
3851 Req.Hdr.cbReq = sizeof(Req);
3852 Req.pvR3 = NULL;
3853 Req.idChunkMap = NIL_GMM_CHUNKID;
3854 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
3855 if (Req.idChunkUnmap != INT32_MAX)
3856 {
3857 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkUnmap, a);
3858 rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
3859 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkUnmap, a);
3860 if (RT_SUCCESS(rc))
3861 {
3862 /*
3863 * Remove the unmapped one.
3864 */
3865 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
3866 AssertRelease(pUnmappedChunk);
3867 AssertRelease(!pUnmappedChunk->cRefs);
3868 AssertRelease(!pUnmappedChunk->cPermRefs);
3869 pUnmappedChunk->pv = NULL;
3870 pUnmappedChunk->Core.Key = UINT32_MAX;
3871#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
3872 MMR3HeapFree(pUnmappedChunk);
3873#else
3874 MMR3UkHeapFree(pVM, pUnmappedChunk, MM_TAG_PGM_CHUNK_MAPPING);
3875#endif
3876 pVM->pgm.s.ChunkR3Map.c--;
3877 pVM->pgm.s.cUnmappedChunks++;
3878
3879 /*
3880 * Flush dangling PGM pointers (R3 & R0 ptrs to GC physical addresses).
3881 */
3882            /** @todo We should not flush chunks which include CR3 mappings. */
3883 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3884 {
3885 PPGMCPU pPGM = &pVM->aCpus[idCpu].pgm.s;
3886
3887 pPGM->pGst32BitPdR3 = NULL;
3888 pPGM->pGstPaePdptR3 = NULL;
3889 pPGM->pGstAmd64Pml4R3 = NULL;
3890#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
3891 pPGM->pGst32BitPdR0 = NIL_RTR0PTR;
3892 pPGM->pGstPaePdptR0 = NIL_RTR0PTR;
3893 pPGM->pGstAmd64Pml4R0 = NIL_RTR0PTR;
3894#endif
3895 for (unsigned i = 0; i < RT_ELEMENTS(pPGM->apGstPaePDsR3); i++)
3896 {
3897 pPGM->apGstPaePDsR3[i] = NULL;
3898#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
3899 pPGM->apGstPaePDsR0[i] = NIL_RTR0PTR;
3900#endif
3901 }
3902
3903 /* Flush REM TLBs. */
3904 CPUMSetChangedFlags(&pVM->aCpus[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
3905 }
3906
3907 /* Flush REM translation blocks. */
3908 REMFlushTBs(pVM);
3909 }
3910 }
3911 }
3912 pgmUnlock(pVM);
3913 return rc;
3914}
3915
3916/**
3917 * Unmap a chunk to free up virtual address space (request packet handler for pgmR3PhysChunkMap)
3918 *
3919 * @returns VBox status code.
3920 * @param pVM The VM to operate on.
3921 */
3922void pgmR3PhysUnmapChunk(PVM pVM)
3923{
3924 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysUnmapChunkRendezvous, NULL);
3925 AssertRC(rc);
3926}
3927
3928
3929/**
3930 * Maps the given chunk into the ring-3 mapping cache.
3931 *
3932 * This will call ring-0.
3933 *
3934 * @returns VBox status code.
3935 * @param pVM The VM handle.
3936 * @param idChunk The chunk in question.
3937 * @param ppChunk Where to store the chunk tracking structure.
3938 *
3939 * @remarks Called from within the PGM critical section.
3940 * @remarks Can be called from any thread!
3941 */
3942int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
3943{
3944 int rc;
3945
3946 PGM_LOCK_ASSERT_OWNER(pVM);
3947
3948 /*
3949 * Move the chunk time forward.
3950 */
3951 pVM->pgm.s.ChunkR3Map.iNow++;
3952 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
3953 {
3954 pVM->pgm.s.ChunkR3Map.iNow = 4;
3955 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, NULL);
3956 }
3957
3958 /*
3959 * Allocate a new tracking structure first.
3960 */
3961#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
3962 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAllocZ(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
3963#else
3964 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3UkHeapAllocZ(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk), NULL);
3965#endif
3966 AssertReturn(pChunk, VERR_NO_MEMORY);
3967 pChunk->Core.Key = idChunk;
3968 pChunk->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
3969
3970 /*
3971 * Request the ring-0 part to map the chunk in question.
3972 */
3973 GMMMAPUNMAPCHUNKREQ Req;
3974 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
3975 Req.Hdr.cbReq = sizeof(Req);
3976 Req.pvR3 = NULL;
3977 Req.idChunkMap = idChunk;
3978 Req.idChunkUnmap = NIL_GMM_CHUNKID;
3979
3980 /* Must be callable from any thread, so can't use VMMR3CallR0. */
3981 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkMap, a);
3982 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
3983 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkMap, a);
3984 if (RT_SUCCESS(rc))
3985 {
3986 pChunk->pv = Req.pvR3;
3987
3988 /*
3989 * If we're running out of virtual address space, then we should
3990 * unmap another chunk.
3991 *
3992 * Currently, an unmap operation requires that all other virtual CPUs
3993 * are idling and not by chance making use of the memory we're
3994 * unmapping. So, we create an async unmap operation here.
3995 *
3996         * Now, when creating or restoring a saved state this won't work very
3997 * well since we may want to restore all guest RAM + a little something.
3998 * So, we have to do the unmap synchronously. Fortunately for us
3999 * though, during these operations the other virtual CPUs are inactive
4000 * and it should be safe to do this.
4001 */
4002 /** @todo Eventually we should lock all memory when used and do
4003 * map+unmap as one kernel call without any rendezvous or
4004 * other precautions. */
4005 if (pVM->pgm.s.ChunkR3Map.c + 1 >= pVM->pgm.s.ChunkR3Map.cMax)
4006 {
4007 switch (VMR3GetState(pVM))
4008 {
4009 case VMSTATE_LOADING:
4010 case VMSTATE_SAVING:
4011 {
4012 PVMCPU pVCpu = VMMGetCpu(pVM);
4013 if ( pVCpu
4014 && pVM->pgm.s.cDeprecatedPageLocks == 0)
4015 {
4016 pgmR3PhysUnmapChunkRendezvous(pVM, pVCpu, NULL);
4017 break;
4018 }
4019 /* fall thru */
4020 }
4021 default:
4022 rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysUnmapChunk, 1, pVM);
4023 AssertRC(rc);
4024 break;
4025 }
4026 }
4027
4028 /*
4029 * Update the tree. We must do this after any unmapping to make sure
4030 * the chunk we're going to return isn't unmapped by accident.
4031 */
4032 AssertPtr(Req.pvR3);
4033 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
4034 AssertRelease(fRc);
4035 pVM->pgm.s.ChunkR3Map.c++;
4036 pVM->pgm.s.cMappedChunks++;
4037 }
4038 else
4039 {
4040 /** @todo this may fail because of /proc/sys/vm/max_map_count, so we
4041         *  should probably restrict ourselves on Linux. */
4042 AssertRC(rc);
4043#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
4044 MMR3HeapFree(pChunk);
4045#else
4046 MMR3UkHeapFree(pVM, pChunk, MM_TAG_PGM_CHUNK_MAPPING);
4047#endif
4048 pChunk = NULL;
4049 }
4050
4051 *ppChunk = pChunk;
4052 return rc;
4053}
4054
4055
4056/**
4057 * For VMMCALLRING3_PGM_MAP_CHUNK, considered internal.
4058 *
4059 * @returns see pgmR3PhysChunkMap.
4060 * @param pVM The VM handle.
4061 * @param idChunk The chunk to map.
4062 */
4063VMMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
4064{
4065 PPGMCHUNKR3MAP pChunk;
4066 int rc;
4067
4068 pgmLock(pVM);
4069 rc = pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
4070 pgmUnlock(pVM);
4071 return rc;
4072}
4073
4074
4075/**
4076 * Invalidates the TLB for the ring-3 mapping cache.
4077 *
4078 * @param pVM The VM handle.
4079 */
4080VMMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
4081{
4082 pgmLock(pVM);
4083 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
4084 {
4085 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
4086 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
4087 }
4088 /* The page map TLB references chunks, so invalidate that one too. */
4089 pgmPhysInvalidatePageMapTLB(pVM);
4090 pgmUnlock(pVM);
4091}
4092
4093
4094/**
4095 * Response to VMMCALLRING3_PGM_ALLOCATE_LARGE_PAGE to allocate a large (2MB) page
4096 * for use with a nested paging PDE.
4097 *
4098 * @returns The following VBox status codes.
4099 * @retval VINF_SUCCESS on success.
4100 * @retval VINF_EM_NO_MEMORY if we're out of memory.
4101 *
4102 * @param pVM The VM handle.
4103 * @param GCPhys GC physical start address of the 2 MB range
4104 */
4105VMMR3DECL(int) PGMR3PhysAllocateLargeHandyPage(PVM pVM, RTGCPHYS GCPhys)
4106{
4107#ifdef PGM_WITH_LARGE_PAGES
4108 uint64_t u64TimeStamp1, u64TimeStamp2;
4109
4110 pgmLock(pVM);
4111
4112 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatAllocLargePage, a);
4113 u64TimeStamp1 = RTTimeMilliTS();
4114 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE, 0, NULL);
4115 u64TimeStamp2 = RTTimeMilliTS();
4116 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatAllocLargePage, a);
4117 if (RT_SUCCESS(rc))
4118 {
4119 Assert(pVM->pgm.s.cLargeHandyPages == 1);
4120
4121 uint32_t idPage = pVM->pgm.s.aLargeHandyPage[0].idPage;
4122 RTHCPHYS HCPhys = pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys;
4123
4124 void *pv;
4125
4126 /* Map the large page into our address space.
4127 *
4128 * Note: assuming that within the 2 MB range:
4129         * - the host physical addresses are contiguous: HCPhys(GCPhys + PAGE_SIZE) == HCPhys(GCPhys) + PAGE_SIZE (whole point of this exercise)
4130         * - the ring-3 user space mapping is contiguous as well
4131         * - the page ids are consecutive: page id(GCPhys + PAGE_SIZE) == page id(GCPhys) + 1
4132 */
4133 rc = pgmPhysPageMapByPageID(pVM, idPage, HCPhys, &pv);
4134 AssertLogRelMsg(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc\n", idPage, HCPhys, rc));
4135
4136 if (RT_SUCCESS(rc))
4137 {
4138 /*
4139 * Clear the pages.
4140 */
4141 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatClearLargePage, b);
4142 for (unsigned i = 0; i < _2M/PAGE_SIZE; i++)
4143 {
4144 ASMMemZeroPage(pv);
4145
4146 PPGMPAGE pPage;
4147 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
4148 AssertRC(rc);
4149
4150 Assert(PGM_PAGE_IS_ZERO(pPage));
4151 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
4152 pVM->pgm.s.cZeroPages--;
4153
4154 /*
4155 * Do the PGMPAGE modifications.
4156 */
4157 pVM->pgm.s.cPrivatePages++;
4158 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
4159 PGM_PAGE_SET_PAGEID(pVM, pPage, idPage);
4160 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
4161 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PDE);
4162 PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
4163 PGM_PAGE_SET_TRACKING(pVM, pPage, 0);
4164
4165 /* Somewhat dirty assumption that page ids are increasing. */
4166 idPage++;
4167
4168 HCPhys += PAGE_SIZE;
4169 GCPhys += PAGE_SIZE;
4170
4171 pv = (void *)((uintptr_t)pv + PAGE_SIZE);
4172
4173 Log3(("PGMR3PhysAllocateLargePage: idPage=%#x HCPhys=%RGp\n", idPage, HCPhys));
4174 }
4175 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatClearLargePage, b);
4176
4177 /* Flush all TLBs. */
4178 PGM_INVL_ALL_VCPU_TLBS(pVM);
4179 pgmPhysInvalidatePageMapTLB(pVM);
4180 }
4181 pVM->pgm.s.cLargeHandyPages = 0;
4182 }
4183
4184 if (RT_SUCCESS(rc))
4185 {
4186 static uint32_t cTimeOut = 0;
4187 uint64_t u64TimeStampDelta = u64TimeStamp2 - u64TimeStamp1;
4188
4189 if (u64TimeStampDelta > 100)
4190 {
4191 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatLargePageOverflow);
4192 if ( ++cTimeOut > 10
4193 || u64TimeStampDelta > 1000 /* more than one second forces an early retirement from allocating large pages. */)
4194 {
4195                /* If repeated attempts to allocate a large page take more than 100 ms each, we fall back to normal 4 KB pages.
4196                 * E.g. Vista 64 tries to move memory around, which takes a huge amount of time.
4197                 */
4198                LogRel(("PGMR3PhysAllocateLargePage: allocating large pages takes too long (last attempt %RU64 ms; nr of timeouts %d); DISABLE\n", u64TimeStampDelta, cTimeOut));
4199 PGMSetLargePageUsage(pVM, false);
4200 }
4201 }
4202 else
4203 if (cTimeOut > 0)
4204 cTimeOut--;
4205 }
4206
4207 pgmUnlock(pVM);
4208 return rc;
4209#else
4210 return VERR_NOT_IMPLEMENTED;
4211#endif /* PGM_WITH_LARGE_PAGES */
4212}
4213
4214
4215/**
4216 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES.
4217 *
4218 * This function will also work the VM_FF_PGM_NO_MEMORY force action flag, to
4219 * signal and clear the out-of-memory condition.  Once that condition has been
4220 * contracted, this API is used to try to clear it when the user wants to resume.
4221 *
4222 * @returns The following VBox status codes.
4223 * @retval VINF_SUCCESS on success. FFs cleared.
4224 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in
4225 * this case and it gets accompanied by VM_FF_PGM_NO_MEMORY.
4226 *
4227 * @param pVM The VM handle.
4228 *
4229 * @remarks The VINF_EM_NO_MEMORY status is for the benefit of the FF processing
4230 * in EM.cpp and shouldn't be propagated outside TRPM, HWACCM, EM and
4231 * pgmPhysEnsureHandyPage. There is one exception to this in the \#PF
4232 * handler.
4233 */
4234VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
4235{
4236 pgmLock(pVM);
4237
4238 /*
4239 * Allocate more pages, noting down the index of the first new page.
4240 */
4241 uint32_t iClear = pVM->pgm.s.cHandyPages;
4242 AssertMsgReturn(iClear <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d", iClear), VERR_PGM_HANDY_PAGE_IPE);
4243 Log(("PGMR3PhysAllocateHandyPages: %d -> %d\n", iClear, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
4244 int rcAlloc = VINF_SUCCESS;
4245 int rcSeed = VINF_SUCCESS;
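    /* Ask ring-0 for more handy pages.  When GMM has no backing memory left
       it answers VERR_GMM_SEED_ME; we then allocate a chunk in ring-3, hand
       ("seed") it to GMM via VMMR0_DO_GMM_SEED_CHUNK and retry the
       allocation. */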
4246 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
4247 while (rc == VERR_GMM_SEED_ME)
4248 {
4249 void *pvChunk;
4250 rcAlloc = rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
4251 if (RT_SUCCESS(rc))
4252 {
4253 rcSeed = rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
4254 if (RT_FAILURE(rc))
4255 SUPR3PageFree(pvChunk, GMM_CHUNK_SIZE >> PAGE_SHIFT);
4256 }
4257 if (RT_SUCCESS(rc))
4258 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
4259 }
4260
4261    /** @todo We should split this up into an allocate and a flush operation; sometimes you want to flush without allocating more (which will trigger the VM account limit error). */
4262 if ( rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT
4263 && pVM->pgm.s.cHandyPages > 0)
4264 {
4265 /* Still handy pages left, so don't panic. */
4266 rc = VINF_SUCCESS;
4267 }
4268
4269 if (RT_SUCCESS(rc))
4270 {
4271 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
4272 Assert(pVM->pgm.s.cHandyPages > 0);
4273 VM_FF_CLEAR(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
4274 VM_FF_CLEAR(pVM, VM_FF_PGM_NO_MEMORY);
4275
4276#ifdef VBOX_STRICT
4277 uint32_t i;
4278 for (i = iClear; i < pVM->pgm.s.cHandyPages; i++)
4279 if ( pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID
4280 || pVM->pgm.s.aHandyPages[i].idSharedPage != NIL_GMM_PAGEID
4281 || (pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & PAGE_OFFSET_MASK))
4282 break;
4283 if (i != pVM->pgm.s.cHandyPages)
4284 {
4285 RTAssertMsg1Weak(NULL, __LINE__, __FILE__, __FUNCTION__);
4286 RTAssertMsg2Weak("i=%d iClear=%d cHandyPages=%d\n", i, iClear, pVM->pgm.s.cHandyPages);
4287 for (uint32_t j = iClear; j < pVM->pgm.s.cHandyPages; j++)
4288                RTAssertMsg2Add("%03d: idPage=%d HCPhysGCPhys=%RHp idSharedPage=%d%s\n", j,
4289                                pVM->pgm.s.aHandyPages[j].idPage,
4290                                pVM->pgm.s.aHandyPages[j].HCPhysGCPhys,
4291                                pVM->pgm.s.aHandyPages[j].idSharedPage,
4292                                j == i ? " <---" : "");
4293 RTAssertPanic();
4294 }
4295#endif
4296 /*
4297 * Clear the pages.
4298 */
4299 while (iClear < pVM->pgm.s.cHandyPages)
4300 {
4301 PGMMPAGEDESC pPage = &pVM->pgm.s.aHandyPages[iClear];
4302 void *pv;
4303 rc = pgmPhysPageMapByPageID(pVM, pPage->idPage, pPage->HCPhysGCPhys, &pv);
4304 AssertLogRelMsgBreak(RT_SUCCESS(rc),
4305 ("%u/%u: idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc\n",
4306 iClear, pVM->pgm.s.cHandyPages, pPage->idPage, pPage->HCPhysGCPhys, rc));
4307 ASMMemZeroPage(pv);
4308 iClear++;
4309 Log3(("PGMR3PhysAllocateHandyPages: idPage=%#x HCPhys=%RGp\n", pPage->idPage, pPage->HCPhysGCPhys));
4310 }
4311 }
4312 else
4313 {
4314 uint64_t cAllocPages, cMaxPages, cBalloonPages;
4315
4316 /*
4317 * We should never get here unless there is a genuine shortage of
4318 * memory (or some internal error). Flag the error so the VM can be
4319 * suspended ASAP and the user informed. If we're totally out of
4320 * handy pages we will return failure.
4321 */
4322 /* Report the failure. */
4323 LogRel(("PGM: Failed to procure handy pages; rc=%Rrc rcAlloc=%Rrc rcSeed=%Rrc cHandyPages=%#x\n"
4324 " cAllPages=%#x cPrivatePages=%#x cSharedPages=%#x cZeroPages=%#x\n",
4325 rc, rcAlloc, rcSeed,
4326 pVM->pgm.s.cHandyPages,
4327 pVM->pgm.s.cAllPages,
4328 pVM->pgm.s.cPrivatePages,
4329 pVM->pgm.s.cSharedPages,
4330 pVM->pgm.s.cZeroPages));
4331
4332 if (GMMR3QueryMemoryStats(pVM, &cAllocPages, &cMaxPages, &cBalloonPages) == VINF_SUCCESS)
4333 {
4334 LogRel(("GMM: Statistics:\n"
4335 " Allocated pages: %RX64\n"
4336 " Maximum pages: %RX64\n"
4337 " Ballooned pages: %RX64\n", cAllocPages, cMaxPages, cBalloonPages));
4338 }
4339
4340 if ( rc != VERR_NO_MEMORY
4341 && rc != VERR_LOCK_FAILED)
4342 {
4343 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
4344 {
4345 LogRel(("PGM: aHandyPages[#%#04x] = {.HCPhysGCPhys=%RHp, .idPage=%#08x, .idSharedPage=%#08x}\n",
4346 i, pVM->pgm.s.aHandyPages[i].HCPhysGCPhys, pVM->pgm.s.aHandyPages[i].idPage,
4347 pVM->pgm.s.aHandyPages[i].idSharedPage));
4348 uint32_t const idPage = pVM->pgm.s.aHandyPages[i].idPage;
4349 if (idPage != NIL_GMM_PAGEID)
4350 {
4351 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
4352 pRam;
4353 pRam = pRam->pNextR3)
4354 {
4355 uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
4356 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4357 if (PGM_PAGE_GET_PAGEID(&pRam->aPages[iPage]) == idPage)
4358 LogRel(("PGM: Used by %RGp %R[pgmpage] (%s)\n",
4359 pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pRam->aPages[iPage], pRam->pszDesc));
4360 }
4361 }
4362 }
4363 }
4364
4365 /* Set the FFs and adjust rc. */
4366 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
4367 VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
4368 if ( rc == VERR_NO_MEMORY
4369 || rc == VERR_LOCK_FAILED)
4370 rc = VINF_EM_NO_MEMORY;
4371 }
4372
4373 pgmUnlock(pVM);
4374 return rc;
4375}
4376
4377
4378/**
4379 * Frees the specified RAM page and replaces it with the ZERO page.
4380 *
4381 * This is used by ballooning, remapping MMIO2, RAM reset and state loading.
4382 *
4383 * @param pVM Pointer to the shared VM structure.
4384 * @param pReq Pointer to the request.
4385 * @param pcPendingPages Where the number of pages waiting to be freed are
4386 * kept. This will normally be incremented.
4387 * @param pPage Pointer to the page structure.
4388 * @param GCPhys The guest physical address of the page, if applicable.
4389 *
4390 * @remarks The caller must own the PGM lock.
4391 */
4392int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys)
4393{
4394 /*
4395 * Assert sanity.
4396 */
4397 PGM_LOCK_ASSERT_OWNER(pVM);
4398 if (RT_UNLIKELY( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
4399 && PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW))
4400 {
4401 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
4402 return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
4403 }
4404
4405 /** @todo What about ballooning of large pages??! */
4406 Assert( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
4407 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED);
4408
4409 if ( PGM_PAGE_IS_ZERO(pPage)
4410 || PGM_PAGE_IS_BALLOONED(pPage))
4411 return VINF_SUCCESS;
4412
4413 const uint32_t idPage = PGM_PAGE_GET_PAGEID(pPage);
4414 Log3(("pgmPhysFreePage: idPage=%#x GCPhys=%RGp pPage=%R[pgmpage]\n", idPage, GCPhys, pPage));
4415 if (RT_UNLIKELY( idPage == NIL_GMM_PAGEID
4416 || idPage > GMM_PAGEID_LAST
4417 || PGM_PAGE_GET_CHUNKID(pPage) == NIL_GMM_CHUNKID))
4418 {
4419 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
4420        return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, idPage);
4421 }
4422
4423 /* update page count stats. */
4424 if (PGM_PAGE_IS_SHARED(pPage))
4425 pVM->pgm.s.cSharedPages--;
4426 else
4427 pVM->pgm.s.cPrivatePages--;
4428 pVM->pgm.s.cZeroPages++;
4429
4430 /* Deal with write monitored pages. */
4431 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
4432 {
4433 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
4434 pVM->pgm.s.cWrittenToPages++;
4435 }
4436
4437 /*
4438 * pPage = ZERO page.
4439 */
4440 PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
4441 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
4442 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
4443 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_DONTCARE);
4444 PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
4445 PGM_PAGE_SET_TRACKING(pVM, pPage, 0);
4446
4447 /* Flush physical page map TLB entry. */
4448 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
4449
4450 /*
4451 * Make sure it's not in the handy page array.
4452 */
4453 for (uint32_t i = pVM->pgm.s.cHandyPages; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
4454 {
4455 if (pVM->pgm.s.aHandyPages[i].idPage == idPage)
4456 {
4457 pVM->pgm.s.aHandyPages[i].idPage = NIL_GMM_PAGEID;
4458 break;
4459 }
4460 if (pVM->pgm.s.aHandyPages[i].idSharedPage == idPage)
4461 {
4462 pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
4463 break;
4464 }
4465 }
4466
4467 /*
4468 * Push it onto the page array.
4469 */
4470 uint32_t iPage = *pcPendingPages;
4471 Assert(iPage < PGMPHYS_FREE_PAGE_BATCH_SIZE);
4472 *pcPendingPages += 1;
4473
4474 pReq->aPages[iPage].idPage = idPage;
4475
4476 if (iPage + 1 < PGMPHYS_FREE_PAGE_BATCH_SIZE)
4477 return VINF_SUCCESS;
4478
4479 /*
4480 * Flush the pages.
4481 */
4482 int rc = GMMR3FreePagesPerform(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE);
4483 if (RT_SUCCESS(rc))
4484 {
4485 GMMR3FreePagesRePrep(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
4486 *pcPendingPages = 0;
4487 }
4488 return rc;
4489}
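
/*
 * Illustrative sketch (not part of the build) of the batching protocol
 * around pgmPhysFreePage, mirroring the way pgmR3PhysRomReset above uses
 * it: prepare a request, queue pages into it (pgmPhysFreePage flushes the
 * request automatically whenever the batch fills up), flush the remainder
 * and clean up.  The caller must own the PGM lock; the papPages/paGCPhys
 * inputs are hypothetical.
 */
#if 0
static int exampleFreePageBatch(PVM pVM, PPGMPAGE *papPages, RTGCPHYS *paGCPhys, uint32_t cPages)
{
    uint32_t         cPendingPages = 0;
    PGMMFREEPAGESREQ pReq;
    int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    AssertRCReturn(rc, rc);

    for (uint32_t i = 0; i < cPages; i++)
    {
        rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, papPages[i], paGCPhys[i]);
        AssertLogRelRCReturn(rc, rc);
    }

    if (cPendingPages)
    {
        rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
        AssertLogRelRCReturn(rc, rc);
    }
    GMMR3FreePagesCleanup(pReq);
    return VINF_SUCCESS;
}
#endif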
4490
4491
4492/**
4493 * Converts a GC physical address to a HC ring-3 pointer, with some
4494 * additional checks.
4495 *
4496 * @returns VBox status code.
4497 * @retval VINF_SUCCESS on success.
4498 * @retval VINF_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4499 * access handler of some kind.
4500 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4501 * accesses or is odd in any way.
4502 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4503 *
4504 * @param pVM The VM handle.
4505 * @param GCPhys The GC physical address to convert.
4506 * @param fWritable Whether write access is required.
4507 * @param ppv Where to store the pointer corresponding to GCPhys on
4508 * success.
4509 */
4510VMMR3DECL(int) PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
4511{
4512 pgmLock(pVM);
4513
4514 PPGMRAMRANGE pRam;
4515 PPGMPAGE pPage;
4516 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4517 if (RT_SUCCESS(rc))
4518 {
4519 if (PGM_PAGE_IS_BALLOONED(pPage))
4520 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
4521 else if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
4522 rc = VINF_SUCCESS;
4523 else
4524 {
4525 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4526 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4527 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
4528 {
4529 /** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
4530 * in -norawr0 mode. */
4531 if (fWritable)
4532 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
4533 }
4534 else
4535 {
4536            /* Temporarily disabled physical handler(s): since the recompiler
4537               doesn't get notified when such a handler is reset, we'll have to
4538               pretend it's operating normally. */
4539 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
4540 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4541 else
4542 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
4543 }
4544 }
4545 if (RT_SUCCESS(rc))
4546 {
4547 int rc2;
4548
4549 /* Make sure what we return is writable. */
4550 if (fWritable)
4551 switch (PGM_PAGE_GET_STATE(pPage))
4552 {
4553 case PGM_PAGE_STATE_ALLOCATED:
4554 break;
4555 case PGM_PAGE_STATE_BALLOONED:
4556 AssertFailed();
4557 break;
4558 case PGM_PAGE_STATE_ZERO:
4559 case PGM_PAGE_STATE_SHARED:
4560 if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
4561 break;
4562 case PGM_PAGE_STATE_WRITE_MONITORED:
4563 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
4564 AssertLogRelRCReturn(rc2, rc2);
4565 break;
4566 }
4567
4568 /* Get a ring-3 mapping of the address. */
4569 PPGMPAGER3MAPTLBE pTlbe;
4570 rc2 = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
4571 AssertLogRelRCReturn(rc2, rc2);
4572 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4573 /** @todo mapping/locking hell; this isn't horribly efficient since
4574 * pgmPhysPageLoadIntoTlb will repeat the lookup we've done here. */
4575
4576 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
4577 }
4578 else
4579 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
4580
4581 /* else: handler catching all access, no pointer returned. */
4582 }
4583 else
4584 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
4585
4586 pgmUnlock(pVM);
4587 return rc;
4588}
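
/*
 * Illustrative sketch (not part of the build) of how a caller might consume
 * the status codes documented for PGMR3PhysTlbGCPhys2Ptr.  The hypothetical
 * caller only needs read access and treats the catch-write case as a
 * read-only mapping.
 */
#if 0
static int exampleTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, void **ppv, bool *pfReadOnly)
{
    int rc = PGMR3PhysTlbGCPhys2Ptr(pVM, GCPhys, false /*fWritable*/, ppv);
    switch (rc)
    {
        case VINF_SUCCESS:                  /* plain page, no handlers */
            *pfReadOnly = false;
            return VINF_SUCCESS;
        case VINF_PGM_PHYS_TLB_CATCH_WRITE: /* pointer is valid, but writes must take the handler path */
            *pfReadOnly = true;
            return VINF_SUCCESS;
        case VERR_PGM_PHYS_TLB_CATCH_ALL:   /* all access handled, e.g. MMIO - no direct pointer */
        case VERR_PGM_PHYS_TLB_UNASSIGNED:  /* nothing mapped at GCPhys */
        default:
            *ppv = NULL;
            return rc;
    }
}
#endif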
4589