VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp@29287

Last change on this file since 29287 was 29201, checked in by vboxsync, 15 years ago

Shared paging updates

File size: 132.6 KB
1/* $Id: PGMPhys.cpp 29201 2010-05-07 12:24:54Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM_PHYS
23#include <VBox/pgm.h>
24#include <VBox/iom.h>
25#include <VBox/mm.h>
26#include <VBox/stam.h>
27#include <VBox/rem.h>
28#include <VBox/pdmdev.h>
29#include "PGMInternal.h"
30#include <VBox/vm.h>
31#include "PGMInline.h"
32#include <VBox/sup.h>
33#include <VBox/param.h>
34#include <VBox/err.h>
35#include <VBox/log.h>
36#include <iprt/assert.h>
37#include <iprt/alloc.h>
38#include <iprt/asm.h>
39#include <iprt/thread.h>
40#include <iprt/string.h>
41
42
43/*******************************************************************************
44* Defined Constants And Macros *
45*******************************************************************************/
46/** The number of pages to free in one batch. */
47#define PGMPHYS_FREE_PAGE_BATCH_SIZE 128
48
49
50/*******************************************************************************
51* Internal Functions *
52*******************************************************************************/
53static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
54static int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys);
55
56
57/*
58 * PGMR3PhysReadU8-64
59 * PGMR3PhysWriteU8-64
60 */
61#define PGMPHYSFN_READNAME PGMR3PhysReadU8
62#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU8
63#define PGMPHYS_DATASIZE 1
64#define PGMPHYS_DATATYPE uint8_t
65#include "PGMPhysRWTmpl.h"
66
67#define PGMPHYSFN_READNAME PGMR3PhysReadU16
68#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU16
69#define PGMPHYS_DATASIZE 2
70#define PGMPHYS_DATATYPE uint16_t
71#include "PGMPhysRWTmpl.h"
72
73#define PGMPHYSFN_READNAME PGMR3PhysReadU32
74#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU32
75#define PGMPHYS_DATASIZE 4
76#define PGMPHYS_DATATYPE uint32_t
77#include "PGMPhysRWTmpl.h"
78
79#define PGMPHYSFN_READNAME PGMR3PhysReadU64
80#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU64
81#define PGMPHYS_DATASIZE 8
82#define PGMPHYS_DATATYPE uint64_t
83#include "PGMPhysRWTmpl.h"
84
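/*
 * Illustrative sketch only (an assumption, not the real template output):
 * each PGMPhysRWTmpl.h inclusion above presumably expands the PGMPHYSFN_*
 * and PGMPHYS_* macros into a read/write accessor pair shaped roughly like
 * the following, with the real bodies living in PGMPhysRWTmpl.h:
 *
 *     VMMDECL(uint32_t) PGMR3PhysReadU32(PVM pVM, RTGCPHYS GCPhys)
 *     {
 *         uint32_t u32 = 0;
 *         PGMPhysRead(pVM, GCPhys, &u32, sizeof(u32));
 *         return u32;
 *     }
 */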
85
86/**
87 * EMT worker for PGMR3PhysReadExternal.
88 */
89static DECLCALLBACK(int) pgmR3PhysReadExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, void *pvBuf, size_t cbRead)
90{
91 PGMPhysRead(pVM, *pGCPhys, pvBuf, cbRead);
92 return VINF_SUCCESS;
93}
94
95
96/**
97 * Read from physical memory, external users.
98 *
99 * @returns VBox status code.
100 * @retval VINF_SUCCESS.
101 *
102 * @param pVM VM Handle.
103 * @param GCPhys Physical address to start reading from.
104 * @param pvBuf Where to put the bytes read.
105 * @param cbRead How many bytes to read.
106 *
107 * @thread Any but EMTs.
108 */
109VMMR3DECL(int) PGMR3PhysReadExternal(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
110{
111 VM_ASSERT_OTHER_THREAD(pVM);
112
113 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
114 LogFlow(("PGMR3PhysReadExternal: %RGp %d\n", GCPhys, cbRead));
115
116 pgmLock(pVM);
117
118 /*
119 * Copy loop on ram ranges.
120 */
121 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
122 for (;;)
123 {
124 /* Find range. */
125 while (pRam && GCPhys > pRam->GCPhysLast)
126 pRam = pRam->CTX_SUFF(pNext);
127 /* Inside range or not? */
128 if (pRam && GCPhys >= pRam->GCPhys)
129 {
130 /*
131 * Must work our way thru this page by page.
132 */
133 RTGCPHYS off = GCPhys - pRam->GCPhys;
134 while (off < pRam->cb)
135 {
136 unsigned iPage = off >> PAGE_SHIFT;
137 PPGMPAGE pPage = &pRam->aPages[iPage];
138
139 /*
140 * If the page has an ALL access handler, we'll have to
141 * delegate the job to EMT.
142 */
143 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
144 {
145 pgmUnlock(pVM);
146
147 return VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysReadExternalEMT, 4,
148 pVM, &GCPhys, pvBuf, cbRead);
149 }
150 Assert(!PGM_PAGE_IS_MMIO(pPage));
151
152 /*
153 * Simple stuff, go ahead.
154 */
155 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
156 if (cb > cbRead)
157 cb = cbRead;
158 const void *pvSrc;
159 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
160 if (RT_SUCCESS(rc))
161 memcpy(pvBuf, pvSrc, cb);
162 else
163 {
164 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
165 pRam->GCPhys + off, pPage, rc));
166 memset(pvBuf, 0xff, cb);
167 }
168
169 /* next page */
170 if (cb >= cbRead)
171 {
172 pgmUnlock(pVM);
173 return VINF_SUCCESS;
174 }
175 cbRead -= cb;
176 off += cb;
177 GCPhys += cb;
178 pvBuf = (char *)pvBuf + cb;
179 } /* walk pages in ram range. */
180 }
181 else
182 {
183 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
184
185 /*
186 * Unassigned address space.
187 */
188 if (!pRam)
189 break;
190 size_t cb = pRam->GCPhys - GCPhys;
191 if (cb >= cbRead)
192 {
193 memset(pvBuf, 0xff, cbRead);
194 break;
195 }
196 memset(pvBuf, 0xff, cb);
197
198 cbRead -= cb;
199 pvBuf = (char *)pvBuf + cb;
200 GCPhys += cb;
201 }
202 } /* Ram range walk */
203
204 pgmUnlock(pVM);
205
206 return VINF_SUCCESS;
207}
208
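/*
 * Minimal usage sketch (hypothetical caller, not part of this file): a
 * non-EMT I/O thread pulling a header out of guest RAM. Pages with ALL
 * access handlers are transparently delegated to an EMT by the API, and
 * unassigned space reads back as 0xff bytes instead of failing.
 */
static int myIoThreadReadGuestHeader(PVM pVM, RTGCPHYS GCPhysHdr)
{
    uint8_t abHdr[64];
    return PGMR3PhysReadExternal(pVM, GCPhysHdr, abHdr, sizeof(abHdr));
}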
209
210/**
211 * EMT worker for PGMR3PhysWriteExternal.
212 */
213static DECLCALLBACK(int) pgmR3PhysWriteExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, const void *pvBuf, size_t cbWrite)
214{
215 /** @todo VERR_EM_NO_MEMORY */
216 PGMPhysWrite(pVM, *pGCPhys, pvBuf, cbWrite);
217 return VINF_SUCCESS;
218}
219
220
221/**
222 * Write to physical memory, external users.
223 *
224 * @returns VBox status code.
225 * @retval VINF_SUCCESS.
226 * @retval VERR_EM_NO_MEMORY.
227 *
228 * @param pVM VM Handle.
229 * @param GCPhys Physical address to write to.
230 * @param pvBuf What to write.
231 * @param cbWrite How many bytes to write.
232 * @param pszWho Who is writing. For tracking down who is writing
233 * after we've saved the state.
234 *
235 * @thread Any but EMTs.
236 */
237VMMDECL(int) PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, const char *pszWho)
238{
239 VM_ASSERT_OTHER_THREAD(pVM);
240
241 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites,
242 ("Calling PGMR3PhysWriteExternal after pgmR3Save()! GCPhys=%RGp cbWrite=%#x pszWho=%s\n",
243 GCPhys, cbWrite, pszWho));
244 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
245 LogFlow(("PGMR3PhysWriteExternal: %RGp %d\n", GCPhys, cbWrite));
246
247 pgmLock(pVM);
248
249 /*
250 * Copy loop on ram ranges, stop when we hit something difficult.
251 */
252 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
253 for (;;)
254 {
255 /* Find range. */
256 while (pRam && GCPhys > pRam->GCPhysLast)
257 pRam = pRam->CTX_SUFF(pNext);
258 /* Inside range or not? */
259 if (pRam && GCPhys >= pRam->GCPhys)
260 {
261 /*
262 * Must work our way thru this page by page.
263 */
264 RTGCPHYS off = GCPhys - pRam->GCPhys;
265 while (off < pRam->cb)
266 {
267 unsigned iPage = off >> PAGE_SHIFT;
268 PPGMPAGE pPage = &pRam->aPages[iPage];
269
270 /*
271 * If the page is problematic, we have to do the work on the EMT.
272 *
273 * Allocating writable pages and access handlers are
274 * problematic; write monitored pages are simple and can be
275 * dealt with here.
276 */
277 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
278 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
279 {
280 if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
281 && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
282 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
283 else
284 {
285 pgmUnlock(pVM);
286
287 return VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysWriteExternalEMT, 4,
288 pVM, &GCPhys, pvBuf, cbWrite);
289 }
290 }
291 Assert(!PGM_PAGE_IS_MMIO(pPage));
292
293 /*
294 * Simple stuff, go ahead.
295 */
296 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
297 if (cb > cbWrite)
298 cb = cbWrite;
299 void *pvDst;
300 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
301 if (RT_SUCCESS(rc))
302 memcpy(pvDst, pvBuf, cb);
303 else
304 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
305 pRam->GCPhys + off, pPage, rc));
306
307 /* next page */
308 if (cb >= cbWrite)
309 {
310 pgmUnlock(pVM);
311 return VINF_SUCCESS;
312 }
313
314 cbWrite -= cb;
315 off += cb;
316 GCPhys += cb;
317 pvBuf = (const char *)pvBuf + cb;
318 } /* walk pages in ram range */
319 }
320 else
321 {
322 /*
323 * Unassigned address space, skip it.
324 */
325 if (!pRam)
326 break;
327 size_t cb = pRam->GCPhys - GCPhys;
328 if (cb >= cbWrite)
329 break;
330 cbWrite -= cb;
331 pvBuf = (const char *)pvBuf + cb;
332 GCPhys += cb;
333 }
334 } /* Ram range walk */
335
336 pgmUnlock(pVM);
337 return VINF_SUCCESS;
338}
339
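/*
 * Usage sketch (hypothetical caller): the pszWho tag exists purely so the
 * fNoMorePhysWrites assertion above can name the offender if something
 * writes after the VM state has been saved.
 */
static int myIoThreadPostStatus(PVM pVM, RTGCPHYS GCPhysStatus, uint32_t uStatus)
{
    return PGMR3PhysWriteExternal(pVM, GCPhysStatus, &uStatus, sizeof(uStatus), "MyDevice");
}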
340
341/**
342 * VMR3ReqCall worker for PGMR3PhysGCPhys2CCPtrExternal to make pages writable.
343 *
344 * @returns see PGMR3PhysGCPhys2CCPtrExternal
345 * @param pVM The VM handle.
346 * @param pGCPhys Pointer to the guest physical address.
347 * @param ppv Where to store the mapping address.
348 * @param pLock Where to store the lock.
349 */
350static DECLCALLBACK(int) pgmR3PhysGCPhys2CCPtrDelegated(PVM pVM, PRTGCPHYS pGCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
351{
352 /*
353 * Just hand it to PGMPhysGCPhys2CCPtr and check that it's not a page with
354 * an access handler after it succeeds.
355 */
356 int rc = pgmLock(pVM);
357 AssertRCReturn(rc, rc);
358
359 rc = PGMPhysGCPhys2CCPtr(pVM, *pGCPhys, ppv, pLock);
360 if (RT_SUCCESS(rc))
361 {
362 PPGMPAGEMAPTLBE pTlbe;
363 int rc2 = pgmPhysPageQueryTlbe(&pVM->pgm.s, *pGCPhys, &pTlbe);
364 AssertFatalRC(rc2);
365 PPGMPAGE pPage = pTlbe->pPage;
366 if (PGM_PAGE_IS_MMIO(pPage))
367 {
368 PGMPhysReleasePageMappingLock(pVM, pLock);
369 rc = VERR_PGM_PHYS_PAGE_RESERVED;
370 }
371 else if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
372#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
373 || pgmPoolIsDirtyPage(pVM, *pGCPhys)
374#endif
375 )
376 {
377 /* We *must* flush any corresponding pgm pool page here, otherwise we'll
378 * not be informed about writes and keep bogus gst->shw mappings around.
379 */
380 pgmPoolFlushPageByGCPhys(pVM, *pGCPhys);
381 Assert(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage));
382 /** @todo r=bird: return VERR_PGM_PHYS_PAGE_RESERVED here if it still has
383 * active handlers, see the PGMR3PhysGCPhys2CCPtrExternal docs. */
384 }
385 }
386
387 pgmUnlock(pVM);
388 return rc;
389}
390
391
392/**
393 * Requests the mapping of a guest page into ring-3, external threads.
394 *
395 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
396 * release it.
397 *
398 * This API will assume your intention is to write to the page, and will
399 * therefore replace shared and zero pages. If you do not intend to modify the
400 * page, use the PGMR3PhysGCPhys2CCPtrReadOnlyExternal() API.
401 *
402 * @returns VBox status code.
403 * @retval VINF_SUCCESS on success.
404 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
405 * backing or if the page has any active access handlers. The caller
406 * must fall back on using PGMR3PhysWriteExternal.
407 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
408 *
409 * @param pVM The VM handle.
410 * @param GCPhys The guest physical address of the page that should be mapped.
411 * @param ppv Where to store the address corresponding to GCPhys.
412 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
413 *
414 * @remark Avoid calling this API from within critical sections (other than the
415 * PGM one) because of the deadlock risk when we have to delegate the
416 * task to an EMT.
417 * @thread Any.
418 */
419VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrExternal(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
420{
421 AssertPtr(ppv);
422 AssertPtr(pLock);
423
424 Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
425
426 int rc = pgmLock(pVM);
427 AssertRCReturn(rc, rc);
428
429 /*
430 * Query the Physical TLB entry for the page (may fail).
431 */
432 PPGMPAGEMAPTLBE pTlbe;
433 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
434 if (RT_SUCCESS(rc))
435 {
436 PPGMPAGE pPage = pTlbe->pPage;
437 if (PGM_PAGE_IS_MMIO(pPage))
438 rc = VERR_PGM_PHYS_PAGE_RESERVED;
439 else
440 {
441 /*
442 * If the page is shared, the zero page, or being write monitored
443 * it must be converted to a page that's writable if possible.
444 * We can only deal with write monitored pages here, the rest have
445 * to be on an EMT.
446 */
447 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
448 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
449#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
450 || pgmPoolIsDirtyPage(pVM, GCPhys)
451#endif
452 )
453 {
454 if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
455 && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
456#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
457 && !pgmPoolIsDirtyPage(pVM, GCPhys)
458#endif
459 )
460 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
461 else
462 {
463 pgmUnlock(pVM);
464
465 return VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
466 pVM, &GCPhys, ppv, pLock);
467 }
468 }
469
470 /*
471 * Now, just perform the locking and calculate the return address.
472 */
473 PPGMPAGEMAP pMap = pTlbe->pMap;
474 if (pMap)
475 pMap->cRefs++;
476
477 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
478 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
479 {
480 if (cLocks == 0)
481 pVM->pgm.s.cWriteLockedPages++;
482 PGM_PAGE_INC_WRITE_LOCKS(pPage);
483 }
484 else if (cLocks != PGM_PAGE_MAX_LOCKS)
485 {
486 PGM_PAGE_INC_WRITE_LOCKS(pPage);
487 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
488 if (pMap)
489 pMap->cRefs++; /* Extra ref to prevent it from going away. */
490 }
491
492 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
493 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
494 pLock->pvMap = pMap;
495 }
496 }
497
498 pgmUnlock(pVM);
499 return rc;
500}
501
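/*
 * Usage sketch (hypothetical caller): map, write, release ASAP, and fall
 * back on PGMR3PhysWriteExternal for reserved pages as the docs above
 * prescribe. cb is assumed to stay within the mapped page.
 */
static int myWriteGuestPage(PVM pVM, RTGCPHYS GCPhys, const void *pvSrc, size_t cb)
{
    void          *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMR3PhysGCPhys2CCPtrExternal(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(pv, pvSrc, cb);
        PGMPhysReleasePageMappingLock(pVM, &Lock); /* release ASAP */
    }
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        rc = PGMR3PhysWriteExternal(pVM, GCPhys, pvSrc, cb, "myWriteGuestPage");
    return rc;
}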
502
503/**
504 * Requests the mapping of a guest page into ring-3, external threads.
505 *
506 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
507 * release it.
508 *
509 * @returns VBox status code.
510 * @retval VINF_SUCCESS on success.
511 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
512 * backing or if the page has an active ALL access handler. The caller
513 * must fall back on using PGMPhysRead.
514 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
515 *
516 * @param pVM The VM handle.
517 * @param GCPhys The guest physical address of the page that should be mapped.
518 * @param ppv Where to store the address corresponding to GCPhys.
519 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
520 *
521 * @remark Avoid calling this API from within critical sections (other than
522 * the PGM one) because of the deadlock risk.
523 * @thread Any.
524 */
525VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
526{
527 int rc = pgmLock(pVM);
528 AssertRCReturn(rc, rc);
529
530 /*
531 * Query the Physical TLB entry for the page (may fail).
532 */
533 PPGMPAGEMAPTLBE pTlbe;
534 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
535 if (RT_SUCCESS(rc))
536 {
537 PPGMPAGE pPage = pTlbe->pPage;
538#if 1
539 /* MMIO pages don't have any readable backing. */
540 if (PGM_PAGE_IS_MMIO(pPage))
541 rc = VERR_PGM_PHYS_PAGE_RESERVED;
542#else
543 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
544 rc = VERR_PGM_PHYS_PAGE_RESERVED;
545#endif
546 else
547 {
548 /*
549 * Now, just perform the locking and calculate the return address.
550 */
551 PPGMPAGEMAP pMap = pTlbe->pMap;
552 if (pMap)
553 pMap->cRefs++;
554
555 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
556 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
557 {
558 if (cLocks == 0)
559 pVM->pgm.s.cReadLockedPages++;
560 PGM_PAGE_INC_READ_LOCKS(pPage);
561 }
562 else if (cLocks != PGM_PAGE_MAX_LOCKS)
563 {
564 PGM_PAGE_INC_READ_LOCKS(pPage);
565 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
566 if (pMap)
567 pMap->cRefs++; /* Extra ref to prevent it from going away. */
568 }
569
570 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
571 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
572 pLock->pvMap = pMap;
573 }
574 }
575
576 pgmUnlock(pVM);
577 return rc;
578}
579
580
581/**
582 * Relinks the RAM ranges using the pSelfRC and pSelfR0 pointers.
583 *
584 * Called when anything was relocated.
585 *
586 * @param pVM Pointer to the shared VM structure.
587 */
588void pgmR3PhysRelinkRamRanges(PVM pVM)
589{
590 PPGMRAMRANGE pCur;
591
592#ifdef VBOX_STRICT
593 for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
594 {
595 Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfR0 == MMHyperCCToR0(pVM, pCur));
596 Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfRC == MMHyperCCToRC(pVM, pCur));
597 Assert((pCur->GCPhys & PAGE_OFFSET_MASK) == 0);
598 Assert((pCur->GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
599 Assert((pCur->cb & PAGE_OFFSET_MASK) == 0);
600 Assert(pCur->cb == pCur->GCPhysLast - pCur->GCPhys + 1);
601 for (PPGMRAMRANGE pCur2 = pVM->pgm.s.pRamRangesR3; pCur2; pCur2 = pCur2->pNextR3)
602 Assert( pCur2 == pCur
603 || strcmp(pCur2->pszDesc, pCur->pszDesc)); /** @todo fix MMIO ranges!! */
604 }
605#endif
606
607 pCur = pVM->pgm.s.pRamRangesR3;
608 if (pCur)
609 {
610 pVM->pgm.s.pRamRangesR0 = pCur->pSelfR0;
611 pVM->pgm.s.pRamRangesRC = pCur->pSelfRC;
612
613 for (; pCur->pNextR3; pCur = pCur->pNextR3)
614 {
615 pCur->pNextR0 = pCur->pNextR3->pSelfR0;
616 pCur->pNextRC = pCur->pNextR3->pSelfRC;
617 }
618
619 Assert(pCur->pNextR0 == NIL_RTR0PTR);
620 Assert(pCur->pNextRC == NIL_RTRCPTR);
621 }
622 else
623 {
624 Assert(pVM->pgm.s.pRamRangesR0 == NIL_RTR0PTR);
625 Assert(pVM->pgm.s.pRamRangesRC == NIL_RTRCPTR);
626 }
627 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
628}
629
630
631/**
632 * Links a new RAM range into the list.
633 *
634 * @param pVM Pointer to the shared VM structure.
635 * @param pNew Pointer to the new list entry.
636 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
637 */
638static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
639{
640 AssertMsg(pNew->pszDesc, ("%RGp-%RGp\n", pNew->GCPhys, pNew->GCPhysLast));
641 Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfR0 == MMHyperCCToR0(pVM, pNew));
642 Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfRC == MMHyperCCToRC(pVM, pNew));
643
644 pgmLock(pVM);
645
646 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
647 pNew->pNextR3 = pRam;
648 pNew->pNextR0 = pRam ? pRam->pSelfR0 : NIL_RTR0PTR;
649 pNew->pNextRC = pRam ? pRam->pSelfRC : NIL_RTRCPTR;
650
651 if (pPrev)
652 {
653 pPrev->pNextR3 = pNew;
654 pPrev->pNextR0 = pNew->pSelfR0;
655 pPrev->pNextRC = pNew->pSelfRC;
656 }
657 else
658 {
659 pVM->pgm.s.pRamRangesR3 = pNew;
660 pVM->pgm.s.pRamRangesR0 = pNew->pSelfR0;
661 pVM->pgm.s.pRamRangesRC = pNew->pSelfRC;
662 }
663 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
664 pgmUnlock(pVM);
665}
666
667
668/**
669 * Unlink an existing RAM range from the list.
670 *
671 * @param pVM Pointer to the shared VM structure.
672 * @param pRam Pointer to the new list entry.
673 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
674 */
675static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
676{
677 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
678 Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfR0 == MMHyperCCToR0(pVM, pRam));
679 Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfRC == MMHyperCCToRC(pVM, pRam));
680
681 pgmLock(pVM);
682
683 PPGMRAMRANGE pNext = pRam->pNextR3;
684 if (pPrev)
685 {
686 pPrev->pNextR3 = pNext;
687 pPrev->pNextR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
688 pPrev->pNextRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
689 }
690 else
691 {
692 Assert(pVM->pgm.s.pRamRangesR3 == pRam);
693 pVM->pgm.s.pRamRangesR3 = pNext;
694 pVM->pgm.s.pRamRangesR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
695 pVM->pgm.s.pRamRangesRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
696 }
697 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
698 pgmUnlock(pVM);
699}
700
701
702/**
703 * Unlink an existing RAM range from the list.
704 *
705 * @param pVM Pointer to the shared VM structure.
706 * @param pRam Pointer to the new list entry.
707 */
708static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
709{
710 pgmLock(pVM);
711
712 /* find prev. */
713 PPGMRAMRANGE pPrev = NULL;
714 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
715 while (pCur != pRam)
716 {
717 pPrev = pCur;
718 pCur = pCur->pNextR3;
719 }
720 AssertFatal(pCur);
721
722 pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
723 pgmUnlock(pVM);
724}
725
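#ifdef VBOX_STRICT
/*
 * Hypothetical strict-build helper (not in the original file) spelling out
 * the invariant the link/unlink/relink routines above maintain: the R0 and
 * RC next pointers must always name the same logical node as the R3 one.
 */
static void pgmR3PhysAssertRamRangeLinks(PVM pVM)
{
    for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
    {
        Assert(pCur->pNextR0 == (pCur->pNextR3 ? pCur->pNextR3->pSelfR0 : NIL_RTR0PTR));
        Assert(pCur->pNextRC == (pCur->pNextR3 ? pCur->pNextR3->pSelfRC : NIL_RTRCPTR));
    }
}
#endif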
726
727/**
728 * Frees a range of pages, replacing them with ZERO pages of the specified type.
729 *
730 * @returns VBox status code.
731 * @param pVM The VM handle.
732 * @param pRam The RAM range in which the pages resides.
733 * @param GCPhys The address of the first page.
734 * @param GCPhysLast The address of the last page.
735 * @param uType The page type to replace them with.
736 */
737static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, uint8_t uType)
738{
739 Assert(PGMIsLockOwner(pVM));
740 uint32_t cPendingPages = 0;
741 PGMMFREEPAGESREQ pReq;
742 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
743 AssertLogRelRCReturn(rc, rc);
744
745 /* Iterate the pages. */
746 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
747 uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> PAGE_SHIFT) + 1;
748 while (cPagesLeft-- > 0)
749 {
750 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
751 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
752
753 PGM_PAGE_SET_TYPE(pPageDst, uType);
754
755 GCPhys += PAGE_SIZE;
756 pPageDst++;
757 }
758
759 if (cPendingPages)
760 {
761 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
762 AssertLogRelRCReturn(rc, rc);
763 }
764 GMMR3FreePagesCleanup(pReq);
765
766 return rc;
767}
768
769/**
770 * Rendezvous callback used by PGMR3PhysChangeMemBalloon that changes the memory balloon size
771 *
772 * This is only called on one of the EMTs while the other ones are waiting for
773 * it to complete this function.
774 *
775 * @returns VINF_SUCCESS (VBox strict status code).
776 * @param pVM The VM handle.
777 * @param pVCpu The VMCPU for the EMT we're being called on. Unused.
778 * @param pvUser User parameter
779 */
780static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysChangeMemBalloonRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
781{
782 uintptr_t *paUser = (uintptr_t *)pvUser;
783 bool fInflate = !!paUser[0];
784 unsigned cPages = paUser[1];
785 RTGCPHYS *paPhysPage = (RTGCPHYS *)paUser[2];
786 uint32_t cPendingPages = 0;
787 PGMMFREEPAGESREQ pReq;
788 int rc;
789
790 Log(("pgmR3PhysChangeMemBalloonRendezvous: %s %x pages\n", (fInflate) ? "inflate" : "deflate", cPages));
791 pgmLock(pVM);
792
793 if (fInflate)
794 {
795 /* Flush the PGM pool cache as we might have stale references to pages that we just freed. */
796 pgmR3PoolClearAllRendezvous(pVM, pVCpu, NULL);
797
798 /* Replace pages with ZERO pages. */
799 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
800 if (RT_FAILURE(rc))
801 {
802 pgmUnlock(pVM);
803 AssertLogRelRC(rc);
804 return rc;
805 }
806
807 /* Iterate the pages. */
808 for (unsigned i = 0; i < cPages; i++)
809 {
810 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, paPhysPage[i]);
811 if ( pPage == NULL
812 || pPage->uTypeY != PGMPAGETYPE_RAM)
813 {
814 Log(("pgmR3PhysChangeMemBalloonRendezvous: invalid physical page %RGp pPage->uTypeY=%d\n", paPhysPage[i], (pPage) ? pPage->uTypeY : 0));
815 break;
816 }
817
818 LogFlow(("balloon page: %RGp\n", paPhysPage[i]));
819
820 /* Flush the shadow PT if this page was previously used as a guest page table. */
821 pgmPoolFlushPageByGCPhys(pVM, paPhysPage[i]);
822
823 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, paPhysPage[i]);
824 if (RT_FAILURE(rc))
825 {
826 pgmUnlock(pVM);
827 AssertLogRelRC(rc);
828 return rc;
829 }
830 Assert(PGM_PAGE_IS_ZERO(pPage));
831 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_BALLOONED);
832 }
833
834 if (cPendingPages)
835 {
836 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
837 if (RT_FAILURE(rc))
838 {
839 pgmUnlock(pVM);
840 AssertLogRelRC(rc);
841 return rc;
842 }
843 }
844 GMMR3FreePagesCleanup(pReq);
845 }
846 else
847 {
848 /* Iterate the pages. */
849 for (unsigned i = 0; i < cPages; i++)
850 {
851 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, paPhysPage[i]);
852 AssertBreak(pPage && pPage->uTypeY == PGMPAGETYPE_RAM);
853
854 LogFlow(("Free ballooned page: %RGp\n", paPhysPage[i]));
855
856 Assert(PGM_PAGE_IS_BALLOONED(pPage));
857
858 /* Change back to zero page. */
859 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
860 }
861
862 /* Note that we currently do not map any ballooned pages in our shadow page tables, so no need to flush the pgm pool. */
863 }
864
865 /* Notify GMM about the balloon change. */
866 rc = GMMR3BalloonedPages(pVM, (fInflate) ? GMMBALLOONACTION_INFLATE : GMMBALLOONACTION_DEFLATE, cPages);
867 if (RT_SUCCESS(rc))
868 {
869 if (!fInflate)
870 {
871 Assert(pVM->pgm.s.cBalloonedPages >= cPages);
872 pVM->pgm.s.cBalloonedPages -= cPages;
873 }
874 else
875 pVM->pgm.s.cBalloonedPages += cPages;
876 }
877
878 pgmUnlock(pVM);
879
880 /* Flush the recompiler's TLB as well. */
881 for (unsigned i = 0; i < pVM->cCpus; i++)
882 CPUMSetChangedFlags(&pVM->aCpus[i], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
883
884 AssertLogRelRC(rc);
885 return rc;
886}
887
888/**
889 * Helper for PGMR3PhysChangeMemBalloon: performs the rendezvous on an EMT
890 * and frees the copied page array afterwards.
891 *
892 * @param pVM The VM handle.
893 * @param fInflate Inflate or deflate memory balloon
894 * @param cPages Number of pages in the paPhysPage array
895 * @param paPhysPage Array of guest physical addresses (freed on completion)
896 */
897static DECLCALLBACK(void) pgmR3PhysChangeMemBalloonHelper(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
898{
899 uintptr_t paUser[3];
900
901 paUser[0] = fInflate;
902 paUser[1] = cPages;
903 paUser[2] = (uintptr_t)paPhysPage;
904 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
905 AssertRC(rc);
906
907 /* Made a copy in PGMR3PhysFreeRamPages; free it here. */
908 RTMemFree(paPhysPage);
909}
910
911/**
912 * Inflate or deflate a memory balloon
913 *
914 * @returns VBox status code.
915 * @param pVM The VM handle.
916 * @param fInflate Inflate or deflate memory balloon
917 * @param cPages Number of pages to free
918 * @param paPhysPage Array of guest physical addresses
919 */
920VMMR3DECL(int) PGMR3PhysChangeMemBalloon(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
921{
922 int rc;
923
924 /* Older additions (ancient non-functioning balloon code) pass wrong physical addresses. */
925 AssertReturn(!(paPhysPage[0] & 0xfff), VERR_INVALID_PARAMETER);
926
927 /* We own the IOM lock here and could cause a deadlock by waiting for another VCPU that is blocking on the IOM lock.
928 * In the SMP case we post a request packet to postpone the job.
929 */
930 if (pVM->cCpus > 1)
931 {
932 unsigned cbPhysPage = cPages * sizeof(paPhysPage[0]);
933 RTGCPHYS *paPhysPageCopy = (RTGCPHYS *)RTMemAlloc(cbPhysPage);
934 AssertReturn(paPhysPageCopy, VERR_NO_MEMORY);
935
936 memcpy(paPhysPageCopy, paPhysPage, cbPhysPage);
937
938 rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysChangeMemBalloonHelper, 4, pVM, fInflate, cPages, paPhysPageCopy);
939 AssertRC(rc);
940 }
941 else
942 {
943 uintptr_t paUser[3];
944
945 paUser[0] = fInflate;
946 paUser[1] = cPages;
947 paUser[2] = (uintptr_t)paPhysPage;
948 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
949 AssertRC(rc);
950 }
951 return rc;
952}
953
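/*
 * Usage sketch (hypothetical caller): inflating the balloon from a
 * guest-additions request that has already been decoded into page-aligned
 * guest physical addresses.
 */
static int myBalloonInflateRequest(PVM pVM, RTGCPHYS *paPages, unsigned cPages)
{
    /* The API asserts page alignment of the first entry; the SMP path copies
       the array, the UP path hands it straight to the rendezvous. */
    return PGMR3PhysChangeMemBalloon(pVM, true /*fInflate*/, cPages, paPages);
}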
954/**
955 * Query the amount of free memory inside VMMR0
956 *
957 * @returns VBox status code.
958 * @param pVM The VM handle.
959 * @param puTotalAllocSize Pointer to total allocated memory inside VMMR0 (in bytes)
960 * @param puTotalFreeSize Pointer to total free (allocated but not used yet) memory inside VMMR0 (in bytes)
961 * @param puTotalBalloonSize Pointer to total ballooned memory inside VMMR0 (in bytes)
962 */
963VMMR3DECL(int) PGMR3QueryVMMMemoryStats(PVM pVM, uint64_t *puTotalAllocSize, uint64_t *puTotalFreeSize, uint64_t *puTotalBalloonSize)
964{
965 int rc;
966
967 uint64_t cAllocPages = 0, cFreePages = 0, cBalloonPages = 0;
968 rc = GMMR3QueryHypervisorMemoryStats(pVM, &cAllocPages, &cFreePages, &cBalloonPages);
969 AssertRCReturn(rc, rc);
970
971 if (puTotalAllocSize)
972 *puTotalAllocSize = cAllocPages * _4K;
973
974 if (puTotalFreeSize)
975 *puTotalFreeSize = cFreePages * _4K;
976
977 if (puTotalBalloonSize)
978 *puTotalBalloonSize = cBalloonPages * _4K;
979
980 return VINF_SUCCESS;
981}
982
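/*
 * Sketch (hypothetical caller): the returned sizes are bytes (pages scaled
 * by _4K inside the function above), so a MB log line just divides by _1M.
 */
static void myLogVmmMemoryStats(PVM pVM)
{
    uint64_t cbAlloc = 0, cbFree = 0, cbBalloon = 0;
    if (RT_SUCCESS(PGMR3QueryVMMMemoryStats(pVM, &cbAlloc, &cbFree, &cbBalloon)))
        LogRel(("PGM: alloc=%llu MB free=%llu MB ballooned=%llu MB\n",
                cbAlloc / _1M, cbFree / _1M, cbBalloon / _1M));
}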
983/**
984 * PGMR3PhysRegisterRam worker that initializes and links a RAM range.
985 *
986 * @param pVM The VM handle.
987 * @param pNew The new RAM range.
988 * @param GCPhys The address of the RAM range.
989 * @param GCPhysLast The last address of the RAM range.
990 * @param RCPtrNew The RC address if the range is floating. NIL_RTRCPTR
991 * if in HMA.
992 * @param R0PtrNew Ditto for R0.
993 * @param pszDesc The description.
994 * @param pPrev The previous RAM range (for linking).
995 */
996static void pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
997 RTRCPTR RCPtrNew, RTR0PTR R0PtrNew, const char *pszDesc, PPGMRAMRANGE pPrev)
998{
999 /*
1000 * Initialize the range.
1001 */
1002 pNew->pSelfR0 = R0PtrNew != NIL_RTR0PTR ? R0PtrNew : MMHyperCCToR0(pVM, pNew);
1003 pNew->pSelfRC = RCPtrNew != NIL_RTRCPTR ? RCPtrNew : MMHyperCCToRC(pVM, pNew);
1004 pNew->GCPhys = GCPhys;
1005 pNew->GCPhysLast = GCPhysLast;
1006 pNew->cb = GCPhysLast - GCPhys + 1;
1007 pNew->pszDesc = pszDesc;
1008 pNew->fFlags = RCPtrNew != NIL_RTRCPTR ? PGM_RAM_RANGE_FLAGS_FLOATING : 0;
1009 pNew->pvR3 = NULL;
1010 pNew->paLSPages = NULL;
1011
1012 uint32_t const cPages = pNew->cb >> PAGE_SHIFT;
1013 RTGCPHYS iPage = cPages;
1014 while (iPage-- > 0)
1015 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
1016
1017 /* Update the page count stats. */
1018 pVM->pgm.s.cZeroPages += cPages;
1019 pVM->pgm.s.cAllPages += cPages;
1020
1021 /*
1022 * Link it.
1023 */
1024 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
1025}
1026
1027
1028/**
1029 * Relocate a floating RAM range.
1030 *
1031 * @copydoc FNPGMRELOCATE.
1032 */
1033static DECLCALLBACK(bool) pgmR3PhysRamRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
1034{
1035 PPGMRAMRANGE pRam = (PPGMRAMRANGE)pvUser;
1036 Assert(pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
1037 Assert(pRam->pSelfRC == GCPtrOld + PAGE_SIZE);
1038
1039 switch (enmMode)
1040 {
1041 case PGMRELOCATECALL_SUGGEST:
1042 return true;
1043 case PGMRELOCATECALL_RELOCATE:
1044 {
1045 /* Update myself and then relink all the ranges. */
1046 pgmLock(pVM);
1047 pRam->pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE);
1048 pgmR3PhysRelinkRamRanges(pVM);
1049 pgmUnlock(pVM);
1050 return true;
1051 }
1052
1053 default:
1054 AssertFailedReturn(false);
1055 }
1056}
1057
1058
1059/**
1060 * PGMR3PhysRegisterRam worker that registers a high chunk.
1061 *
1062 * @returns VBox status code.
1063 * @param pVM The VM handle.
1064 * @param GCPhys The address of the RAM.
1065 * @param cRamPages The number of RAM pages to register.
1066 * @param cbChunk The size of the PGMRAMRANGE guest mapping.
1067 * @param iChunk The chunk number.
1068 * @param pszDesc The RAM range description.
1069 * @param ppPrev Previous RAM range pointer. In/Out.
1070 */
1071static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages,
1072 uint32_t cbChunk, uint32_t iChunk, const char *pszDesc,
1073 PPGMRAMRANGE *ppPrev)
1074{
1075 const char *pszDescChunk = iChunk == 0
1076 ? pszDesc
1077 : MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s (#%u)", pszDesc, iChunk + 1);
1078 AssertReturn(pszDescChunk, VERR_NO_MEMORY);
1079
1080 /*
1081 * Allocate memory for the new chunk.
1082 */
1083 size_t const cChunkPages = RT_ALIGN_Z(RT_UOFFSETOF(PGMRAMRANGE, aPages[cRamPages]), PAGE_SIZE) >> PAGE_SHIFT;
1084 PSUPPAGE paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages);
1085 AssertReturn(paChunkPages, VERR_NO_TMP_MEMORY);
1086 RTR0PTR R0PtrChunk = NIL_RTR0PTR;
1087 void *pvChunk = NULL;
1088 int rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk,
1089#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1090 VMMIsHwVirtExtForced(pVM) ? &R0PtrChunk : NULL,
1091#else
1092 NULL,
1093#endif
1094 paChunkPages);
1095 if (RT_SUCCESS(rc))
1096 {
1097#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1098 if (!VMMIsHwVirtExtForced(pVM))
1099 R0PtrChunk = NIL_RTR0PTR;
1100#else
1101 R0PtrChunk = (uintptr_t)pvChunk;
1102#endif
1103 memset(pvChunk, 0, cChunkPages << PAGE_SHIFT);
1104
1105 PPGMRAMRANGE pNew = (PPGMRAMRANGE)pvChunk;
1106
1107 /*
1108 * Create a mapping and map the pages into it.
1109 * We push these in below the HMA.
1110 */
1111 RTGCPTR GCPtrChunkMap = pVM->pgm.s.GCPtrPrevRamRangeMapping - cbChunk;
1112 rc = PGMR3MapPT(pVM, GCPtrChunkMap, cbChunk, 0 /*fFlags*/, pgmR3PhysRamRangeRelocate, pNew, pszDescChunk);
1113 if (RT_SUCCESS(rc))
1114 {
1115 pVM->pgm.s.GCPtrPrevRamRangeMapping = GCPtrChunkMap;
1116
1117 RTGCPTR const GCPtrChunk = GCPtrChunkMap + PAGE_SIZE;
1118 RTGCPTR GCPtrPage = GCPtrChunk;
1119 for (uint32_t iPage = 0; iPage < cChunkPages && RT_SUCCESS(rc); iPage++, GCPtrPage += PAGE_SIZE)
1120 rc = PGMMap(pVM, GCPtrPage, paChunkPages[iPage].Phys, PAGE_SIZE, 0);
1121 if (RT_SUCCESS(rc))
1122 {
1123 /*
1124 * Ok, init and link the range.
1125 */
1126 pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << PAGE_SHIFT) - 1,
1127 (RTRCPTR)GCPtrChunk, R0PtrChunk, pszDescChunk, *ppPrev);
1128 *ppPrev = pNew;
1129 }
1130 }
1131
1132 if (RT_FAILURE(rc))
1133 SUPR3PageFreeEx(pvChunk, cChunkPages);
1134 }
1135
1136 RTMemTmpFree(paChunkPages);
1137 return rc;
1138}
1139
1140
1141/**
1142 * Sets up a RAM range.
1143 *
1144 * This will check for conflicting registrations, make a resource
1145 * reservation for the memory (with GMM), and setup the per-page
1146 * tracking structures (PGMPAGE).
1147 *
1148 * @returns VBox status code.
1149 * @param pVM Pointer to the shared VM structure.
1150 * @param GCPhys The physical address of the RAM.
1151 * @param cb The size of the RAM.
1152 * @param pszDesc The description - not copied, so, don't free or change it.
1153 */
1154VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
1155{
1156 /*
1157 * Validate input.
1158 */
1159 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
1160 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
1161 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
1162 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
1163 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1164 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
1165 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1166 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1167
1168 pgmLock(pVM);
1169
1170 /*
1171 * Find range location and check for conflicts.
1172 * (The PGM lock is taken above so the walk and the insertion are atomic.)
1173 */
1174 PPGMRAMRANGE pPrev = NULL;
1175 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1176 while (pRam && GCPhysLast >= pRam->GCPhys)
1177 {
1178 if ( GCPhysLast >= pRam->GCPhys
1179 && GCPhys <= pRam->GCPhysLast)
1180 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
1181 GCPhys, GCPhysLast, pszDesc,
1182 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1183 VERR_PGM_RAM_CONFLICT);
1184
1185 /* next */
1186 pPrev = pRam;
1187 pRam = pRam->pNextR3;
1188 }
1189
1190 /*
1191 * Register it with GMM (the API bitches).
1192 */
1193 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
1194 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
1195 if (RT_FAILURE(rc))
1196 {
1197 pgmUnlock(pVM);
1198 return rc;
1199 }
1200
1201 if ( GCPhys >= _4G
1202 && cPages > 256)
1203 {
1204 /*
1205 * The PGMRAMRANGE structures for the high memory can get very big.
1206 * In order to avoid SUPR3PageAllocEx allocation failures due to the
1207 * allocation size limit there and also to avoid being unable to find
1208 * guest mapping space for them, we split this memory up into 4MB in
1209 * (potential) raw-mode configs and 16MB chunks in forced AMD-V/VT-x
1210 * mode.
1211 *
1212 * The first and last page of each mapping are guard pages and marked
1213 * not-present. So, we've got 4186112 and 16769024 bytes available for
1214 * the PGMRAMRANGE structure.
1215 *
1216 * Note! The sizes used here will influence the saved state.
1217 */
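    /*
     * Worked numbers for the byte counts quoted above (assuming 4K pages):
     *    4MB chunk:  4*_1M - 2*PAGE_SIZE = 4194304 - 8192 = 4186112 bytes
     *   16MB chunk: 16*_1M - 2*PAGE_SIZE = 16777216 - 8192 = 16769024 bytes
     * Dividing by a 16-byte PGMPAGE (an assumption here) gives ~261632 and
     * ~1048064 entries before subtracting the PGMRAMRANGE header, which is
     * how the 261616 / 1048048 page counts below come about.
     */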
1218 uint32_t cbChunk;
1219 uint32_t cPagesPerChunk;
1220 if (VMMIsHwVirtExtForced(pVM))
1221 {
1222 cbChunk = 16U*_1M;
1223 cPagesPerChunk = 1048048; /* max ~1048059 */
1224 AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 1048048 < 16U*_1M - PAGE_SIZE * 2);
1225 }
1226 else
1227 {
1228 cbChunk = 4U*_1M;
1229 cPagesPerChunk = 261616; /* max ~261627 */
1230 AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 261616 < 4U*_1M - PAGE_SIZE * 2);
1231 }
1232 AssertRelease(RT_UOFFSETOF(PGMRAMRANGE, aPages[cPagesPerChunk]) + PAGE_SIZE * 2 <= cbChunk);
1233
1234 RTGCPHYS cPagesLeft = cPages;
1235 RTGCPHYS GCPhysChunk = GCPhys;
1236 uint32_t iChunk = 0;
1237 while (cPagesLeft > 0)
1238 {
1239 uint32_t cPagesInChunk = cPagesLeft;
1240 if (cPagesInChunk > cPagesPerChunk)
1241 cPagesInChunk = cPagesPerChunk;
1242
1243 rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, cbChunk, iChunk, pszDesc, &pPrev);
1244 AssertRCReturn(rc, rc);
1245
1246 /* advance */
1247 GCPhysChunk += (RTGCPHYS)cPagesInChunk << PAGE_SHIFT;
1248 cPagesLeft -= cPagesInChunk;
1249 iChunk++;
1250 }
1251 }
1252 else
1253 {
1254 /*
1255 * Allocate, initialize and link the new RAM range.
1256 */
1257 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
1258 PPGMRAMRANGE pNew;
1259 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
1260 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
1261
1262 pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, NIL_RTRCPTR, NIL_RTR0PTR, pszDesc, pPrev);
1263 }
1264 PGMPhysInvalidatePageMapTLB(pVM);
1265 pgmUnlock(pVM);
1266
1267 /*
1268 * Notify REM.
1269 */
1270 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_RAM);
1271
1272 return VINF_SUCCESS;
1273}
1274
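/*
 * Registration sketch with made-up example values (hypothetical caller,
 * e.g. during VM construction): address and size must be page aligned and
 * the call must come from an EMT.
 */
static int myRegisterBaseRam(PVM pVM)
{
    return PGMR3PhysRegisterRam(pVM, 0 /*GCPhys*/, 640 * _1K, "Base RAM");
}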
1275
1276/**
1277 * Worker called by PGMR3InitFinalize if we're configured to pre-allocate RAM.
1278 *
1279 * We do this late in the init process so that all the ROM and MMIO ranges have
1280 * been registered already and we don't go wasting memory on them.
1281 *
1282 * @returns VBox status code.
1283 *
1284 * @param pVM Pointer to the shared VM structure.
1285 */
1286int pgmR3PhysRamPreAllocate(PVM pVM)
1287{
1288 Assert(pVM->pgm.s.fRamPreAlloc);
1289 Log(("pgmR3PhysRamPreAllocate: enter\n"));
1290
1291 /*
1292 * Walk the RAM ranges and allocate all RAM pages, halt at
1293 * the first allocation error.
1294 */
1295 uint64_t cPages = 0;
1296 uint64_t NanoTS = RTTimeNanoTS();
1297 pgmLock(pVM);
1298 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
1299 {
1300 PPGMPAGE pPage = &pRam->aPages[0];
1301 RTGCPHYS GCPhys = pRam->GCPhys;
1302 uint32_t cLeft = pRam->cb >> PAGE_SHIFT;
1303 while (cLeft-- > 0)
1304 {
1305 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
1306 {
1307 switch (PGM_PAGE_GET_STATE(pPage))
1308 {
1309 case PGM_PAGE_STATE_ZERO:
1310 {
1311 int rc = pgmPhysAllocPage(pVM, pPage, GCPhys);
1312 if (RT_FAILURE(rc))
1313 {
1314 LogRel(("PGM: RAM Pre-allocation failed at %RGp (in %s) with rc=%Rrc\n", GCPhys, pRam->pszDesc, rc));
1315 pgmUnlock(pVM);
1316 return rc;
1317 }
1318 cPages++;
1319 break;
1320 }
1321
1322 case PGM_PAGE_STATE_BALLOONED:
1323 case PGM_PAGE_STATE_ALLOCATED:
1324 case PGM_PAGE_STATE_WRITE_MONITORED:
1325 case PGM_PAGE_STATE_SHARED:
1326 /* nothing to do here. */
1327 break;
1328 }
1329 }
1330
1331 /* next */
1332 pPage++;
1333 GCPhys += PAGE_SIZE;
1334 }
1335 }
1336 pgmUnlock(pVM);
1337 NanoTS = RTTimeNanoTS() - NanoTS;
1338
1339 LogRel(("PGM: Pre-allocated %llu pages in %llu ms\n", cPages, NanoTS / 1000000));
1340 Log(("pgmR3PhysRamPreAllocate: returns VINF_SUCCESS\n"));
1341 return VINF_SUCCESS;
1342}
1343
1344
1345/**
1346 * Resets (zeros) the RAM.
1347 *
1348 * ASSUMES that the caller owns the PGM lock.
1349 *
1350 * @returns VBox status code.
1351 * @param pVM Pointer to the shared VM structure.
1352 */
1353int pgmR3PhysRamReset(PVM pVM)
1354{
1355 Assert(PGMIsLockOwner(pVM));
1356
1357 /* Reset the memory balloon. */
1358 int rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_RESET, 0);
1359 AssertRC(rc);
1360
1361#ifdef VBOX_WITH_PAGE_SHARING
1362 /* Clear all registered shared modules. */
1363 rc = GMMR3ResetSharedModules(pVM);
1364 AssertRC(rc);
1365#endif
1366
1367 /*
1368 * We batch up pages that should be freed instead of calling GMM for
1369 * each and every one of them.
1370 */
1371 uint32_t cPendingPages = 0;
1372 PGMMFREEPAGESREQ pReq;
1373 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1374 AssertLogRelRCReturn(rc, rc);
1375
1376 /*
1377 * Walk the ram ranges.
1378 */
1379 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
1380 {
1381 uint32_t iPage = pRam->cb >> PAGE_SHIFT;
1382 AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
1383
1384 if (!pVM->pgm.s.fRamPreAlloc)
1385 {
1386 /* Replace all RAM pages by ZERO pages. */
1387 while (iPage-- > 0)
1388 {
1389 PPGMPAGE pPage = &pRam->aPages[iPage];
1390 switch (PGM_PAGE_GET_TYPE(pPage))
1391 {
1392 case PGMPAGETYPE_RAM:
1393 /* Do not replace pages that are part of a 2 MB contiguous range with zero pages; zero them instead. */
1394 if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
1395 {
1396 void *pvPage;
1397 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pvPage);
1398 AssertLogRelRCReturn(rc, rc);
1399 ASMMemZeroPage(pvPage);
1400 }
1401 else
1402 if (PGM_PAGE_IS_BALLOONED(pPage))
1403 {
1404 /* Turn into a zero page; the balloon status is lost when the VM reboots. */
1405 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
1406 }
1407 else
1408 if (!PGM_PAGE_IS_ZERO(pPage))
1409 {
1410 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1411 AssertLogRelRCReturn(rc, rc);
1412 }
1413 break;
1414
1415 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
1416 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1417 break;
1418
1419 case PGMPAGETYPE_MMIO2:
1420 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
1421 case PGMPAGETYPE_ROM:
1422 case PGMPAGETYPE_MMIO:
1423 break;
1424 default:
1425 AssertFailed();
1426 }
1427 } /* for each page */
1428 }
1429 else
1430 {
1431 /* Zero the memory. */
1432 while (iPage-- > 0)
1433 {
1434 PPGMPAGE pPage = &pRam->aPages[iPage];
1435 switch (PGM_PAGE_GET_TYPE(pPage))
1436 {
1437 case PGMPAGETYPE_RAM:
1438 switch (PGM_PAGE_GET_STATE(pPage))
1439 {
1440 case PGM_PAGE_STATE_ZERO:
1441 break;
1442
1443 case PGM_PAGE_STATE_BALLOONED:
1444 /* Turn into a zero page; the balloon status is lost when the VM reboots. */
1445 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
1446 break;
1447
1448 case PGM_PAGE_STATE_SHARED:
1449 case PGM_PAGE_STATE_WRITE_MONITORED:
1450 rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1451 AssertLogRelRCReturn(rc, rc);
1452 /* no break */
1453
1454 case PGM_PAGE_STATE_ALLOCATED:
1455 {
1456 void *pvPage;
1457 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pvPage);
1458 AssertLogRelRCReturn(rc, rc);
1459 ASMMemZeroPage(pvPage);
1460 break;
1461 }
1462 }
1463 break;
1464
1465 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
1466 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1467 break;
1468
1469 case PGMPAGETYPE_MMIO2:
1470 case PGMPAGETYPE_ROM_SHADOW:
1471 case PGMPAGETYPE_ROM:
1472 case PGMPAGETYPE_MMIO:
1473 break;
1474 default:
1475 AssertFailed();
1476
1477 }
1478 } /* for each page */
1479 }
1480
1481 }
1482
1483 /*
1484 * Finish off any pages pending freeing.
1485 */
1486 if (cPendingPages)
1487 {
1488 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
1489 AssertLogRelRCReturn(rc, rc);
1490 }
1491 GMMR3FreePagesCleanup(pReq);
1492
1493 return VINF_SUCCESS;
1494}
1495
1496
1497/**
1498 * This is the interface IOM is using to register an MMIO region.
1499 *
1500 * It will check for conflicts and ensure that a RAM range structure
1501 * is present before calling the PGMR3HandlerPhysicalRegister API to
1502 * register the callbacks.
1503 *
1504 * @returns VBox status code.
1505 *
1506 * @param pVM Pointer to the shared VM structure.
1507 * @param GCPhys The start of the MMIO region.
1508 * @param cb The size of the MMIO region.
1509 * @param pfnHandlerR3 The address of the ring-3 handler. (IOMR3MMIOHandler)
1510 * @param pvUserR3 The user argument for R3.
1511 * @param pfnHandlerR0 The address of the ring-0 handler. (IOMMMIOHandler)
1512 * @param pvUserR0 The user argument for R0.
1513 * @param pfnHandlerRC The address of the RC handler. (IOMMMIOHandler)
1514 * @param pvUserRC The user argument for RC.
1515 * @param pszDesc The description of the MMIO region.
1516 */
1517VMMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb,
1518 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
1519 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
1520 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
1521 R3PTRTYPE(const char *) pszDesc)
1522{
1523 /*
1524 * Assert on some assumption.
1525 */
1526 VM_ASSERT_EMT(pVM);
1527 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1528 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1529 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1530 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
1531
1532 /*
1533 * Make sure there's a RAM range structure for the region.
1534 */
1535 int rc;
1536 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1537 bool fRamExists = false;
1538 PPGMRAMRANGE pRamPrev = NULL;
1539 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1540 while (pRam && GCPhysLast >= pRam->GCPhys)
1541 {
1542 if ( GCPhysLast >= pRam->GCPhys
1543 && GCPhys <= pRam->GCPhysLast)
1544 {
1545 /* Simplification: all within the same range. */
1546 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
1547 && GCPhysLast <= pRam->GCPhysLast,
1548 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
1549 GCPhys, GCPhysLast, pszDesc,
1550 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1551 VERR_PGM_RAM_CONFLICT);
1552
1553 /* Check that it's all RAM or MMIO pages. */
1554 PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1555 uint32_t cLeft = cb >> PAGE_SHIFT;
1556 while (cLeft-- > 0)
1557 {
1558 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
1559 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
1560 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
1561 GCPhys, GCPhysLast, pszDesc, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
1562 VERR_PGM_RAM_CONFLICT);
1563 pPage++;
1564 }
1565
1566 /* Looks good. */
1567 fRamExists = true;
1568 break;
1569 }
1570
1571 /* next */
1572 pRamPrev = pRam;
1573 pRam = pRam->pNextR3;
1574 }
1575 PPGMRAMRANGE pNew;
1576 if (fRamExists)
1577 {
1578 pNew = NULL;
1579
1580 /*
1581 * Make all the pages in the range MMIO/ZERO pages, freeing any
1582 * RAM pages currently mapped here. This might not be 100% correct
1583 * for PCI memory, but we're doing the same thing for MMIO2 pages.
1584 */
1585 rc = pgmLock(pVM);
1586 if (RT_SUCCESS(rc))
1587 {
1588 rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, PGMPAGETYPE_MMIO);
1589 pgmUnlock(pVM);
1590 }
1591 AssertRCReturn(rc, rc);
1592 }
1593 else
1594 {
1595 pgmLock(pVM);
1596
1597 /*
1598 * No RAM range, insert an ad hoc one.
1599 *
1600 * Note that we don't have to tell REM about this range because
1601 * PGMHandlerPhysicalRegisterEx will do that for us.
1602 */
1603 Log(("PGMR3PhysMMIORegister: Adding ad hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));
1604
1605 const uint32_t cPages = cb >> PAGE_SHIFT;
1606 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
1607 rc = MMHyperAlloc(pVM, cbRamRange, 16, MM_TAG_PGM_PHYS, (void **)&pNew);
1608 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
1609
1610 /* Initialize the range. */
1611 pNew->pSelfR0 = MMHyperCCToR0(pVM, pNew);
1612 pNew->pSelfRC = MMHyperCCToRC(pVM, pNew);
1613 pNew->GCPhys = GCPhys;
1614 pNew->GCPhysLast = GCPhysLast;
1615 pNew->cb = cb;
1616 pNew->pszDesc = pszDesc;
1617 pNew->fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO;
1618 pNew->pvR3 = NULL;
1619 pNew->paLSPages = NULL;
1620
1621 uint32_t iPage = cPages;
1622 while (iPage-- > 0)
1623 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
1624 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);
1625
1626 /* update the page count stats. */
1627 pVM->pgm.s.cPureMmioPages += cPages;
1628 pVM->pgm.s.cAllPages += cPages;
1629
1630 /* link it */
1631 pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
1632
1633 pgmUnlock(pVM);
1634 }
1635
1636 /*
1637 * Register the access handler.
1638 */
1639 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_MMIO, GCPhys, GCPhysLast,
1640 pfnHandlerR3, pvUserR3,
1641 pfnHandlerR0, pvUserR0,
1642 pfnHandlerRC, pvUserRC, pszDesc);
1643 if ( RT_FAILURE(rc)
1644 && !fRamExists)
1645 {
1646 pVM->pgm.s.cPureMmioPages -= cb >> PAGE_SHIFT;
1647 pVM->pgm.s.cAllPages -= cb >> PAGE_SHIFT;
1648
1649 /* remove the ad hoc range. */
1650 pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
1651 pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
1652 MMHyperFree(pVM, pNew);
1653 }
1654 PGMPhysInvalidatePageMapTLB(pVM);
1655
1656 return rc;
1657}
1658
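/*
 * Sketch of an IOM-style registration (hypothetical handler and values; real
 * callers pass IOMR3MMIOHandler and friends as the docs below note). The
 * ring-0 and RC handlers are left NIL here for a ring-3 only region.
 */
static DECLCALLBACK(int) myMmioHandlerR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf,
                                         size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
    NOREF(pVM); NOREF(GCPhys); NOREF(pvPhys); NOREF(pvBuf);
    NOREF(cbBuf); NOREF(enmAccessType); NOREF(pvUser);
    return VINF_SUCCESS;
}

static int myRegisterMmioRegion(PVM pVM, RTGCPHYS GCPhysBase)
{
    return PGMR3PhysMMIORegister(pVM, GCPhysBase, 4 * _1K,
                                 myMmioHandlerR3, NULL /*pvUserR3*/,
                                 NIL_RTR0PTR, NIL_RTR0PTR,
                                 NIL_RTRCPTR, NIL_RTRCPTR,
                                 "My MMIO region");
}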
1659
1660/**
1661 * This is the interface IOM is using to deregister an MMIO region.
1662 *
1663 * It will take care of calling PGMHandlerPhysicalDeregister and clean up
1664 * any ad hoc PGMRAMRANGE left behind.
1665 *
1666 * @returns VBox status code.
1667 * @param pVM Pointer to the shared VM structure.
1668 * @param GCPhys The start of the MMIO region.
1669 * @param cb The size of the MMIO region.
1670 */
1671VMMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
1672{
1673 VM_ASSERT_EMT(pVM);
1674
1675 /*
1676 * First deregister the handler, then check if we should remove the ram range.
1677 */
1678 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
1679 if (RT_SUCCESS(rc))
1680 {
1681 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1682 PPGMRAMRANGE pRamPrev = NULL;
1683 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1684 while (pRam && GCPhysLast >= pRam->GCPhys)
1685 {
1686 /** @todo We're being a bit too careful here. rewrite. */
1687 if ( GCPhysLast == pRam->GCPhysLast
1688 && GCPhys == pRam->GCPhys)
1689 {
1690 Assert(pRam->cb == cb);
1691
1692 /*
1693 * See if all the pages are dead MMIO pages.
1694 */
1695 uint32_t const cPages = cb >> PAGE_SHIFT;
1696 bool fAllMMIO = true;
1697 uint32_t iPage = 0;
1698 uint32_t cLeft = cPages;
1699 while (cLeft-- > 0)
1700 {
1701 PPGMPAGE pPage = &pRam->aPages[iPage];
1702 if ( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO
1703 /*|| not-out-of-action later */)
1704 {
1705 fAllMMIO = false;
1706 Assert(PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1707 AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
1708 break;
1709 }
1710 Assert(PGM_PAGE_IS_ZERO(pPage));
1711 pPage++;
1712 }
1713 if (fAllMMIO)
1714 {
1715 /*
1716 * Ad-hoc range, unlink and free it.
1717 */
1718 Log(("PGMR3PhysMMIODeregister: Freeing ad hoc MMIO range for %RGp-%RGp %s\n",
1719 GCPhys, GCPhysLast, pRam->pszDesc));
1720
1721 pVM->pgm.s.cAllPages -= cPages;
1722 pVM->pgm.s.cPureMmioPages -= cPages;
1723
1724 pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
1725 pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
1726 MMHyperFree(pVM, pRam);
1727 break;
1728 }
1729 }
1730
1731 /*
1732 * Range match? It will all be within one range (see PGMAllHandler.cpp).
1733 */
1734 if ( GCPhysLast >= pRam->GCPhys
1735 && GCPhys <= pRam->GCPhysLast)
1736 {
1737 Assert(GCPhys >= pRam->GCPhys);
1738 Assert(GCPhysLast <= pRam->GCPhysLast);
1739
1740 /*
1741 * Turn the pages back into RAM pages.
1742 */
1743 uint32_t iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1744 uint32_t cLeft = cb >> PAGE_SHIFT;
1745 while (cLeft--)
1746 {
1747 PPGMPAGE pPage = &pRam->aPages[iPage];
1748 AssertMsg(PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
1749 AssertMsg(PGM_PAGE_IS_ZERO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
1750 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
1751 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_RAM);
1752 }
1753 break;
1754 }
1755
1756 /* next */
1757 pRamPrev = pRam;
1758 pRam = pRam->pNextR3;
1759 }
1760 }
1761
1762 PGMPhysInvalidatePageMapTLB(pVM);
1763 return rc;
1764}
1765
1766
1767/**
1768 * Locate a MMIO2 range.
1769 *
1770 * @returns Pointer to the MMIO2 range.
1771 * @param pVM Pointer to the shared VM structure.
1772 * @param pDevIns The device instance owning the region.
1773 * @param iRegion The region.
1774 */
1775DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
1776{
1777 /*
1778 * Search the list.
1779 */
1780 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
1781 if ( pCur->pDevInsR3 == pDevIns
1782 && pCur->iRegion == iRegion)
1783 return pCur;
1784 return NULL;
1785}
1786
1787
1788/**
1789 * Allocate and register an MMIO2 region.
1790 *
1791 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's
1792 * RAM associated with a device. It is also non-shared memory with a
1793 * permanent ring-3 mapping and page backing (presently).
1794 *
1795 * A MMIO2 range may overlap with base memory if a lot of RAM
1796 * is configured for the VM, in which case we'll drop the base
1797 * memory pages. Presently we will make no attempt to preserve
1798 * anything that happens to be present in the base memory that
1799 * is replaced; this is of course incorrect, but it's too much
1800 * effort.
1801 *
1802 * @returns VBox status code.
1803 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the memory.
1804 * @retval VERR_ALREADY_EXISTS if the region already exists.
1805 *
1806 * @param pVM Pointer to the shared VM structure.
1807 * @param pDevIns The device instance owning the region.
1808 * @param iRegion The region number. If the MMIO2 memory is a PCI I/O region
1809 * this number has to be the number of that region. Otherwise
1810 * it can be any number up to UINT8_MAX.
1811 * @param cb The size of the region. Must be page aligned.
1812 * @param fFlags Reserved for future use, must be zero.
1813 * @param ppv Where to store the pointer to the ring-3 mapping of the memory.
1814 * @param pszDesc The description.
1815 */
1816VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc)
1817{
1818 /*
1819 * Validate input.
1820 */
1821 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1822 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1823 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1824 AssertPtrReturn(ppv, VERR_INVALID_POINTER);
1825 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1826 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
1827 AssertReturn(pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
1828 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1829 AssertReturn(cb, VERR_INVALID_PARAMETER);
1830 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
1831
1832 const uint32_t cPages = cb >> PAGE_SHIFT;
1833 AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
1834 AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY);
1835
1836 /*
1837 * For the 2nd+ instance, mangle the description string so it's unique.
1838 */
1839 if (pDevIns->iInstance > 0) /** @todo Move to PDMDevHlp.cpp and use a real string cache. */
1840 {
1841 pszDesc = MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s [%u]", pszDesc, pDevIns->iInstance);
1842 if (!pszDesc)
1843 return VERR_NO_MEMORY;
1844 }
1845
1846 /*
1847 * Try to reserve and allocate the backing memory first as this is what is
1848 * most likely to fail.
1849 */
1850 int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc);
1851 if (RT_SUCCESS(rc))
1852 {
1853 void *pvPages;
1854 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE));
1855 if (paPages)
1856 rc = SUPR3PageAllocEx(cPages, 0 /*fFlags*/, &pvPages, NULL /*pR0Ptr*/, paPages);
 else
 rc = VERR_NO_TMP_MEMORY; /* RTMemTmpAlloc failed */
1857 if (RT_SUCCESS(rc))
1858 {
1859 memset(pvPages, 0, cPages * PAGE_SIZE);
1860
1861 /*
1862 * Create the MMIO2 range record for it.
1863 */
1864 const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]);
1865 PPGMMMIO2RANGE pNew;
1866 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
1867 AssertLogRelMsgRC(rc, ("cbRamRange=%zu\n", cbRange));
1868 if (RT_SUCCESS(rc))
1869 {
1870 pNew->pDevInsR3 = pDevIns;
1871 pNew->pvR3 = pvPages;
1872 //pNew->pNext = NULL;
1873 //pNew->fMapped = false;
1874 //pNew->fOverlapping = false;
1875 pNew->iRegion = iRegion;
1876 pNew->idSavedState = UINT8_MAX;
1877 pNew->RamRange.pSelfR0 = MMHyperCCToR0(pVM, &pNew->RamRange);
1878 pNew->RamRange.pSelfRC = MMHyperCCToRC(pVM, &pNew->RamRange);
1879 pNew->RamRange.GCPhys = NIL_RTGCPHYS;
1880 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
1881 pNew->RamRange.pszDesc = pszDesc;
1882 pNew->RamRange.cb = cb;
1883 pNew->RamRange.fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2;
1884 pNew->RamRange.pvR3 = pvPages;
1885 //pNew->RamRange.paLSPages = NULL;
1886
1887 uint32_t iPage = cPages;
1888 while (iPage-- > 0)
1889 {
1890 PGM_PAGE_INIT(&pNew->RamRange.aPages[iPage],
1891 paPages[iPage].Phys, NIL_GMM_PAGEID,
1892 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
1893 }
1894
1895 /* update page count stats */
1896 pVM->pgm.s.cAllPages += cPages;
1897 pVM->pgm.s.cPrivatePages += cPages;
1898
1899 /*
1900 * Link it into the list.
1901 * Since there is no particular order, just push it.
1902 */
1903 pgmLock(pVM);
1904 pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3;
1905 pVM->pgm.s.pMmio2RangesR3 = pNew;
1906 pgmUnlock(pVM);
1907
1908 *ppv = pvPages;
1909 RTMemTmpFree(paPages);
1910 PGMPhysInvalidatePageMapTLB(pVM);
1911 return VINF_SUCCESS;
1912 }
1913
1914 SUPR3PageFreeEx(pvPages, cPages);
1915 }
1916 RTMemTmpFree(paPages);
1917 MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pszDesc);
1918 }
1919 if (pDevIns->iInstance > 0)
1920 MMR3HeapFree((void *)pszDesc);
1921 return rc;
1922}
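/*
 * Illustrative usage sketch (not part of the original file): a device would
 * typically register a RAM-like region like this from its construction
 * callback. The 4 MB size, region number 0 and the "pThis->pvVRAMR3" member
 * are hypothetical names made up for this example.
 *
 *     void *pvVRAMR3;
 *     int rc = PGMR3PhysMMIO2Register(pVM, pDevIns, 0, 4 * _1M, 0, &pvVRAMR3, "Example VRAM");
 *     if (RT_FAILURE(rc))
 *         return rc;
 *     pThis->pvVRAMR3 = pvVRAMR3;  // permanent ring-3 mapping, valid until deregistration
 */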
1923
1924
1925/**
1926 * Deregisters and frees an MMIO2 region.
1927 *
1928 * Any physical (and virtual) access handlers registered for the region must
1929 * be deregistered before calling this function.
1930 *
1931 * @returns VBox status code.
1932 * @param pVM Pointer to the shared VM structure.
1933 * @param pDevIns The device instance owning the region.
1934 * @param iRegion The region. If it's UINT32_MAX it'll be a wildcard match.
1935 */
1936VMMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
1937{
1938 /*
1939 * Validate input.
1940 */
1941 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1942 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1943 AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
1944
1945 pgmLock(pVM);
1946 int rc = VINF_SUCCESS;
1947 unsigned cFound = 0;
1948 PPGMMMIO2RANGE pPrev = NULL;
1949 PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3;
1950 while (pCur)
1951 {
1952 if ( pCur->pDevInsR3 == pDevIns
1953 && ( iRegion == UINT32_MAX
1954 || pCur->iRegion == iRegion))
1955 {
1956 cFound++;
1957
1958 /*
1959 * Unmap it if it's mapped.
1960 */
1961 if (pCur->fMapped)
1962 {
1963 int rc2 = PGMR3PhysMMIO2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);
1964 AssertRC(rc2);
1965 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1966 rc = rc2;
1967 }
1968
1969 /*
1970 * Unlink it
1971 */
1972 PPGMMMIO2RANGE pNext = pCur->pNextR3;
1973 if (pPrev)
1974 pPrev->pNextR3 = pNext;
1975 else
1976 pVM->pgm.s.pMmio2RangesR3 = pNext;
1977 pCur->pNextR3 = NULL;
1978
1979 /*
1980 * Free the memory.
1981 */
1982 int rc2 = SUPR3PageFreeEx(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT);
1983 AssertRC(rc2);
1984 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1985 rc = rc2;
1986
1987 uint32_t const cPages = pCur->RamRange.cb >> PAGE_SHIFT;
1988 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pCur->RamRange.pszDesc);
1989 AssertRC(rc2);
1990 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1991 rc = rc2;
1992
1993 /* we're leaking hyper memory here if done at runtime. */
1994#ifdef VBOX_STRICT
1995 VMSTATE const enmState = VMR3GetState(pVM);
1996 AssertMsg( enmState == VMSTATE_POWERING_OFF
1997 || enmState == VMSTATE_POWERING_OFF_LS
1998 || enmState == VMSTATE_OFF
1999 || enmState == VMSTATE_OFF_LS
2000 || enmState == VMSTATE_DESTROYING
2001 || enmState == VMSTATE_TERMINATED
2002 || enmState == VMSTATE_CREATING
2003 , ("%s\n", VMR3GetStateName(enmState)));
2004#endif
2005 /*rc = MMHyperFree(pVM, pCur);
2006 AssertRCReturn(rc, rc); - not safe, see the alloc call. */
2007
2008
2009 /* update page count stats */
2010 pVM->pgm.s.cAllPages -= cPages;
2011 pVM->pgm.s.cPrivatePages -= cPages;
2012
2013 /* next */
2014 pCur = pNext;
2015 }
2016 else
2017 {
2018 pPrev = pCur;
2019 pCur = pCur->pNextR3;
2020 }
2021 }
2022 PGMPhysInvalidatePageMapTLB(pVM);
2023 pgmUnlock(pVM);
2024 return !cFound && iRegion != UINT32_MAX ? VERR_NOT_FOUND : rc;
2025}
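/*
 * Illustrative usage sketch (not part of the original file): a device
 * destruct callback can drop all of its regions in one call by passing the
 * UINT32_MAX wildcard instead of a region number:
 *
 *     int rc = PGMR3PhysMMIO2Deregister(pVM, pDevIns, UINT32_MAX);
 *     AssertRC(rc);
 */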
2026
2027
2028/**
2029 * Maps a MMIO2 region.
2030 *
2031 * This is done when a guest / the bios / state loading changes the
2032 * PCI config. The replacing of base memory has the same restrictions
2033 * as during registration, of course.
2034 *
2035 * @returns VBox status code.
2036 *
2037 * @param pVM Pointer to the shared VM structure.
2038 * @param pDevIns The device instance owning the region.
 * @param iRegion The region.
 * @param GCPhys The guest physical address to map the region at.
2039 */
2040VMMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
2041{
2042 /*
2043 * Validate input
2044 */
2045 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2046 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2047 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2048 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
2049 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
2050 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2051
2052 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2053 AssertReturn(pCur, VERR_NOT_FOUND);
2054 AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER);
2055 Assert(pCur->RamRange.GCPhys == NIL_RTGCPHYS);
2056 Assert(pCur->RamRange.GCPhysLast == NIL_RTGCPHYS);
2057
2058 const RTGCPHYS GCPhysLast = GCPhys + pCur->RamRange.cb - 1;
2059 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2060
2061 /*
2062 * Find our location in the ram range list, checking for
2063 * restrictions we don't bother implementing yet (partial overlaps).
2064 */
2065 bool fRamExists = false;
2066 PPGMRAMRANGE pRamPrev = NULL;
2067 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
2068 while (pRam && GCPhysLast >= pRam->GCPhys)
2069 {
2070 if ( GCPhys <= pRam->GCPhysLast
2071 && GCPhysLast >= pRam->GCPhys)
2072 {
2073 /* completely within? */
2074 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
2075 && GCPhysLast <= pRam->GCPhysLast,
2076 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
2077 GCPhys, GCPhysLast, pCur->RamRange.pszDesc,
2078 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
2079 VERR_PGM_RAM_CONFLICT);
2080 fRamExists = true;
2081 break;
2082 }
2083
2084 /* next */
2085 pRamPrev = pRam;
2086 pRam = pRam->pNextR3;
2087 }
2088 if (fRamExists)
2089 {
2090 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2091 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
2092 while (cPagesLeft-- > 0)
2093 {
2094 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
2095 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
2096 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc),
2097 VERR_PGM_RAM_CONFLICT);
2098 pPage++;
2099 }
2100 }
2101 Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n",
2102 GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc));
2103
2104 /*
2105 * Make the changes.
2106 */
2107 pgmLock(pVM);
2108
2109 pCur->RamRange.GCPhys = GCPhys;
2110 pCur->RamRange.GCPhysLast = GCPhysLast;
2111 pCur->fMapped = true;
2112 pCur->fOverlapping = fRamExists;
2113
2114 if (fRamExists)
2115 {
2116/** @todo use pgmR3PhysFreePageRange here. */
2117 uint32_t cPendingPages = 0;
2118 PGMMFREEPAGESREQ pReq;
2119 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2120 AssertLogRelRCReturn(rc, rc);
2121
2122 /* replace the pages, freeing all present RAM pages. */
2123 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
2124 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2125 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
2126 while (cPagesLeft-- > 0)
2127 {
2128 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
2129 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
2130
2131 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
2132 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhys);
2133 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_MMIO2);
2134 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ALLOCATED);
2135
2136 pVM->pgm.s.cZeroPages--;
2137 GCPhys += PAGE_SIZE;
2138 pPageSrc++;
2139 pPageDst++;
2140 }
2141
2142 /* Flush physical page map TLB. */
2143 PGMPhysInvalidatePageMapTLB(pVM);
2144
2145 if (cPendingPages)
2146 {
2147 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2148 AssertLogRelRCReturn(rc, rc);
2149 }
2150 GMMR3FreePagesCleanup(pReq);
2151 pgmUnlock(pVM);
2152 }
2153 else
2154 {
2155 RTGCPHYS cb = pCur->RamRange.cb;
2156
2157 /* link in the ram range */
2158 pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev);
2159 pgmUnlock(pVM);
2160
2161 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2);
2162 }
2163
2164 PGMPhysInvalidatePageMapTLB(pVM);
2165 return VINF_SUCCESS;
2166}
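/*
 * Illustrative usage sketch (not part of the original file): the map call is
 * normally made from a PCI region mapping callback when the guest programs
 * the BAR. "GCPhysAddress" and "GCPhysOld" stand in for hypothetical callback
 * arguments giving the new and previous base addresses:
 *
 *     int rc;
 *     if (GCPhysAddress != NIL_RTGCPHYS)
 *         rc = PGMR3PhysMMIO2Map(pVM, pDevIns, 0, GCPhysAddress);
 *     else  // the BAR was disabled, undo the previous mapping
 *         rc = PGMR3PhysMMIO2Unmap(pVM, pDevIns, 0, GCPhysOld);
 */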
2167
2168
2169/**
2170 * Unmaps a MMIO2 region.
2171 *
2172 * This is done when a guest / the bios / state loading changes the
2173 * PCI config. Any base memory pages replaced when the region was mapped
2174 * are restored to zero pages.
 *
 * @returns VBox status code.
 *
 * @param pVM Pointer to the shared VM structure.
 * @param pDevIns The device instance owning the region.
 * @param iRegion The region.
 * @param GCPhys The guest physical address the region is currently mapped at.
2175 */
2176VMMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
2177{
2178 /*
2179 * Validate input
2180 */
2181 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2182 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2183 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2184 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
2185 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
2186 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2187
2188 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2189 AssertReturn(pCur, VERR_NOT_FOUND);
2190 AssertReturn(pCur->fMapped, VERR_WRONG_ORDER);
2191 AssertReturn(pCur->RamRange.GCPhys == GCPhys, VERR_INVALID_PARAMETER);
2192 Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS);
2193
2194 Log(("PGMR3PhysMMIO2Unmap: %RGp-%RGp %s\n",
2195 pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc));
2196
2197 /*
2198 * Unmap it.
2199 */
2200 pgmLock(pVM);
2201
2202 RTGCPHYS GCPhysRangeREM;
2203 RTGCPHYS cbRangeREM;
2204 bool fInformREM;
2205 if (pCur->fOverlapping)
2206 {
2207 /* Restore the RAM pages we've replaced. */
2208 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
2209 while (pRam->GCPhysLast < pCur->RamRange.GCPhys) /* the RAM range list is sorted ascending by GCPhys */
2210 pRam = pRam->pNextR3;
2211
2212 RTHCPHYS const HCPhysZeroPg = pVM->pgm.s.HCPhysZeroPg;
2213 Assert(HCPhysZeroPg != 0 && HCPhysZeroPg != NIL_RTHCPHYS);
2214 PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2215 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
2216 while (cPagesLeft-- > 0)
2217 {
2218 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhysZeroPg);
2219 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_RAM);
2220 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ZERO);
2221 PGM_PAGE_SET_PAGEID(pPageDst, NIL_GMM_PAGEID);
2222 PGM_PAGE_SET_PDE_TYPE(pPageDst, PGM_PAGE_PDE_TYPE_DONTCARE);
2223
2224 pVM->pgm.s.cZeroPages++;
2225 pPageDst++;
2226 }
2227
2228 /* Flush physical page map TLB. */
2229 PGMPhysInvalidatePageMapTLB(pVM);
2230
2231 GCPhysRangeREM = NIL_RTGCPHYS; /* shuts up gcc */
2232 cbRangeREM = RTGCPHYS_MAX; /* ditto */
2233 fInformREM = false;
2234 }
2235 else
2236 {
2237 GCPhysRangeREM = pCur->RamRange.GCPhys;
2238 cbRangeREM = pCur->RamRange.cb;
2239 fInformREM = true;
2240
2241 pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange);
2242 }
2243
2244 pCur->RamRange.GCPhys = NIL_RTGCPHYS;
2245 pCur->RamRange.GCPhysLast = NIL_RTGCPHYS;
2246 pCur->fOverlapping = false;
2247 pCur->fMapped = false;
2248
2249 PGMPhysInvalidatePageMapTLB(pVM);
2250 pgmUnlock(pVM);
2251
2252 if (fInformREM)
2253 REMR3NotifyPhysRamDeregister(pVM, GCPhysRangeREM, cbRangeREM);
2254
2255 return VINF_SUCCESS;
2256}
2257
2258
2259/**
2260 * Checks if the given address is an MMIO2 base address or not.
2261 *
2262 * @returns true/false accordingly.
2263 * @param pVM Pointer to the shared VM structure.
2264 * @param pDevIns The owner of the memory, optional.
2265 * @param GCPhys The address to check.
2266 */
2267VMMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
2268{
2269 /*
2270 * Validate input
2271 */
2272 VM_ASSERT_EMT_RETURN(pVM, false);
2273 AssertPtrReturn(pDevIns, false);
2274 AssertReturn(GCPhys != NIL_RTGCPHYS, false);
2275 AssertReturn(GCPhys != 0, false);
2276 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);
2277
2278 /*
2279 * Search the list.
2280 */
2281 pgmLock(pVM);
2282 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
2283 if (pCur->RamRange.GCPhys == GCPhys)
2284 {
2285 Assert(pCur->fMapped);
2286 pgmUnlock(pVM);
2287 return true;
2288 }
2289 pgmUnlock(pVM);
2290 return false;
2291}
2292
2293
2294/**
2295 * Gets the HC physical address of a page in the MMIO2 region.
2296 *
2297 * This API is intended for MMHyper and shouldn't be called
2298 * by anyone else...
2299 *
2300 * @returns VBox status code.
2301 * @param pVM Pointer to the shared VM structure.
2302 * @param pDevIns The owner of the memory, optional.
2303 * @param iRegion The region.
2304 * @param off The page expressed an offset into the MMIO2 region.
2305 * @param pHCPhys Where to store the result.
2306 */
2307VMMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
2308{
2309 /*
2310 * Validate input
2311 */
2312 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2313 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2314 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2315
2316 pgmLock(pVM);
2317 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2318 AssertReturn(pCur, VERR_NOT_FOUND);
2319 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2320
2321 PCPGMPAGE pPage = &pCur->RamRange.aPages[off >> PAGE_SHIFT];
2322 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
2323 pgmUnlock(pVM);
2324 return VINF_SUCCESS;
2325}
2326
2327
2328/**
2329 * Maps a portion of an MMIO2 region into kernel space (host).
2330 *
2331 * The kernel mapping will become invalid when the MMIO2 memory is deregistered
2332 * or the VM is terminated.
2333 *
2334 * @return VBox status code.
2335 *
2336 * @param pVM Pointer to the shared VM structure.
2337 * @param pDevIns The device owning the MMIO2 memory.
2338 * @param iRegion The region.
2339 * @param off The offset into the region. Must be page aligned.
2340 * @param cb The number of bytes to map. Must be page aligned.
2341 * @param pszDesc Mapping description.
2342 * @param pR0Ptr Where to store the R0 address.
2343 */
2344VMMR3DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
2345 const char *pszDesc, PRTR0PTR pR0Ptr)
2346{
2347 /*
2348 * Validate input.
2349 */
2350 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2351 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2352 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2353
2354 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2355 AssertReturn(pCur, VERR_NOT_FOUND);
2356 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2357 AssertReturn(cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2358 AssertReturn(off + cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2359
2360 /*
2361 * Pass the request on to the support library/driver.
2362 */
2363 int rc = SUPR3PageMapKernel(pCur->pvR3, off, cb, 0, pR0Ptr);
2364
2365 return rc;
2366}
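/*
 * Illustrative usage sketch (not part of the original file): giving a ring-0
 * device part access to the first page of region 0. The description string
 * is arbitrary:
 *
 *     RTR0PTR R0PtrPage0;
 *     int rc = PGMR3PhysMMIO2MapKernel(pVM, pDevIns, 0, 0, PAGE_SIZE, "Example R0 view", &R0PtrPage0);
 */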
2367
2368
2369/**
2370 * Registers a ROM image.
2371 *
2372 * Shadowed ROM images require double the amount of backing memory, so
2373 * don't use them unless you have to. Shadowing of ROM images is a process
2374 * whereby we can select where the reads go and where the writes go. On real
2375 * hardware the chipset provides means to configure this. We provide
2376 * PGMR3PhysProtectROM() for this purpose.
2377 *
2378 * A read-only copy of the ROM image will always be kept around while we
2379 * will allocate RAM pages for the changes on demand (unless all memory
2380 * is configured to be preallocated).
2381 *
2382 * @returns VBox status.
2383 * @param pVM VM Handle.
2384 * @param pDevIns The device instance owning the ROM.
2385 * @param GCPhys First physical address in the range.
2386 * Must be page aligned!
2387 * @param cb The size of the range (in bytes).
2388 * Must be page aligned!
2389 * @param pvBinary Pointer to the binary data backing the ROM image.
2390 * This must be exactly \a cb in size.
2391 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAGS_SHADOWED
2392 * and/or PGMPHYS_ROM_FLAGS_PERMANENT_BINARY.
2393 * @param pszDesc Pointer to description string. This must not be freed.
2394 *
2395 * @remark There is no way to remove the ROM yet, neither automatically on device
2396 * cleanup nor manually from the device. This isn't difficult in any way, it's
2397 * just not something we expect to be necessary for a while.
2398 */
2399VMMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
2400 const void *pvBinary, uint32_t fFlags, const char *pszDesc)
2401{
2402 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p fFlags=%#x pszDesc=%s\n",
2403 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, fFlags, pszDesc));
2404
2405 /*
2406 * Validate input.
2407 */
2408 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2409 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
2410 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
2411 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2412 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2413 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
2414 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
2415 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAGS_SHADOWED | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
2416 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
2417
2418 const uint32_t cPages = cb >> PAGE_SHIFT;
2419
2420 /*
2421 * Find the ROM location in the ROM list first.
2422 */
2423 PPGMROMRANGE pRomPrev = NULL;
2424 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
2425 while (pRom && GCPhysLast >= pRom->GCPhys)
2426 {
2427 if ( GCPhys <= pRom->GCPhysLast
2428 && GCPhysLast >= pRom->GCPhys)
2429 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
2430 GCPhys, GCPhysLast, pszDesc,
2431 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
2432 VERR_PGM_RAM_CONFLICT);
2433 /* next */
2434 pRomPrev = pRom;
2435 pRom = pRom->pNextR3;
2436 }
2437
2438 /*
2439 * Find the RAM location and check for conflicts.
2440 *
2441 * Conflict detection is a bit different than for RAM
2442 * registration since a ROM can be located within a RAM
2443 * range. So, what we have to check for is other memory
2444 * types (other than RAM that is) and that we don't span
2445 * more than one RAM range (lazy).
2446 */
2447 bool fRamExists = false;
2448 PPGMRAMRANGE pRamPrev = NULL;
2449 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
2450 while (pRam && GCPhysLast >= pRam->GCPhys)
2451 {
2452 if ( GCPhys <= pRam->GCPhysLast
2453 && GCPhysLast >= pRam->GCPhys)
2454 {
2455 /* completely within? */
2456 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
2457 && GCPhysLast <= pRam->GCPhysLast,
2458 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
2459 GCPhys, GCPhysLast, pszDesc,
2460 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
2461 VERR_PGM_RAM_CONFLICT);
2462 fRamExists = true;
2463 break;
2464 }
2465
2466 /* next */
2467 pRamPrev = pRam;
2468 pRam = pRam->pNextR3;
2469 }
2470 if (fRamExists)
2471 {
2472 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2473 uint32_t cPagesLeft = cPages;
2474 while (cPagesLeft-- > 0)
2475 {
2476 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
2477 ("%RGp (%R[pgmpage]) isn't a RAM page - registering %RGp-%RGp (%s).\n",
2478 pRam->GCPhys + ((RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << PAGE_SHIFT),
2479 pPage, GCPhys, GCPhysLast, pszDesc), VERR_PGM_RAM_CONFLICT);
2480 Assert(PGM_PAGE_IS_ZERO(pPage));
2481 pPage++;
2482 }
2483 }
2484
2485 /*
2486 * Update the base memory reservation if necessary.
2487 */
2488 uint32_t cExtraBaseCost = fRamExists ? 0 : cPages;
2489 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
2490 cExtraBaseCost += cPages;
2491 if (cExtraBaseCost)
2492 {
2493 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
2494 if (RT_FAILURE(rc))
2495 return rc;
2496 }
2497
2498 /*
2499 * Allocate memory for the virgin copy of the RAM.
2500 */
2501 PGMMALLOCATEPAGESREQ pReq;
2502 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
2503 AssertRCReturn(rc, rc);
2504
2505 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2506 {
2507 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
2508 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
2509 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
2510 }
2511
2512 pgmLock(pVM);
2513 rc = GMMR3AllocatePagesPerform(pVM, pReq);
2514 pgmUnlock(pVM);
2515 if (RT_FAILURE(rc))
2516 {
2517 GMMR3AllocatePagesCleanup(pReq);
2518 return rc;
2519 }
2520
2521 /*
2522 * Allocate the new ROM range and RAM range (if necessary).
2523 */
2524 PPGMROMRANGE pRomNew;
2525 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
2526 if (RT_SUCCESS(rc))
2527 {
2528 PPGMRAMRANGE pRamNew = NULL;
2529 if (!fRamExists)
2530 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
2531 if (RT_SUCCESS(rc))
2532 {
2533 pgmLock(pVM);
2534
2535 /*
2536 * Initialize and insert the RAM range (if required).
2537 */
2538 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
2539 if (!fRamExists)
2540 {
2541 pRamNew->pSelfR0 = MMHyperCCToR0(pVM, pRamNew);
2542 pRamNew->pSelfRC = MMHyperCCToRC(pVM, pRamNew);
2543 pRamNew->GCPhys = GCPhys;
2544 pRamNew->GCPhysLast = GCPhysLast;
2545 pRamNew->cb = cb;
2546 pRamNew->pszDesc = pszDesc;
2547 pRamNew->fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_ROM;
2548 pRamNew->pvR3 = NULL;
2549 pRamNew->paLSPages = NULL;
2550
2551 PPGMPAGE pPage = &pRamNew->aPages[0];
2552 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
2553 {
2554 PGM_PAGE_INIT(pPage,
2555 pReq->aPages[iPage].HCPhysGCPhys,
2556 pReq->aPages[iPage].idPage,
2557 PGMPAGETYPE_ROM,
2558 PGM_PAGE_STATE_ALLOCATED);
2559
2560 pRomPage->Virgin = *pPage;
2561 }
2562
2563 pVM->pgm.s.cAllPages += cPages;
2564 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
2565 }
2566 else
2567 {
2568 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2569 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
2570 {
2571 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_ROM);
2572 PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
2573 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
2574 PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);
2575
2576 pRomPage->Virgin = *pPage;
2577 }
2578
2579 pRamNew = pRam;
2580
2581 pVM->pgm.s.cZeroPages -= cPages;
2582 }
2583 pVM->pgm.s.cPrivatePages += cPages;
2584
2585 /* Flush physical page map TLB. */
2586 PGMPhysInvalidatePageMapTLB(pVM);
2587
2588 pgmUnlock(pVM);
2589
2590
2591 /*
2592 * !HACK ALERT! REM + (Shadowed) ROM ==> mess.
2593 *
2594 * If it's shadowed we'll register the handler after the ROM notification
2595 * so we get the access handler callbacks that we should. If it isn't
2596 * shadowed we'll do it the other way around to make REM use the built-in
2597 * ROM behavior and not the handler behavior (which is to route all access
2598 * to PGM atm).
2599 */
2600 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
2601 {
2602 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, true /* fShadowed */);
2603 rc = PGMR3HandlerPhysicalRegister(pVM,
2604 fFlags & PGMPHYS_ROM_FLAGS_SHADOWED
2605 ? PGMPHYSHANDLERTYPE_PHYSICAL_ALL
2606 : PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
2607 GCPhys, GCPhysLast,
2608 pgmR3PhysRomWriteHandler, pRomNew,
2609 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
2610 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
2611 }
2612 else
2613 {
2614 rc = PGMR3HandlerPhysicalRegister(pVM,
2615 fFlags & PGMPHYS_ROM_FLAGS_SHADOWED
2616 ? PGMPHYSHANDLERTYPE_PHYSICAL_ALL
2617 : PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
2618 GCPhys, GCPhysLast,
2619 pgmR3PhysRomWriteHandler, pRomNew,
2620 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
2621 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
2622 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false /* fShadowed */);
2623 }
2624 if (RT_SUCCESS(rc))
2625 {
2626 pgmLock(pVM);
2627
2628 /*
2629 * Copy the image over to the virgin pages.
2630 * This must be done after linking in the RAM range.
2631 */
2632 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
2633 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
2634 {
2635 void *pvDstPage;
2636 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pvDstPage);
2637 if (RT_FAILURE(rc))
2638 {
2639 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
2640 break;
2641 }
2642 memcpy(pvDstPage, (const uint8_t *)pvBinary + (iPage << PAGE_SHIFT), PAGE_SIZE);
2643 }
2644 if (RT_SUCCESS(rc))
2645 {
2646 /*
2647 * Initialize the ROM range.
2648 * Note that the Virgin member of the pages has already been initialized above.
2649 */
2650 pRomNew->GCPhys = GCPhys;
2651 pRomNew->GCPhysLast = GCPhysLast;
2652 pRomNew->cb = cb;
2653 pRomNew->fFlags = fFlags;
2654 pRomNew->idSavedState = UINT8_MAX;
2655 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY ? pvBinary : NULL;
2656 pRomNew->pszDesc = pszDesc;
2657
2658 for (unsigned iPage = 0; iPage < cPages; iPage++)
2659 {
2660 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
2661 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
2662 PGM_PAGE_INIT_ZERO(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
2663 }
2664
2665 /* update the page count stats for the shadow pages. */
2666 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
2667 {
2668 pVM->pgm.s.cZeroPages += cPages;
2669 pVM->pgm.s.cAllPages += cPages;
2670 }
2671
2672 /*
2673 * Insert the ROM range, tell REM and return successfully.
2674 */
2675 pRomNew->pNextR3 = pRom;
2676 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
2677 pRomNew->pNextRC = pRom ? MMHyperCCToRC(pVM, pRom) : NIL_RTRCPTR;
2678
2679 if (pRomPrev)
2680 {
2681 pRomPrev->pNextR3 = pRomNew;
2682 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
2683 pRomPrev->pNextRC = MMHyperCCToRC(pVM, pRomNew);
2684 }
2685 else
2686 {
2687 pVM->pgm.s.pRomRangesR3 = pRomNew;
2688 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
2689 pVM->pgm.s.pRomRangesRC = MMHyperCCToRC(pVM, pRomNew);
2690 }
2691
2692 PGMPhysInvalidatePageMapTLB(pVM);
2693 GMMR3AllocatePagesCleanup(pReq);
2694 pgmUnlock(pVM);
2695 return VINF_SUCCESS;
2696 }
2697
2698 /* bail out */
2699
2700 pgmUnlock(pVM);
2701 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
2702 AssertRC(rc2);
2703 pgmLock(pVM);
2704 }
2705
2706 if (!fRamExists)
2707 {
2708 pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
2709 MMHyperFree(pVM, pRamNew);
2710 }
2711 }
2712 MMHyperFree(pVM, pRomNew);
2713 }
2714
2715 /** @todo Purge the mapping cache or something... */
2716 GMMR3FreeAllocatedPages(pVM, pReq);
2717 GMMR3AllocatePagesCleanup(pReq);
2718 pgmUnlock(pVM);
2719 return rc;
2720}
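/*
 * Illustrative usage sketch (not part of the original file): registering a
 * shadowed 128 KB system BIOS image at the top of the low megabyte during VM
 * construction (note the VMSTATE_CREATING requirement above). "pvBiosBin" is
 * a hypothetical pointer to a permanently allocated copy of the image:
 *
 *     int rc = PGMR3PhysRomRegister(pVM, pDevIns, 0xe0000, 128 * _1K, pvBiosBin,
 *                                   PGMPHYS_ROM_FLAGS_SHADOWED | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY,
 *                                   "Example BIOS");
 */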
2721
2722
2723/**
2724 * \#PF Handler callback for ROM write accesses.
2725 *
2726 * @returns VINF_SUCCESS if the handler has carried out the operation.
2727 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
2728 * @param pVM VM Handle.
2729 * @param GCPhys The physical address the guest is writing to.
2730 * @param pvPhys The HC mapping of that address.
2731 * @param pvBuf What the guest is reading/writing.
2732 * @param cbBuf How much it's reading/writing.
2733 * @param enmAccessType The access type.
2734 * @param pvUser User argument.
2735 */
2736static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
2737{
2738 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
2739 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
2740 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
2741 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2742 Log5(("pgmR3PhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
2743
2744 if (enmAccessType == PGMACCESSTYPE_READ)
2745 {
2746 switch (pRomPage->enmProt)
2747 {
2748 /*
2749 * Take the default action.
2750 */
2751 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
2752 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
2753 case PGMROMPROT_READ_ROM_WRITE_RAM:
2754 case PGMROMPROT_READ_RAM_WRITE_RAM:
2755 return VINF_PGM_HANDLER_DO_DEFAULT;
2756
2757 default:
2758 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
2759 pRom->aPages[iPage].enmProt, iPage, GCPhys),
2760 VERR_INTERNAL_ERROR);
2761 }
2762 }
2763 else
2764 {
2765 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
2766 switch (pRomPage->enmProt)
2767 {
2768 /*
2769 * Ignore writes.
2770 */
2771 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
2772 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
2773 return VINF_SUCCESS;
2774
2775 /*
2776 * Write to the ram page.
2777 */
2778 case PGMROMPROT_READ_ROM_WRITE_RAM:
2779 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
2780 {
2781 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
2782 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
2783
2784 /*
2785 * Take the lock, do lazy allocation, map the page and copy the data.
2786 *
2787 * Note that we have to bypass the mapping TLB since it works on
2788 * guest physical addresses and entering the shadow page would
2789 * kind of screw things up...
2790 */
2791 int rc = pgmLock(pVM);
2792 AssertRC(rc);
2793
2794 PPGMPAGE pShadowPage = &pRomPage->Shadow;
2795 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
2796 {
2797 pShadowPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
2798 AssertLogRelReturn(pShadowPage, VERR_INTERNAL_ERROR);
2799 }
2800
2801 void *pvDstPage;
2802 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
2803 if (RT_SUCCESS(rc))
2804 {
2805 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
2806 pRomPage->LiveSave.fWrittenTo = true;
2807 }
2808
2809 pgmUnlock(pVM);
2810 return rc;
2811 }
2812
2813 default:
2814 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
2815 pRom->aPages[iPage].enmProt, iPage, GCPhys),
2816 VERR_INTERNAL_ERROR);
2817 }
2818 }
2819}
2820
2821
2822/**
2823 * Called by PGMR3Reset to reset the shadow, switch to the virgin,
2824 * and verify that the virgin part is untouched.
2825 *
2826 * This is done after the normal memory has been cleared.
2827 *
2828 * ASSUMES that the caller owns the PGM lock.
2829 *
2830 * @param pVM The VM handle.
2831 */
2832int pgmR3PhysRomReset(PVM pVM)
2833{
2834 Assert(PGMIsLockOwner(pVM));
2835 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2836 {
2837 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
2838
2839 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
2840 {
2841 /*
2842 * Reset the physical handler.
2843 */
2844 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
2845 AssertRCReturn(rc, rc);
2846
2847 /*
2848 * What we do with the shadow pages depends on the memory
2849 * preallocation option. If not enabled, we'll just throw
2850 * out all the dirty pages and replace them by the zero page.
2851 */
2852 if (!pVM->pgm.s.fRamPreAlloc)
2853 {
2854 /* Free the dirty pages. */
2855 uint32_t cPendingPages = 0;
2856 PGMMFREEPAGESREQ pReq;
2857 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2858 AssertRCReturn(rc, rc);
2859
2860 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2861 if ( !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow)
2862 && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow))
2863 {
2864 Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) == PGM_PAGE_STATE_ALLOCATED);
2865 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRom->aPages[iPage].Shadow, pRom->GCPhys + (iPage << PAGE_SHIFT));
2866 AssertLogRelRCReturn(rc, rc);
2867 }
2868
2869 if (cPendingPages)
2870 {
2871 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2872 AssertLogRelRCReturn(rc, rc);
2873 }
2874 GMMR3FreePagesCleanup(pReq);
2875 }
2876 else
2877 {
2878 /* clear all the shadow pages. */
2879 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2880 {
2881 Assert(!PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow));
2882 void *pvDstPage;
2883 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
2884 rc = pgmPhysPageMakeWritableAndMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pvDstPage);
2885 if (RT_FAILURE(rc))
2886 break;
2887 ASMMemZeroPage(pvDstPage);
2888 }
2889 AssertRCReturn(rc, rc);
2890 }
2891 }
2892
2893#ifdef VBOX_STRICT
2894 /*
2895 * Verify that the virgin page is unchanged if possible.
2896 */
2897 if (pRom->pvOriginal)
2898 {
2899 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
2900 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbSrcPage += PAGE_SIZE)
2901 {
2902 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
2903 void const *pvDstPage;
2904 int rc = pgmPhysPageMapReadOnly(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pvDstPage);
2905 if (RT_FAILURE(rc))
2906 break;
2907 if (memcmp(pvDstPage, pbSrcPage, PAGE_SIZE))
2908 LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
2909 GCPhys, pRom->pszDesc));
2910 }
2911 }
2912#endif
2913 }
2914
2915 return VINF_SUCCESS;
2916}
2917
2918
2919/**
2920 * Change the shadowing of a range of ROM pages.
2921 *
2922 * This is intended for implementing chipset specific memory registers
2923 * and will not be very strict about the input. It will silently ignore
2924 * any pages that are not part of a shadowed ROM.
2925 *
2926 * @returns VBox status code.
2927 * @retval VINF_PGM_SYNC_CR3
2928 *
2929 * @param pVM Pointer to the shared VM structure.
2930 * @param GCPhys Where to start. Page aligned.
2931 * @param cb How much to change. Page aligned.
2932 * @param enmProt The new ROM protection.
2933 */
2934VMMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
2935{
2936 /*
2937 * Check input
2938 */
2939 if (!cb)
2940 return VINF_SUCCESS;
2941 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2942 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2943 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2944 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2945 AssertReturn(enmProt > PGMROMPROT_INVALID && enmProt < PGMROMPROT_END, VERR_INVALID_PARAMETER);
2946
2947 /*
2948 * Process the request.
2949 */
2950 pgmLock(pVM);
2951 int rc = VINF_SUCCESS;
2952 bool fFlushTLB = false;
2953 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2954 {
2955 if ( GCPhys <= pRom->GCPhysLast
2956 && GCPhysLast >= pRom->GCPhys
2957 && (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
2958 {
2959 /*
2960 * Iterate the relevant pages and make the necessary changes.
2961 */
2962 bool fChanges = false;
2963 uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
2964 ? pRom->cb >> PAGE_SHIFT
2965 : (GCPhysLast - pRom->GCPhys + 1) >> PAGE_SHIFT;
2966 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
2967 iPage < cPages;
2968 iPage++)
2969 {
2970 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2971 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
2972 {
2973 fChanges = true;
2974
2975 /* flush references to the page. */
2976 PPGMPAGE pRamPage = pgmPhysGetPage(&pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));
2977 int rc2 = pgmPoolTrackFlushGCPhys(pVM, pRom->GCPhys + (iPage << PAGE_SHIFT), pRamPage, &fFlushTLB);
2978 if (rc2 != VINF_SUCCESS && (rc == VINF_SUCCESS || RT_FAILURE(rc2)))
2979 rc = rc2;
2980
2981 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
2982 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
2983
2984 *pOld = *pRamPage;
2985 *pRamPage = *pNew;
2986 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
2987 }
2988 pRomPage->enmProt = enmProt;
2989 }
2990
2991 /*
2992 * Reset the access handler if we made changes, no need
2993 * to optimize this.
2994 */
2995 if (fChanges)
2996 {
2997 int rc2 = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
2998 if (RT_FAILURE(rc2))
2999 {
3000 pgmUnlock(pVM);
3001 AssertRC(rc);
3002 return rc2;
3003 }
3004 }
3005
3006 /* Advance - cb isn't updated. */
3007 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
3008 }
3009 }
3010 pgmUnlock(pVM);
3011 if (fFlushTLB)
3012 PGM_INVL_ALL_VCPU_TLBS(pVM);
3013
3014 return rc;
3015}
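/*
 * Illustrative usage sketch (not part of the original file): a chipset
 * device emulating PAM-style registers would translate a "read RAM, write
 * RAM" setting for the 0xe0000 segment into something like:
 *
 *     int rc = PGMR3PhysRomProtect(pVM, 0xe0000, 128 * _1K, PGMROMPROT_READ_RAM_WRITE_RAM);
 *     // a VINF_PGM_SYNC_CR3 return means the caller must see to the pending CR3 resync
 */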
3016
3017
3018/**
3019 * Sets the Address Gate 20 state.
3020 *
3021 * @param pVCpu The VCPU to operate on.
3022 * @param fEnable True if the gate should be enabled.
3023 * False if the gate should be disabled.
3024 */
3025VMMDECL(void) PGMR3PhysSetA20(PVMCPU pVCpu, bool fEnable)
3026{
3027 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVCpu->pgm.s.fA20Enabled));
3028 if (pVCpu->pgm.s.fA20Enabled != fEnable)
3029 {
3030 pVCpu->pgm.s.fA20Enabled = fEnable;
3031 pVCpu->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
3032 REMR3A20Set(pVCpu->pVMR3, pVCpu, fEnable);
3033 /** @todo we're not handling this correctly for VT-x / AMD-V. See #2911 */
3034 }
3035}
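/*
 * Illustrative note (not part of the original file) on the mask expression
 * above; it relies on !fEnable evaluating to 0 or 1:
 *
 *     fEnable = true:  ~(RTGCPHYS)(0 << 20) = ~(RTGCPHYS)0 -> all address bits pass through
 *     fEnable = false: ~(RTGCPHYS)(1 << 20)                -> bit 20 is masked off, giving the 8086-style 1 MB wrap-around
 */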
3036
3037
3038/**
3039 * Tree enumeration callback for dealing with age rollover.
3040 * It will perform a simple compression of the current age.
3041 */
3042static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
3043{
3044 Assert(PGMIsLockOwner((PVM)pvUser));
3045 /* Age compression - ASSUMES iNow == 4. */
3046 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
3047 if (pChunk->iAge >= UINT32_C(0xffffff00))
3048 pChunk->iAge = 3;
3049 else if (pChunk->iAge >= UINT32_C(0xfffff000))
3050 pChunk->iAge = 2;
3051 else if (pChunk->iAge)
3052 pChunk->iAge = 1;
3053 else /* iAge = 0 */
3054 pChunk->iAge = 4;
3055
3056 /* reinsert */
3057 PVM pVM = (PVM)pvUser;
3058 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
3059 pChunk->AgeCore.Key = pChunk->iAge;
3060 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
3061 return 0;
3062}
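/*
 * Illustrative note (not part of the original file): with iNow wrapping back
 * to 4, the compression above maps the old 32-bit ages onto 1..4 while
 * preserving their relative order (a higher iAge means used more recently):
 *
 *     iAge >= 0xffffff00 -> 3
 *     iAge >= 0xfffff000 -> 2
 *     other non-zero     -> 1 (oldest)
 *     iAge == 0          -> 4 (== iNow, in use / most recent)
 */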
3063
3064
3065/**
3066 * Tree enumeration callback that updates the chunks that have
3067 * been used since the last ageing, stamping them with the current generation (iNow).
3068 */
3069static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
3070{
3071 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
3072 if (!pChunk->iAge)
3073 {
3074 PVM pVM = (PVM)pvUser;
3075 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
3076 pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
3077 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
3078 }
3079
3080 return 0;
3081}
3082
3083
3084/**
3085 * Performs ageing of the ring-3 chunk mappings.
3086 *
3087 * @param pVM The VM handle.
3088 */
3089VMMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
3090{
3091 pgmLock(pVM);
3092 pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
3093 pVM->pgm.s.ChunkR3Map.iNow++;
3094 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
3095 {
3096 pVM->pgm.s.ChunkR3Map.iNow = 4;
3097 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
3098 }
3099 else
3100 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
3101 pgmUnlock(pVM);
3102}
3103
3104
3105/**
3106 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
3107 */
3108typedef struct PGMR3PHYSCHUNKUNMAPCB
3109{
3110 PVM pVM; /**< The VM handle. */
3111 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
3112} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
3113
3114
3115/**
3116 * Callback used to find the mapping that's been unused for
3117 * the longest time.
3118 */
3119static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
3120{
3121 do
3122 {
3123 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
3124 if ( pChunk->iAge
3125 && !pChunk->cRefs)
3126 {
3127 /*
3128 * Check that it's not in any of the TLBs.
3129 */
3130 PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
3131 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
3132 if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
3133 {
3134 pChunk = NULL;
3135 break;
3136 }
3137 if (pChunk)
3138 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
3139 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
3140 {
3141 pChunk = NULL;
3142 break;
3143 }
3144 if (pChunk)
3145 {
3146 ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
3147 return 1; /* done */
3148 }
3149 }
3150
3151 /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
3152 pNode = pNode->pList;
3153 } while (pNode);
3154 return 0;
3155}
3156
3157
3158/**
3159 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
3160 *
3161 * The candidate will not be part of any TLBs, so no need to flush
3162 * anything afterwards.
3163 *
3164 * @returns Chunk id.
3165 * @param pVM The VM handle.
3166 */
3167static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
3168{
3169 Assert(PGMIsLockOwner(pVM));
3170
3171 /*
3172 * Do tree ageing first?
3173 */
3174 if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
3175 PGMR3PhysChunkAgeing(pVM);
3176
3177 /*
3178 * Enumerate the age tree starting with the left most node.
3179 */
3180 PGMR3PHYSCHUNKUNMAPCB Args;
3181 Args.pVM = pVM;
3182 Args.pChunk = NULL;
3183 if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
3184 return Args.pChunk->Core.Key;
3185 return INT32_MAX;
3186}
3187
3188
3189/**
3190 * Maps the given chunk into the ring-3 mapping cache.
3191 *
3192 * This will call ring-0.
3193 *
3194 * @returns VBox status code.
3195 * @param pVM The VM handle.
3196 * @param idChunk The chunk in question.
3197 * @param ppChunk Where to store the chunk tracking structure.
3198 *
3199 * @remarks Called from within the PGM critical section.
3200 */
3201int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
3202{
3203 int rc;
3204
3205 Assert(PGMIsLockOwner(pVM));
3206 /*
3207 * Allocate a new tracking structure first.
3208 */
3209#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
3210 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
3211#else
3212 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3UkHeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk), NULL);
3213#endif
3214 AssertReturn(pChunk, VERR_NO_MEMORY);
3215 pChunk->Core.Key = idChunk;
3216 pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
3217 pChunk->iAge = 0;
3218 pChunk->cRefs = 0;
3219 pChunk->cPermRefs = 0;
3220 pChunk->pv = NULL;
3221
3222 /*
3223 * Request the ring-0 part to map the chunk in question and if
3224 * necessary unmap another one to make space in the mapping cache.
3225 */
3226 GMMMAPUNMAPCHUNKREQ Req;
3227 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
3228 Req.Hdr.cbReq = sizeof(Req);
3229 Req.pvR3 = NULL;
3230 Req.idChunkMap = idChunk;
3231 Req.idChunkUnmap = NIL_GMM_CHUNKID;
3232 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
3233 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
3234/** @todo This is wrong. Any thread in the VM process should be able to do this,
3235 * there are dependencies on this. What currently saves the day is that
3236 * we don't unmap anything and that all non-zero memory will therefore
3237 * be present when non-EMTs try to access it. */
3238 rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
3239 if (RT_SUCCESS(rc))
3240 {
3241 /*
3242 * Update the tree.
3243 */
3244 /* insert the new one. */
3245 AssertPtr(Req.pvR3);
3246 pChunk->pv = Req.pvR3;
3247 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
3248 AssertRelease(fRc);
3249 pVM->pgm.s.ChunkR3Map.c++;
3250
3251 fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
3252 AssertRelease(fRc);
3253
3254 /* remove the unmapped one. */
3255 if (Req.idChunkUnmap != NIL_GMM_CHUNKID)
3256 {
3257 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
3258 AssertRelease(pUnmappedChunk);
3259 pUnmappedChunk->pv = NULL;
3260 pUnmappedChunk->Core.Key = UINT32_MAX;
3261#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
3262 MMR3HeapFree(pUnmappedChunk);
3263#else
3264 MMR3UkHeapFree(pVM, pUnmappedChunk, MM_TAG_PGM_CHUNK_MAPPING);
3265#endif
3266 pVM->pgm.s.ChunkR3Map.c--;
3267
3268 /* Chunk removed, so clear the page map TLB as well (might still be referenced). */
3269 PGMPhysInvalidatePageMapTLB(pVM);
3270 }
3271 }
3272 else
3273 {
3274 AssertRC(rc);
3275#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
3276 MMR3HeapFree(pChunk);
3277#else
3278 MMR3UkHeapFree(pVM, pChunk, MM_TAG_PGM_CHUNK_MAPPING);
3279#endif
3280 pChunk = NULL;
3281 }
3282
3283 *ppChunk = pChunk;
3284 return rc;
3285}
3286
3287
3288/**
3289 * For VMMCALLRING3_PGM_MAP_CHUNK, considered internal.
3290 *
3291 * @returns see pgmR3PhysChunkMap.
3292 * @param pVM The VM handle.
3293 * @param idChunk The chunk to map.
3294 */
3295VMMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
3296{
3297 PPGMCHUNKR3MAP pChunk;
3298 int rc;
3299
3300 pgmLock(pVM);
3301 rc = pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
3302 pgmUnlock(pVM);
3303 return rc;
3304}
3305
3306
3307/**
3308 * Invalidates the TLB for the ring-3 mapping cache.
3309 *
3310 * @param pVM The VM handle.
3311 */
3312VMMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
3313{
3314 pgmLock(pVM);
3315 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
3316 {
3317 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
3318 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
3319 }
3320 /* The page map TLB references chunks, so invalidate that one too. */
3321 PGMPhysInvalidatePageMapTLB(pVM);
3322 pgmUnlock(pVM);
3323}
3324
3325
3326/**
3327 * Response to VMMCALLRING3_PGM_ALLOCATE_LARGE_PAGE to allocate a large (2MB) page
3328 * for use with a nested paging PDE.
3329 *
3330 * @returns The following VBox status codes.
3331 * @retval VINF_SUCCESS on success.
3332 * @retval VINF_EM_NO_MEMORY if we're out of memory.
3333 *
3334 * @param pVM The VM handle.
3335 * @param GCPhys GC physical start address of the 2 MB range
3336 */
3337VMMR3DECL(int) PGMR3PhysAllocateLargeHandyPage(PVM pVM, RTGCPHYS GCPhys)
3338{
3339 pgmLock(pVM);
3340
3341 STAM_PROFILE_START(&pVM->pgm.s.StatAllocLargePage, a);
3342 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE, 0, NULL);
3343 STAM_PROFILE_STOP(&pVM->pgm.s.StatAllocLargePage, a);
3344 if (RT_SUCCESS(rc))
3345 {
3346 Assert(pVM->pgm.s.cLargeHandyPages == 1);
3347
3348 uint32_t idPage = pVM->pgm.s.aLargeHandyPage[0].idPage;
3349 RTHCPHYS HCPhys = pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys;
3350
3351 void *pv;
3352
3353 /* Map the large page into our address space.
3354 *
3355 * Note: assuming that within the 2 MB range:
3356 * - the host backing is contiguous, i.e. GCPhys + PAGE_SIZE maps to HCPhys + PAGE_SIZE (the whole point of this exercise)
3357 * - the user space mapping is continuous as well
3358 * - page id (GCPhys) + 1 = page id (GCPhys + PAGE_SIZE)
3359 */
3360 rc = pgmPhysPageMapByPageID(pVM, idPage, HCPhys, &pv);
3361 AssertLogRelMsg(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc", idPage, HCPhys, rc));
3362
3363 if (RT_SUCCESS(rc))
3364 {
3365 /*
3366 * Clear the pages.
3367 */
3368 STAM_PROFILE_START(&pVM->pgm.s.StatClearLargePage, b);
3369 for (unsigned i = 0; i < _2M/PAGE_SIZE; i++)
3370 {
3371 ASMMemZeroPage(pv);
3372
3373 PPGMPAGE pPage;
3374 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
3375 AssertRC(rc);
3376
3377 Assert(PGM_PAGE_IS_ZERO(pPage));
3378 STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
3379 pVM->pgm.s.cZeroPages--;
3380
3381 /*
3382 * Do the PGMPAGE modifications.
3383 */
3384 pVM->pgm.s.cPrivatePages++;
3385 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
3386 PGM_PAGE_SET_PAGEID(pPage, idPage);
3387 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
3388 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PDE);
3389
3390 /* Somewhat dirty assumption that page ids are increasing. */
3391 idPage++;
3392
3393 HCPhys += PAGE_SIZE;
3394 GCPhys += PAGE_SIZE;
3395
3396 pv = (void *)((uintptr_t)pv + PAGE_SIZE);
3397
3398 Log3(("PGMR3PhysAllocateLargePage: idPage=%#x HCPhys=%RGp\n", idPage, HCPhys));
3399 }
3400 STAM_PROFILE_STOP(&pVM->pgm.s.StatClearLargePage, b);
3401
3402 /* Flush all TLBs. */
3403 PGM_INVL_ALL_VCPU_TLBS(pVM);
3404 PGMPhysInvalidatePageMapTLB(pVM);
3405 }
3406 pVM->pgm.s.cLargeHandyPages = 0;
3407 }
3408
3409 pgmUnlock(pVM);
3410 return rc;
3411}
3412
3413
3414/**
3415 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES.
3416 *
3417 * This function will also work the VM_FF_PGM_NO_MEMORY force action flag, to
3418 * signal and clear the out of memory condition. When contracted, this API is
3419 * used to try to clear the condition when the user wants to resume.
3420 *
3421 * @returns The following VBox status codes.
3422 * @retval VINF_SUCCESS on success. FFs cleared.
3423 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in
3424 * this case and it gets accompanied by VM_FF_PGM_NO_MEMORY.
3425 *
3426 * @param pVM The VM handle.
3427 *
3428 * @remarks The VINF_EM_NO_MEMORY status is for the benefit of the FF processing
3429 * in EM.cpp and shouldn't be propagated outside TRPM, HWACCM, EM and
3430 * pgmPhysEnsureHandyPage. There is one exception to this in the \#PF
3431 * handler.
3432 */
3433VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
3434{
3435 pgmLock(pVM);
3436
3437 /*
3438 * Allocate more pages, noting down the index of the first new page.
3439 */
3440 uint32_t iClear = pVM->pgm.s.cHandyPages;
3441 AssertMsgReturn(iClear <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d", iClear), VERR_INTERNAL_ERROR);
3442 Log(("PGMR3PhysAllocateHandyPages: %d -> %d\n", iClear, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
3443 int rcAlloc = VINF_SUCCESS;
3444 int rcSeed = VINF_SUCCESS;
3445 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
3446 while (rc == VERR_GMM_SEED_ME)
3447 {
3448 void *pvChunk;
3449 rcAlloc = rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
3450 if (RT_SUCCESS(rc))
3451 {
3452 rcSeed = rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
3453 if (RT_FAILURE(rc))
3454 SUPR3PageFree(pvChunk, GMM_CHUNK_SIZE >> PAGE_SHIFT);
3455 }
3456 if (RT_SUCCESS(rc))
3457 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
3458 }
3459
3460 if (RT_SUCCESS(rc))
3461 {
3462 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
3463 Assert(pVM->pgm.s.cHandyPages > 0);
3464 VM_FF_CLEAR(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
3465 VM_FF_CLEAR(pVM, VM_FF_PGM_NO_MEMORY);
3466
3467 /*
3468 * Clear the pages.
3469 */
3470 while (iClear < pVM->pgm.s.cHandyPages)
3471 {
3472 PGMMPAGEDESC pPage = &pVM->pgm.s.aHandyPages[iClear];
3473 void *pv;
3474 rc = pgmPhysPageMapByPageID(pVM, pPage->idPage, pPage->HCPhysGCPhys, &pv);
3475 AssertLogRelMsgBreak(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc", pPage->idPage, pPage->HCPhysGCPhys, rc));
3476 ASMMemZeroPage(pv);
3477 iClear++;
3478 Log3(("PGMR3PhysAllocateHandyPages: idPage=%#x HCPhys=%RGp\n", pPage->idPage, pPage->HCPhysGCPhys));
        }
    }
    else
    {
        uint64_t cAllocPages, cMaxPages, cBalloonPages;

        /*
         * We should never get here unless there is a genuine shortage of
         * memory (or some internal error). Flag the error so the VM can be
         * suspended ASAP and the user informed. If we're totally out of
         * handy pages we will return failure.
         */
        /* Report the failure. */
        LogRel(("PGM: Failed to procure handy pages; rc=%Rrc rcAlloc=%Rrc rcSeed=%Rrc cHandyPages=%#x\n"
                " cAllPages=%#x cPrivatePages=%#x cSharedPages=%#x cZeroPages=%#x\n",
                rc, rcAlloc, rcSeed,
                pVM->pgm.s.cHandyPages,
                pVM->pgm.s.cAllPages,
                pVM->pgm.s.cPrivatePages,
                pVM->pgm.s.cSharedPages,
                pVM->pgm.s.cZeroPages));

        if (GMMR3QueryMemoryStats(pVM, &cAllocPages, &cMaxPages, &cBalloonPages) == VINF_SUCCESS)
        {
            LogRel(("GMM: Statistics:\n"
                    " Allocated pages: %RX64\n"
                    " Maximum   pages: %RX64\n"
                    " Ballooned pages: %RX64\n", cAllocPages, cMaxPages, cBalloonPages));
        }

        if (   rc != VERR_NO_MEMORY
            && rc != VERR_LOCK_FAILED)
        {
            for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
            {
                LogRel(("PGM: aHandyPages[#%#04x] = {.HCPhysGCPhys=%RHp, .idPage=%#08x, .idSharedPage=%#08x}\n",
                        i, pVM->pgm.s.aHandyPages[i].HCPhysGCPhys, pVM->pgm.s.aHandyPages[i].idPage,
                        pVM->pgm.s.aHandyPages[i].idSharedPage));
                uint32_t const idPage = pVM->pgm.s.aHandyPages[i].idPage;
                if (idPage != NIL_GMM_PAGEID)
                {
                    for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
                         pRam;
                         pRam = pRam->pNextR3)
                    {
                        uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
                        for (uint32_t iPage = 0; iPage < cPages; iPage++)
                            if (PGM_PAGE_GET_PAGEID(&pRam->aPages[iPage]) == idPage)
                                LogRel(("PGM: Used by %RGp %R[pgmpage] (%s)\n",
                                        pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pRam->aPages[iPage], pRam->pszDesc));
                    }
                }
            }
        }

        /* Set the FFs and adjust rc. */
        VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
        VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
        if (   rc == VERR_NO_MEMORY
            || rc == VERR_LOCK_FAILED)
            rc = VINF_EM_NO_MEMORY;
    }

    pgmUnlock(pVM);
    return rc;
}

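/*
 * Illustrative sketch (not part of the original source): how a ring-3 caller
 * might service the force-action flag this function works with. The function
 * name is hypothetical, and VM_FF_ISSET is assumed to be the flag-test macro
 * matching the VM_FF_SET/VM_FF_CLEAR calls above.
 */
#if 0 /* example only */
static int pgmR3ExampleServiceHandyPages(PVM pVM)
{
    int rc = VINF_SUCCESS;
    if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
    {
        rc = PGMR3PhysAllocateHandyPages(pVM);
        if (rc == VINF_EM_NO_MEMORY)
            LogRel(("Example: host is out of memory; the VM should be suspended until memory is freed up.\n"));
    }
    return rc;
}
#endif /* example only */
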
/**
 * Frees the specified RAM page and replaces it with the ZERO page.
 *
 * This is used by ballooning, remapping MMIO2 and RAM reset.
 *
 * @param   pVM             Pointer to the shared VM structure.
 * @param   pReq            Pointer to the GMM free-pages request; the page is
 *                          appended to it and the whole batch is flushed once
 *                          it holds PGMPHYS_FREE_PAGE_BATCH_SIZE pages.
 * @param   pcPendingPages  Where the number of pages accumulated in @a pReq is
 *                          tracked; reset to zero when the batch is flushed.
 * @param   pPage           Pointer to the page structure.
 * @param   GCPhys          The guest physical address of the page, if applicable.
 *
 * @remarks The caller must own the PGM lock.
 */
static int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys)
{
    /*
     * Assert sanity.
     */
    Assert(PGMIsLockOwner(pVM));
    if (RT_UNLIKELY(    PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
                    &&  PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW))
    {
        AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
        return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
    }

    /* Zero and ballooned pages have no private backing to hand back to GMM. */
    if (    PGM_PAGE_IS_ZERO(pPage)
        ||  PGM_PAGE_IS_BALLOONED(pPage))
        return VINF_SUCCESS;

    const uint32_t idPage = PGM_PAGE_GET_PAGEID(pPage);
    Log3(("pgmPhysFreePage: idPage=%#x GCPhys=%RGp pPage=%R[pgmpage]\n", idPage, GCPhys, pPage));
    if (RT_UNLIKELY(    idPage == NIL_GMM_PAGEID
                    ||  idPage > GMM_PAGEID_LAST
                    ||  PGM_PAGE_GET_CHUNKID(pPage) == NIL_GMM_CHUNKID))
    {
        AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
        return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, idPage);
    }

    /* update page count stats. */
    if (PGM_PAGE_IS_SHARED(pPage))
        pVM->pgm.s.cSharedPages--;
    else
        pVM->pgm.s.cPrivatePages--;
    pVM->pgm.s.cZeroPages++;

    /* Deal with write monitored pages. */
    if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
    {
        PGM_PAGE_SET_WRITTEN_TO(pPage);
        pVM->pgm.s.cWrittenToPages++;
    }

    /*
     * pPage = ZERO page.
     */
    PGM_PAGE_SET_HCPHYS(pPage, pVM->pgm.s.HCPhysZeroPg);
    PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
    PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
    PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_DONTCARE);

    /* Flush physical page map TLB entry. */
    PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhys);

    /*
     * Make sure it's not in the handy page array.
     */
    for (uint32_t i = pVM->pgm.s.cHandyPages; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
    {
        if (pVM->pgm.s.aHandyPages[i].idPage == idPage)
        {
            pVM->pgm.s.aHandyPages[i].idPage = NIL_GMM_PAGEID;
            break;
        }
        if (pVM->pgm.s.aHandyPages[i].idSharedPage == idPage)
        {
            pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
            break;
        }
    }

    /*
     * Push it onto the page array.
     */
    uint32_t iPage = *pcPendingPages;
    Assert(iPage < PGMPHYS_FREE_PAGE_BATCH_SIZE);
    *pcPendingPages += 1;

    pReq->aPages[iPage].idPage = idPage;

    if (iPage + 1 < PGMPHYS_FREE_PAGE_BATCH_SIZE)
        return VINF_SUCCESS;

    /*
     * Flush the pages.
     */
    int rc = GMMR3FreePagesPerform(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE);
    if (RT_SUCCESS(rc))
    {
        GMMR3FreePagesRePrep(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
        *pcPendingPages = 0;
    }
    return rc;
}

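/*
 * Illustrative sketch (not part of the original source): the batching
 * protocol a caller of pgmPhysFreePage is expected to follow, using the
 * GMMR3FreePagesPrepare/Perform/Cleanup request helpers referenced by the
 * code above. The range-walking function name is hypothetical, and the
 * caller is assumed to own the PGM lock.
 */
#if 0 /* example only */
static int pgmR3ExampleFreeRange(PVM pVM, PPGMRAMRANGE pRam)
{
    uint32_t         cPendingPages = 0;
    PGMMFREEPAGESREQ pReq;
    int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    AssertLogRelRCReturn(rc, rc);

    uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
    for (uint32_t iPage = 0; iPage < cPages && RT_SUCCESS(rc); iPage++)
        rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRam->aPages[iPage],
                             pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));

    /* Flush whatever didn't fill a whole batch. */
    if (RT_SUCCESS(rc) && cPendingPages)
        rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
    GMMR3FreePagesCleanup(pReq);
    return rc;
}
#endif /* example only */
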
/**
 * Converts a GC physical address to a HC ring-3 pointer, with some
 * additional checks.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
 *          access handler of some kind.
 * @retval  VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
 *          accesses or is odd in any way.
 * @retval  VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
 *
 * @param   pVM         The VM handle.
 * @param   GCPhys      The GC physical address to convert.
 * @param   fWritable   Whether write access is required.
 * @param   ppv         Where to store the pointer corresponding to GCPhys on
 *                      success.
 */
VMMR3DECL(int) PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
{
    pgmLock(pVM);

    PPGMRAMRANGE pRam;
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
    if (RT_SUCCESS(rc))
    {
        if (PGM_PAGE_IS_BALLOONED(pPage))
            rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
        else if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
            rc = VINF_SUCCESS;
        else
        {
            if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
                rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
            else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
            {
                /** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
                 *        in -norawr0 mode. */
                if (fWritable)
                    rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
            }
            else
            {
                /* Temporarily disabled physical handler(s), since the recompiler
                   doesn't get notified when it's reset we'll have to pretend it's
                   operating normally. */
                if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
                    rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
                else
                    rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
            }
        }
        if (RT_SUCCESS(rc))
        {
            int rc2;

            /* Make sure what we return is writable. */
            if (fWritable && rc != VINF_PGM_PHYS_TLB_CATCH_WRITE)
                switch (PGM_PAGE_GET_STATE(pPage))
                {
                    case PGM_PAGE_STATE_ALLOCATED:
                        break;
                    case PGM_PAGE_STATE_BALLOONED:
                        AssertFailed(); /* unreachable: ballooned pages yield VINF_PGM_PHYS_TLB_CATCH_WRITE above */
                        break;
                    case PGM_PAGE_STATE_ZERO:
                    case PGM_PAGE_STATE_SHARED:
                    case PGM_PAGE_STATE_WRITE_MONITORED:
                        rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
                        AssertLogRelRCReturn(rc2, rc2);
                        break;
                }

            /* Get a ring-3 mapping of the address. */
            PPGMPAGER3MAPTLBE pTlbe;
            rc2 = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
            AssertLogRelRCReturn(rc2, rc2);
            *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
            /** @todo mapping/locking hell; this isn't horribly efficient since
             *        pgmPhysPageLoadIntoTlb will repeat the lookup we've done here. */

            Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
        }
        else
            Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));

        /* else: handler catching all access, no pointer returned. */
    }
    else
        rc = VERR_PGM_PHYS_TLB_UNASSIGNED;

    pgmUnlock(pVM);
    return rc;
}

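/*
 * Illustrative sketch (not part of the original source): interpreting the
 * status codes of PGMR3PhysTlbGCPhys2Ptr from a recompiler-style caller.
 * The function name is hypothetical.
 */
#if 0 /* example only */
static bool pgmR3ExampleCanAccessDirectly(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
{
    int rc = PGMR3PhysTlbGCPhys2Ptr(pVM, GCPhys, fWritable, ppv);
    if (rc == VINF_SUCCESS)
        return true;    /* Direct pointer access is fine for the requested mode. */
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE && !fWritable)
        return true;    /* Reads may use the pointer; writes must be trapped. */
    /* VERR_PGM_PHYS_TLB_CATCH_ALL / VERR_PGM_PHYS_TLB_UNASSIGNED (and writes
       to write-monitored pages): go through PGMPhysRead/PGMPhysWrite instead. */
    return false;
}
#endif /* example only */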