VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp@28001

Last change on this file was r28001, checked in by vboxsync on 2010-04-06:

Parameter description

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 131.7 KB
1/* $Id: PGMPhys.cpp 28001 2010-04-06 12:27:28Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM_PHYS
27#include <VBox/pgm.h>
28#include <VBox/iom.h>
29#include <VBox/mm.h>
30#include <VBox/stam.h>
31#include <VBox/rem.h>
32#include <VBox/pdmdev.h>
33#include "PGMInternal.h"
34#include <VBox/vm.h>
35#include "PGMInline.h"
36#include <VBox/sup.h>
37#include <VBox/param.h>
38#include <VBox/err.h>
39#include <VBox/log.h>
40#include <iprt/assert.h>
41#include <iprt/alloc.h>
42#include <iprt/asm.h>
43#include <iprt/thread.h>
44#include <iprt/string.h>
45
46
47/*******************************************************************************
48* Defined Constants And Macros *
49*******************************************************************************/
50/** The number of pages to free in one batch. */
51#define PGMPHYS_FREE_PAGE_BATCH_SIZE 128
52
53
54/*******************************************************************************
55* Internal Functions *
56*******************************************************************************/
57static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
58static int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys);
59
60
61/*
62 * PGMR3PhysReadU8-64
63 * PGMR3PhysWriteU8-64
64 */
65#define PGMPHYSFN_READNAME PGMR3PhysReadU8
66#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU8
67#define PGMPHYS_DATASIZE 1
68#define PGMPHYS_DATATYPE uint8_t
69#include "PGMPhysRWTmpl.h"
70
71#define PGMPHYSFN_READNAME PGMR3PhysReadU16
72#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU16
73#define PGMPHYS_DATASIZE 2
74#define PGMPHYS_DATATYPE uint16_t
75#include "PGMPhysRWTmpl.h"
76
77#define PGMPHYSFN_READNAME PGMR3PhysReadU32
78#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU32
79#define PGMPHYS_DATASIZE 4
80#define PGMPHYS_DATATYPE uint32_t
81#include "PGMPhysRWTmpl.h"
82
83#define PGMPHYSFN_READNAME PGMR3PhysReadU64
84#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU64
85#define PGMPHYS_DATASIZE 8
86#define PGMPHYS_DATATYPE uint64_t
87#include "PGMPhysRWTmpl.h"
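/* A minimal usage sketch for the byte/word/dword/qword accessors generated by the
 * includes above, assuming the template expands to the usual ring-3 prototypes
 * (value-returning readers, void writers) and that pVM/GCPhys are valid in the
 * calling context:
 *
 *      uint32_t u32 = PGMR3PhysReadU32(pVM, GCPhys);
 *      PGMR3PhysWriteU32(pVM, GCPhys, u32 | 1);
 */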
88
89
90/**
91 * EMT worker for PGMR3PhysReadExternal.
92 */
93static DECLCALLBACK(int) pgmR3PhysReadExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, void *pvBuf, size_t cbRead)
94{
95 PGMPhysRead(pVM, *pGCPhys, pvBuf, cbRead);
96 return VINF_SUCCESS;
97}
98
99
100/**
101 * Read from physical memory, external users.
102 *
103 * @returns VBox status code.
104 * @retval VINF_SUCCESS.
105 *
106 * @param pVM VM Handle.
107 * @param GCPhys Physical address to start reading from.
108 * @param pvBuf Where to put the bits we read.
109 * @param cbRead How many bytes to read.
110 *
111 * @thread Any but EMTs.
112 */
113VMMR3DECL(int) PGMR3PhysReadExternal(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
114{
115 VM_ASSERT_OTHER_THREAD(pVM);
116
117 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
118 LogFlow(("PGMR3PhysReadExternal: %RGp %d\n", GCPhys, cbRead));
119
120 pgmLock(pVM);
121
122 /*
123 * Copy loop on ram ranges.
124 */
125 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
126 for (;;)
127 {
128 /* Find range. */
129 while (pRam && GCPhys > pRam->GCPhysLast)
130 pRam = pRam->CTX_SUFF(pNext);
131 /* Inside range or not? */
132 if (pRam && GCPhys >= pRam->GCPhys)
133 {
134 /*
135 * Must work our way thru this page by page.
136 */
137 RTGCPHYS off = GCPhys - pRam->GCPhys;
138 while (off < pRam->cb)
139 {
140 unsigned iPage = off >> PAGE_SHIFT;
141 PPGMPAGE pPage = &pRam->aPages[iPage];
142
143 /*
144 * If the page has an ALL access handler, we'll have to
145 * delegate the job to EMT.
146 */
147 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
148 {
149 pgmUnlock(pVM);
150
151 return VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysReadExternalEMT, 4,
152 pVM, &GCPhys, pvBuf, cbRead);
153 }
154 Assert(!PGM_PAGE_IS_MMIO(pPage));
155
156 /*
157 * Simple stuff, go ahead.
158 */
159 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
160 if (cb > cbRead)
161 cb = cbRead;
162 const void *pvSrc;
163 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
164 if (RT_SUCCESS(rc))
165 memcpy(pvBuf, pvSrc, cb);
166 else
167 {
168 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
169 pRam->GCPhys + off, pPage, rc));
170 memset(pvBuf, 0xff, cb);
171 }
172
173 /* next page */
174 if (cb >= cbRead)
175 {
176 pgmUnlock(pVM);
177 return VINF_SUCCESS;
178 }
179 cbRead -= cb;
180 off += cb;
181 GCPhys += cb;
182 pvBuf = (char *)pvBuf + cb;
183 } /* walk pages in ram range. */
184 }
185 else
186 {
187 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
188
189 /*
190 * Unassigned address space.
191 */
192 if (!pRam)
193 break;
194 size_t cb = pRam->GCPhys - GCPhys;
195 if (cb >= cbRead)
196 {
197 memset(pvBuf, 0xff, cbRead);
198 break;
199 }
200 memset(pvBuf, 0xff, cb);
201
202 cbRead -= cb;
203 pvBuf = (char *)pvBuf + cb;
204 GCPhys += cb;
205 }
206 } /* Ram range walk */
207
208 pgmUnlock(pVM);
209
210 return VINF_SUCCESS;
211}
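/* A minimal usage sketch for PGMR3PhysReadExternal, assuming the caller is a device
 * worker thread (not an EMT) with a valid pVM, and that GCPhysDesc points at a guest
 * structure of interest (both names are illustrative):
 *
 *      uint8_t abBuf[64];
 *      int rc = PGMR3PhysReadExternal(pVM, GCPhysDesc, abBuf, sizeof(abBuf));
 *      if (RT_FAILURE(rc))
 *          return rc;
 *
 * Unassigned addresses read back as 0xff and pages with ALL access handlers are
 * delegated to an EMT, as implemented above.
 */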
212
213
214/**
215 * EMT worker for PGMR3PhysWriteExternal.
216 */
217static DECLCALLBACK(int) pgmR3PhysWriteExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, const void *pvBuf, size_t cbWrite)
218{
219 /** @todo VERR_EM_NO_MEMORY */
220 PGMPhysWrite(pVM, *pGCPhys, pvBuf, cbWrite);
221 return VINF_SUCCESS;
222}
223
224
225/**
226 * Write to physical memory, external users.
227 *
228 * @returns VBox status code.
229 * @retval VINF_SUCCESS.
230 * @retval VERR_EM_NO_MEMORY.
231 *
232 * @param pVM VM Handle.
233 * @param GCPhys Physical address to write to.
234 * @param pvBuf What to write.
235 * @param cbWrite How many bytes to write.
236 * @param pszWho Who is writing. For tracking down who is writing
237 * after we've saved the state.
238 *
239 * @thread Any but EMTs.
240 */
241VMMDECL(int) PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, const char *pszWho)
242{
243 VM_ASSERT_OTHER_THREAD(pVM);
244
245 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites,
246 ("Calling PGMR3PhysWriteExternal after pgmR3Save()! GCPhys=%RGp cbWrite=%#x pszWho=%s\n",
247 GCPhys, cbWrite, pszWho));
248 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
249 LogFlow(("PGMR3PhysWriteExternal: %RGp %d\n", GCPhys, cbWrite));
250
251 pgmLock(pVM);
252
253 /*
254 * Copy loop on ram ranges, stop when we hit something difficult.
255 */
256 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
257 for (;;)
258 {
259 /* Find range. */
260 while (pRam && GCPhys > pRam->GCPhysLast)
261 pRam = pRam->CTX_SUFF(pNext);
262 /* Inside range or not? */
263 if (pRam && GCPhys >= pRam->GCPhys)
264 {
265 /*
266 * Must work our way thru this page by page.
267 */
268 RTGCPTR off = GCPhys - pRam->GCPhys;
269 while (off < pRam->cb)
270 {
271 RTGCPTR iPage = off >> PAGE_SHIFT;
272 PPGMPAGE pPage = &pRam->aPages[iPage];
273
274 /*
275 * If the page is problematic, we have to do the work on the EMT.
276 *
277 * Allocating writable pages and access handlers are
278 * problematic; write monitored pages are simple and can be
279 * dealt with here.
280 */
281 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
282 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
283 {
284 if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
285 && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
286 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
287 else
288 {
289 pgmUnlock(pVM);
290
291 return VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysWriteExternalEMT, 4,
292 pVM, &GCPhys, pvBuf, cbWrite);
293 }
294 }
295 Assert(!PGM_PAGE_IS_MMIO(pPage));
296
297 /*
298 * Simple stuff, go ahead.
299 */
300 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
301 if (cb > cbWrite)
302 cb = cbWrite;
303 void *pvDst;
304 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
305 if (RT_SUCCESS(rc))
306 memcpy(pvDst, pvBuf, cb);
307 else
308 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
309 pRam->GCPhys + off, pPage, rc));
310
311 /* next page */
312 if (cb >= cbWrite)
313 {
314 pgmUnlock(pVM);
315 return VINF_SUCCESS;
316 }
317
318 cbWrite -= cb;
319 off += cb;
320 GCPhys += cb;
321 pvBuf = (const char *)pvBuf + cb;
322 } /* walk pages in ram range */
323 }
324 else
325 {
326 /*
327 * Unassigned address space, skip it.
328 */
329 if (!pRam)
330 break;
331 size_t cb = pRam->GCPhys - GCPhys;
332 if (cb >= cbWrite)
333 break;
334 cbWrite -= cb;
335 pvBuf = (const char *)pvBuf + cb;
336 GCPhys += cb;
337 }
338 } /* Ram range walk */
339
340 pgmUnlock(pVM);
341 return VINF_SUCCESS;
342}
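/* A minimal usage sketch for PGMR3PhysWriteExternal, assuming an asynchronous I/O
 * thread completing a transfer into guest RAM; all names except the API itself are
 * illustrative:
 *
 *      int rc = PGMR3PhysWriteExternal(pVM, GCPhysBuf, pvData, cbData, "MyDevice/AioThread");
 *      AssertRC(rc);
 *
 * The pszWho argument only identifies the writer when something writes after the
 * state has been saved (see the fNoMorePhysWrites assertion at the top).
 */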
343
344
345/**
346 * VMR3ReqCall worker for PGMR3PhysGCPhys2CCPtrExternal to make pages writable.
347 *
348 * @returns see PGMR3PhysGCPhys2CCPtrExternal
349 * @param pVM The VM handle.
350 * @param pGCPhys Pointer to the guest physical address.
351 * @param ppv Where to store the mapping address.
352 * @param pLock Where to store the lock.
353 */
354static DECLCALLBACK(int) pgmR3PhysGCPhys2CCPtrDelegated(PVM pVM, PRTGCPHYS pGCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
355{
356 /*
357 * Just hand it to PGMPhysGCPhys2CCPtr and check that it's not a page with
358 * an access handler after it succeeds.
359 */
360 int rc = pgmLock(pVM);
361 AssertRCReturn(rc, rc);
362
363 rc = PGMPhysGCPhys2CCPtr(pVM, *pGCPhys, ppv, pLock);
364 if (RT_SUCCESS(rc))
365 {
366 PPGMPAGEMAPTLBE pTlbe;
367 int rc2 = pgmPhysPageQueryTlbe(&pVM->pgm.s, *pGCPhys, &pTlbe);
368 AssertFatalRC(rc2);
369 PPGMPAGE pPage = pTlbe->pPage;
370 if (PGM_PAGE_IS_MMIO(pPage))
371 {
372 PGMPhysReleasePageMappingLock(pVM, pLock);
373 rc = VERR_PGM_PHYS_PAGE_RESERVED;
374 }
375 else if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
376#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
377 || pgmPoolIsDirtyPage(pVM, *pGCPhys)
378#endif
379 )
380 {
381 /* We *must* flush any corresponding pgm pool page here, otherwise we'll
382 * not be informed about writes and keep bogus gst->shw mappings around.
383 */
384 pgmPoolFlushPageByGCPhys(pVM, *pGCPhys);
385 Assert(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage));
386 /** @todo r=bird: return VERR_PGM_PHYS_PAGE_RESERVED here if it still has
387 * active handlers, see the PGMR3PhysGCPhys2CCPtrExternal docs. */
388 }
389 }
390
391 pgmUnlock(pVM);
392 return rc;
393}
394
395
396/**
397 * Requests the mapping of a guest page into ring-3, external threads.
398 *
399 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
400 * release it.
401 *
402 * This API will assume your intention is to write to the page, and will
403 * therefore replace shared and zero pages. If you do not intend to modify the
404 * page, use the PGMR3PhysGCPhys2CCPtrReadOnlyExternal() API.
405 *
406 * @returns VBox status code.
407 * @retval VINF_SUCCESS on success.
408 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
409 * backing or if the page has any active access handlers. The caller
410 * must fall back on using PGMR3PhysWriteExternal.
411 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
412 *
413 * @param pVM The VM handle.
414 * @param GCPhys The guest physical address of the page that should be mapped.
415 * @param ppv Where to store the address corresponding to GCPhys.
416 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
417 *
418 * @remark Avoid calling this API from within critical sections (other than the
419 * PGM one) because of the deadlock risk when we have to delegate the
420 * task to an EMT.
421 * @thread Any.
422 */
423VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrExternal(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
424{
425 AssertPtr(ppv);
426 AssertPtr(pLock);
427
428 Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
429
430 int rc = pgmLock(pVM);
431 AssertRCReturn(rc, rc);
432
433 /*
434 * Query the Physical TLB entry for the page (may fail).
435 */
436 PPGMPAGEMAPTLBE pTlbe;
437 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
438 if (RT_SUCCESS(rc))
439 {
440 PPGMPAGE pPage = pTlbe->pPage;
441 if (PGM_PAGE_IS_MMIO(pPage))
442 rc = VERR_PGM_PHYS_PAGE_RESERVED;
443 else
444 {
445 /*
446 * If the page is shared, the zero page, or being write monitored
447 * it must be converted to a page that's writable if possible.
448 * We can only deal with write monitored pages here, the rest have
449 * to be on an EMT.
450 */
451 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
452 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
453#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
454 || pgmPoolIsDirtyPage(pVM, GCPhys)
455#endif
456 )
457 {
458 if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
459 && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
460#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
461 && !pgmPoolIsDirtyPage(pVM, GCPhys)
462#endif
463 )
464 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
465 else
466 {
467 pgmUnlock(pVM);
468
469 return VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
470 pVM, &GCPhys, ppv, pLock);
471 }
472 }
473
474 /*
475 * Now, just perform the locking and calculate the return address.
476 */
477 PPGMPAGEMAP pMap = pTlbe->pMap;
478 if (pMap)
479 pMap->cRefs++;
480
481 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
482 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
483 {
484 if (cLocks == 0)
485 pVM->pgm.s.cWriteLockedPages++;
486 PGM_PAGE_INC_WRITE_LOCKS(pPage);
487 }
488 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
489 {
490 PGM_PAGE_INC_WRITE_LOCKS(pPage);
491 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
492 if (pMap)
493 pMap->cRefs++; /* Extra ref to prevent it from going away. */
494 }
495
496 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
497 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
498 pLock->pvMap = pMap;
499 }
500 }
501
502 pgmUnlock(pVM);
503 return rc;
504}
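/* A minimal sketch of the intended calling pattern, with illustrative names; on
 * VERR_PGM_PHYS_PAGE_RESERVED the caller falls back on PGMR3PhysWriteExternal as the
 * documentation above prescribes:
 *
 *      void           *pv;
 *      PGMPAGEMAPLOCK  Lock;
 *      int rc = PGMR3PhysGCPhys2CCPtrExternal(pVM, GCPhys, &pv, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          memcpy(pv, pvSrc, cbToCopy);   // must stay within the mapped page
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *      }
 *      else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
 *          rc = PGMR3PhysWriteExternal(pVM, GCPhys, pvSrc, cbToCopy, "fallback");
 */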
505
506
507/**
508 * Requests the mapping of a guest page into ring-3, external threads.
509 *
510 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
511 * release it.
512 *
513 * @returns VBox status code.
514 * @retval VINF_SUCCESS on success.
515 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
516 * backing or if the page has an active ALL access handler. The caller
517 * must fall back on using PGMPhysRead.
518 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
519 *
520 * @param pVM The VM handle.
521 * @param GCPhys The guest physical address of the page that should be mapped.
522 * @param ppv Where to store the address corresponding to GCPhys.
523 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
524 *
525 * @remark Avoid calling this API from within critical sections (other than
526 * the PGM one) because of the deadlock risk.
527 * @thread Any.
528 */
529VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
530{
531 int rc = pgmLock(pVM);
532 AssertRCReturn(rc, rc);
533
534 /*
535 * Query the Physical TLB entry for the page (may fail).
536 */
537 PPGMPAGEMAPTLBE pTlbe;
538 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
539 if (RT_SUCCESS(rc))
540 {
541 PPGMPAGE pPage = pTlbe->pPage;
542#if 1
543 /* MMIO pages don't have any readable backing. */
544 if (PGM_PAGE_IS_MMIO(pPage))
545 rc = VERR_PGM_PHYS_PAGE_RESERVED;
546#else
547 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
548 rc = VERR_PGM_PHYS_PAGE_RESERVED;
549#endif
550 else
551 {
552 /*
553 * Now, just perform the locking and calculate the return address.
554 */
555 PPGMPAGEMAP pMap = pTlbe->pMap;
556 if (pMap)
557 pMap->cRefs++;
558
559 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
560 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
561 {
562 if (cLocks == 0)
563 pVM->pgm.s.cReadLockedPages++;
564 PGM_PAGE_INC_READ_LOCKS(pPage);
565 }
566 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
567 {
568 PGM_PAGE_INC_READ_LOCKS(pPage);
569 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
570 if (pMap)
571 pMap->cRefs++; /* Extra ref to prevent it from going away. */
572 }
573
574 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
575 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
576 pLock->pvMap = pMap;
577 }
578 }
579
580 pgmUnlock(pVM);
581 return rc;
582}
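/* The read-only counterpart follows the same pattern; a short sketch with
 * illustrative names, falling back on the read APIs above when
 * VERR_PGM_PHYS_PAGE_RESERVED is returned:
 *
 *      void const     *pv;
 *      PGMPAGEMAPLOCK  Lock;
 *      int rc = PGMR3PhysGCPhys2CCPtrReadOnlyExternal(pVM, GCPhys, &pv, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          memcpy(pvDst, pv, cbToCopy);   // must stay within the mapped page
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *      }
 */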
583
584
585/**
586 * Relinks the RAM ranges using the pSelfRC and pSelfR0 pointers.
587 *
588 * Called when anything was relocated.
589 *
590 * @param pVM Pointer to the shared VM structure.
591 */
592void pgmR3PhysRelinkRamRanges(PVM pVM)
593{
594 PPGMRAMRANGE pCur;
595
596#ifdef VBOX_STRICT
597 for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
598 {
599 Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfR0 == MMHyperCCToR0(pVM, pCur));
600 Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfRC == MMHyperCCToRC(pVM, pCur));
601 Assert((pCur->GCPhys & PAGE_OFFSET_MASK) == 0);
602 Assert((pCur->GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
603 Assert((pCur->cb & PAGE_OFFSET_MASK) == 0);
604 Assert(pCur->cb == pCur->GCPhysLast - pCur->GCPhys + 1);
605 for (PPGMRAMRANGE pCur2 = pVM->pgm.s.pRamRangesR3; pCur2; pCur2 = pCur2->pNextR3)
606 Assert( pCur2 == pCur
607 || strcmp(pCur2->pszDesc, pCur->pszDesc)); /** @todo fix MMIO ranges!! */
608 }
609#endif
610
611 pCur = pVM->pgm.s.pRamRangesR3;
612 if (pCur)
613 {
614 pVM->pgm.s.pRamRangesR0 = pCur->pSelfR0;
615 pVM->pgm.s.pRamRangesRC = pCur->pSelfRC;
616
617 for (; pCur->pNextR3; pCur = pCur->pNextR3)
618 {
619 pCur->pNextR0 = pCur->pNextR3->pSelfR0;
620 pCur->pNextRC = pCur->pNextR3->pSelfRC;
621 }
622
623 Assert(pCur->pNextR0 == NIL_RTR0PTR);
624 Assert(pCur->pNextRC == NIL_RTRCPTR);
625 }
626 else
627 {
628 Assert(pVM->pgm.s.pRamRangesR0 == NIL_RTR0PTR);
629 Assert(pVM->pgm.s.pRamRangesRC == NIL_RTRCPTR);
630 }
631 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
632}
633
634
635/**
636 * Links a new RAM range into the list.
637 *
638 * @param pVM Pointer to the shared VM structure.
639 * @param pNew Pointer to the new list entry.
640 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
641 */
642static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
643{
644 AssertMsg(pNew->pszDesc, ("%RGp-%RGp\n", pNew->GCPhys, pNew->GCPhysLast));
645 Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfR0 == MMHyperCCToR0(pVM, pNew));
646 Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfRC == MMHyperCCToRC(pVM, pNew));
647
648 pgmLock(pVM);
649
650 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
651 pNew->pNextR3 = pRam;
652 pNew->pNextR0 = pRam ? pRam->pSelfR0 : NIL_RTR0PTR;
653 pNew->pNextRC = pRam ? pRam->pSelfRC : NIL_RTRCPTR;
654
655 if (pPrev)
656 {
657 pPrev->pNextR3 = pNew;
658 pPrev->pNextR0 = pNew->pSelfR0;
659 pPrev->pNextRC = pNew->pSelfRC;
660 }
661 else
662 {
663 pVM->pgm.s.pRamRangesR3 = pNew;
664 pVM->pgm.s.pRamRangesR0 = pNew->pSelfR0;
665 pVM->pgm.s.pRamRangesRC = pNew->pSelfRC;
666 }
667 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
668 pgmUnlock(pVM);
669}
670
671
672/**
673 * Unlink an existing RAM range from the list.
674 *
675 * @param pVM Pointer to the shared VM structure.
676 * @param pRam Pointer to the RAM range to unlink.
677 * @param pPrev Pointer to the previous list entry. NULL if pRam is the list head.
678 */
679static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
680{
681 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
682 Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfR0 == MMHyperCCToR0(pVM, pRam));
683 Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfRC == MMHyperCCToRC(pVM, pRam));
684
685 pgmLock(pVM);
686
687 PPGMRAMRANGE pNext = pRam->pNextR3;
688 if (pPrev)
689 {
690 pPrev->pNextR3 = pNext;
691 pPrev->pNextR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
692 pPrev->pNextRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
693 }
694 else
695 {
696 Assert(pVM->pgm.s.pRamRangesR3 == pRam);
697 pVM->pgm.s.pRamRangesR3 = pNext;
698 pVM->pgm.s.pRamRangesR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
699 pVM->pgm.s.pRamRangesRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
700 }
701 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
702 pgmUnlock(pVM);
703}
704
705
706/**
707 * Unlink an existing RAM range from the list.
708 *
709 * @param pVM Pointer to the shared VM structure.
710 * @param pRam Pointer to the RAM range to unlink.
711 */
712static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
713{
714 pgmLock(pVM);
715
716 /* find prev. */
717 PPGMRAMRANGE pPrev = NULL;
718 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
719 while (pCur != pRam)
720 {
721 pPrev = pCur;
722 pCur = pCur->pNextR3;
723 }
724 AssertFatal(pCur);
725
726 pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
727 pgmUnlock(pVM);
728}
729
730
731/**
732 * Frees a range of pages, replacing them with ZERO pages of the specified type.
733 *
734 * @returns VBox status code.
735 * @param pVM The VM handle.
736 * @param pRam The RAM range in which the pages reside.
737 * @param GCPhys The address of the first page.
738 * @param GCPhysLast The address of the last page.
739 * @param uType The page type to replace them with.
740 */
741static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, uint8_t uType)
742{
743 Assert(PGMIsLockOwner(pVM));
744 uint32_t cPendingPages = 0;
745 PGMMFREEPAGESREQ pReq;
746 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
747 AssertLogRelRCReturn(rc, rc);
748
749 /* Iterate the pages. */
750 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
751 uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> PAGE_SHIFT) + 1;
752 while (cPagesLeft-- > 0)
753 {
754 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
755 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
756
757 PGM_PAGE_SET_TYPE(pPageDst, uType);
758
759 GCPhys += PAGE_SIZE;
760 pPageDst++;
761 }
762
763 if (cPendingPages)
764 {
765 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
766 AssertLogRelRCReturn(rc, rc);
767 }
768 GMMR3FreePagesCleanup(pReq);
769
770 return rc;
771}
772
773/**
774 * Rendezvous callback used by PGMR3PhysChangeMemBalloon that changes the memory balloon size.
775 *
776 * This is only called on one of the EMTs while the other ones are waiting for
777 * it to complete this function.
778 *
779 * @returns VINF_SUCCESS (VBox strict status code).
780 * @param pVM The VM handle.
781 * @param pVCpu The VMCPU for the EMT we're being called on. Unused.
782 * @param pvUser User parameter
783 */
784static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysChangeMemBalloonRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
785{
786 uintptr_t *paUser = (uintptr_t *)pvUser;
787 bool fInflate = !!paUser[0];
788 unsigned cPages = paUser[1];
789 RTGCPHYS *paPhysPage = (RTGCPHYS *)paUser[2];
790 uint32_t cPendingPages = 0;
791 PGMMFREEPAGESREQ pReq;
792 int rc;
793
794 Log(("pgmR3PhysChangeMemBalloonRendezvous: %s %x pages\n", (fInflate) ? "inflate" : "deflate", cPages));
795 pgmLock(pVM);
796
797 if (fInflate)
798 {
799 /* Replace pages with ZERO pages. */
800 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
801 if (RT_FAILURE(rc))
802 {
803 pgmUnlock(pVM);
804 AssertLogRelRC(rc);
805 return rc;
806 }
807
808 /* Iterate the pages. */
809 for (unsigned i = 0; i < cPages; i++)
810 {
811 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, paPhysPage[i]);
812 if ( pPage == NULL
813 || pPage->uTypeY != PGMPAGETYPE_RAM)
814 {
815 Log(("pgmR3PhysChangeMemBalloonRendezvous: invalid physical page %RGp pPage->u3Type=%d\n", paPhysPage[i], (pPage) ? pPage->uTypeY : 0));
816 break;
817 }
818
819 LogFlow(("balloon page: %RGp\n", paPhysPage[i]));
820
821 /* Flush the shadow PT if this page was previously used as a guest page table. */
822 pgmPoolFlushPageByGCPhys(pVM, paPhysPage[i]);
823
824 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, paPhysPage[i]);
825 if (RT_FAILURE(rc))
826 {
827 pgmUnlock(pVM);
828 AssertLogRelRC(rc);
829 return rc;
830 }
831 Assert(PGM_PAGE_IS_ZERO(pPage));
832 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_BALLOONED);
833 }
834
835 if (cPendingPages)
836 {
837 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
838 if (RT_FAILURE(rc))
839 {
840 pgmUnlock(pVM);
841 AssertLogRelRC(rc);
842 return rc;
843 }
844 }
845 GMMR3FreePagesCleanup(pReq);
846
847 /* Flush the PGM pool cache as we might have stale references to pages that we just freed. */
848 pgmR3PoolClearAllRendezvous(pVM, pVCpu, NULL);
849 }
850 else
851 {
852 /* Iterate the pages. */
853 for (unsigned i = 0; i < cPages; i++)
854 {
855 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, paPhysPage[i]);
856 AssertBreak(pPage && pPage->uTypeY == PGMPAGETYPE_RAM);
857
858 LogFlow(("Free ballooned page: %RGp\n", paPhysPage[i]));
859
860 Assert(PGM_PAGE_IS_BALLOONED(pPage));
861
862 /* Change back to zero page. */
863 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
864 }
865
866 /* Note that we currently do not map any ballooned pages in our shadow page tables, so no need to flush the pgm pool. */
867 }
868
869 /* Notify GMM about the balloon change. */
870 rc = GMMR3BalloonedPages(pVM, (fInflate) ? GMMBALLOONACTION_INFLATE : GMMBALLOONACTION_DEFLATE, cPages);
871 if (RT_SUCCESS(rc))
872 {
873 if (!fInflate)
874 {
875 Assert(pVM->pgm.s.cBalloonedPages >= cPages);
876 pVM->pgm.s.cBalloonedPages -= cPages;
877 }
878 else
879 pVM->pgm.s.cBalloonedPages += cPages;
880 }
881
882 pgmUnlock(pVM);
883
884 /* Flush the recompiler's TLB as well. */
885 for (unsigned i = 0; i < pVM->cCpus; i++)
886 CPUMSetChangedFlags(&pVM->aCpus[i], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
887
888 AssertLogRelRC(rc);
889 return rc;
890}
891
892/**
893 * Worker for PGMR3PhysChangeMemBalloon in the SMP case; runs the rendezvous with a copy of the page array.
894 *
895 * @returns VBox status code.
896 * @param pVM The VM handle.
897 * @param fInflate Inflate or deflate memory balloon
898 * @param cPages Number of pages to free
899 * @param paPhysPage Array of guest physical addresses
900 */
901static DECLCALLBACK(void) pgmR3PhysChangeMemBalloonHelper(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
902{
903 uintptr_t paUser[3];
904
905 paUser[0] = fInflate;
906 paUser[1] = cPages;
907 paUser[2] = (uintptr_t)paPhysPage;
908 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
909 AssertRC(rc);
910
911 /* Made a copy in PGMR3PhysChangeMemBalloon; free it here. */
912 RTMemFree(paPhysPage);
913}
914
915/**
916 * Inflate or deflate a memory balloon
917 *
918 * @returns VBox status code.
919 * @param pVM The VM handle.
920 * @param fInflate Inflate or deflate memory balloon
921 * @param cPages Number of pages to free
922 * @param paPhysPage Array of guest physical addresses
923 */
924VMMR3DECL(int) PGMR3PhysChangeMemBalloon(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
925{
926 int rc;
927
928 /* We own the IOM lock here and could cause a deadlock by waiting for another VCPU that is blocking on the IOM lock.
929 * In the SMP case we post a request packet to postpone the job.
930 */
931 if (pVM->cCpus > 1)
932 {
933 unsigned cbPhysPage = cPages * sizeof(paPhysPage[0]);
934 RTGCPHYS *paPhysPageCopy = (RTGCPHYS *)RTMemAlloc(cbPhysPage);
935 AssertReturn(paPhysPageCopy, VERR_NO_MEMORY);
936
937 memcpy(paPhysPageCopy, paPhysPage, cbPhysPage);
938
939 rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysChangeMemBalloonHelper, 4, pVM, fInflate, cPages, paPhysPageCopy);
940 AssertRC(rc);
941 }
942 else
943 {
944 uintptr_t paUser[3];
945
946 paUser[0] = fInflate;
947 paUser[1] = cPages;
948 paUser[2] = (uintptr_t)paPhysPage;
949 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
950 AssertRC(rc);
951 }
952 return rc;
953}
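/* A minimal sketch of feeding a batch of guest pages to the balloon API above; the
 * array handling is illustrative (the entries must be page aligned guest physical
 * addresses of RAM pages):
 *
 *      RTGCPHYS *paPages = (RTGCPHYS *)RTMemAlloc(cPages * sizeof(RTGCPHYS));
 *      // ... fill paPages[0..cPages-1] from the guest's inflate request ...
 *      int rc = PGMR3PhysChangeMemBalloon(pVM, true, cPages, paPages);  // true = inflate
 *      RTMemFree(paPages);  // safe: the SMP path works on its own copy, the UP path waits
 */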
954
955/**
956 * Query the VM balloon size and the total balloon size of all VMs.
957 *
958 * @returns VBox status code.
959 * @param pVM The VM handle.
960 * @param puBalloonVM Pointer to VM balloon size (in pages)
961 * @param puBalloonAllVMs Pointer to total balloon size of all VMs (in pages)
962 */
963VMMR3DECL(int) PGMR3QueryBalloonSize(PVM pVM, uint64_t *puBalloonVM, uint64_t *puBalloonAllVMs)
964{
965 int rc = VINF_SUCCESS;
966
967 if (puBalloonVM)
968 *puBalloonVM = pVM->pgm.s.cBalloonedPages;
969
970 if (puBalloonAllVMs)
971 {
972 *puBalloonAllVMs = 0;
973 rc = GMMR3QueryTotalBalloonSize(pVM, puBalloonAllVMs);
974 AssertRC(rc);
975 }
976
977 return rc;
978}
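/* Usage sketch (pVM assumed valid):
 *
 *      uint64_t cBalloonedThisVM = 0, cBalloonedAllVMs = 0;
 *      int rc = PGMR3QueryBalloonSize(pVM, &cBalloonedThisVM, &cBalloonedAllVMs);
 *      if (RT_SUCCESS(rc))
 *          LogRel(("Balloon: %llu pages (this VM), %llu pages (all VMs)\n",
 *                  cBalloonedThisVM, cBalloonedAllVMs));
 */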
979
980/**
981 * PGMR3PhysRegisterRam worker that initializes and links a RAM range.
982 *
983 * @param pVM The VM handle.
984 * @param pNew The new RAM range.
985 * @param GCPhys The address of the RAM range.
986 * @param GCPhysLast The last address of the RAM range.
987 * @param RCPtrNew The RC address if the range is floating. NIL_RTRCPTR
988 * if in HMA.
989 * @param R0PtrNew Ditto for R0.
990 * @param pszDesc The description.
991 * @param pPrev The previous RAM range (for linking).
992 */
993static void pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
994 RTRCPTR RCPtrNew, RTR0PTR R0PtrNew, const char *pszDesc, PPGMRAMRANGE pPrev)
995{
996 /*
997 * Initialize the range.
998 */
999 pNew->pSelfR0 = R0PtrNew != NIL_RTR0PTR ? R0PtrNew : MMHyperCCToR0(pVM, pNew);
1000 pNew->pSelfRC = RCPtrNew != NIL_RTRCPTR ? RCPtrNew : MMHyperCCToRC(pVM, pNew);
1001 pNew->GCPhys = GCPhys;
1002 pNew->GCPhysLast = GCPhysLast;
1003 pNew->cb = GCPhysLast - GCPhys + 1;
1004 pNew->pszDesc = pszDesc;
1005 pNew->fFlags = RCPtrNew != NIL_RTRCPTR ? PGM_RAM_RANGE_FLAGS_FLOATING : 0;
1006 pNew->pvR3 = NULL;
1007 pNew->paLSPages = NULL;
1008
1009 uint32_t const cPages = pNew->cb >> PAGE_SHIFT;
1010 RTGCPHYS iPage = cPages;
1011 while (iPage-- > 0)
1012 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
1013
1014 /* Update the page count stats. */
1015 pVM->pgm.s.cZeroPages += cPages;
1016 pVM->pgm.s.cAllPages += cPages;
1017
1018 /*
1019 * Link it.
1020 */
1021 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
1022}
1023
1024
1025/**
1026 * Relocate a floating RAM range.
1027 *
1028 * @copydoc FNPGMRELOCATE.
1029 */
1030static DECLCALLBACK(bool) pgmR3PhysRamRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
1031{
1032 PPGMRAMRANGE pRam = (PPGMRAMRANGE)pvUser;
1033 Assert(pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
1034 Assert(pRam->pSelfRC == GCPtrOld + PAGE_SIZE);
1035
1036 switch (enmMode)
1037 {
1038 case PGMRELOCATECALL_SUGGEST:
1039 return true;
1040 case PGMRELOCATECALL_RELOCATE:
1041 {
1042 /* Update myself and then relink all the ranges. */
1043 pgmLock(pVM);
1044 pRam->pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE);
1045 pgmR3PhysRelinkRamRanges(pVM);
1046 pgmUnlock(pVM);
1047 return true;
1048 }
1049
1050 default:
1051 AssertFailedReturn(false);
1052 }
1053}
1054
1055
1056/**
1057 * PGMR3PhysRegisterRam worker that registers a high chunk.
1058 *
1059 * @returns VBox status code.
1060 * @param pVM The VM handle.
1061 * @param GCPhys The address of the RAM.
1062 * @param cRamPages The number of RAM pages to register.
1063 * @param cbChunk The size of the PGMRAMRANGE guest mapping.
1064 * @param iChunk The chunk number.
1065 * @param pszDesc The RAM range description.
1066 * @param ppPrev Previous RAM range pointer. In/Out.
1067 */
1068static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages,
1069 uint32_t cbChunk, uint32_t iChunk, const char *pszDesc,
1070 PPGMRAMRANGE *ppPrev)
1071{
1072 const char *pszDescChunk = iChunk == 0
1073 ? pszDesc
1074 : MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s (#%u)", pszDesc, iChunk + 1);
1075 AssertReturn(pszDescChunk, VERR_NO_MEMORY);
1076
1077 /*
1078 * Allocate memory for the new chunk.
1079 */
1080 size_t const cChunkPages = RT_ALIGN_Z(RT_UOFFSETOF(PGMRAMRANGE, aPages[cRamPages]), PAGE_SIZE) >> PAGE_SHIFT;
1081 PSUPPAGE paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages);
1082 AssertReturn(paChunkPages, VERR_NO_TMP_MEMORY);
1083 RTR0PTR R0PtrChunk = NIL_RTR0PTR;
1084 void *pvChunk = NULL;
1085 int rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk,
1086#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1087 VMMIsHwVirtExtForced(pVM) ? &R0PtrChunk : NULL,
1088#else
1089 NULL,
1090#endif
1091 paChunkPages);
1092 if (RT_SUCCESS(rc))
1093 {
1094#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1095 if (!VMMIsHwVirtExtForced(pVM))
1096 R0PtrChunk = NIL_RTR0PTR;
1097#else
1098 R0PtrChunk = (uintptr_t)pvChunk;
1099#endif
1100 memset(pvChunk, 0, cChunkPages << PAGE_SHIFT);
1101
1102 PPGMRAMRANGE pNew = (PPGMRAMRANGE)pvChunk;
1103
1104 /*
1105 * Create a mapping and map the pages into it.
1106 * We push these in below the HMA.
1107 */
1108 RTGCPTR GCPtrChunkMap = pVM->pgm.s.GCPtrPrevRamRangeMapping - cbChunk;
1109 rc = PGMR3MapPT(pVM, GCPtrChunkMap, cbChunk, 0 /*fFlags*/, pgmR3PhysRamRangeRelocate, pNew, pszDescChunk);
1110 if (RT_SUCCESS(rc))
1111 {
1112 pVM->pgm.s.GCPtrPrevRamRangeMapping = GCPtrChunkMap;
1113
1114 RTGCPTR const GCPtrChunk = GCPtrChunkMap + PAGE_SIZE;
1115 RTGCPTR GCPtrPage = GCPtrChunk;
1116 for (uint32_t iPage = 0; iPage < cChunkPages && RT_SUCCESS(rc); iPage++, GCPtrPage += PAGE_SIZE)
1117 rc = PGMMap(pVM, GCPtrPage, paChunkPages[iPage].Phys, PAGE_SIZE, 0);
1118 if (RT_SUCCESS(rc))
1119 {
1120 /*
1121 * Ok, init and link the range.
1122 */
1123 pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << PAGE_SHIFT) - 1,
1124 (RTRCPTR)GCPtrChunk, R0PtrChunk, pszDescChunk, *ppPrev);
1125 *ppPrev = pNew;
1126 }
1127 }
1128
1129 if (RT_FAILURE(rc))
1130 SUPR3PageFreeEx(pvChunk, cChunkPages);
1131 }
1132
1133 RTMemTmpFree(paChunkPages);
1134 return rc;
1135}
1136
1137
1138/**
1139 * Sets up a RAM range.
1140 *
1141 * This will check for conflicting registrations, make a resource
1142 * reservation for the memory (with GMM), and setup the per-page
1143 * tracking structures (PGMPAGE).
1144 *
1145 * @returns VBox status code.
1146 * @param pVM Pointer to the shared VM structure.
1147 * @param GCPhys The physical address of the RAM.
1148 * @param cb The size of the RAM.
1149 * @param pszDesc The description - not copied, so, don't free or change it.
1150 */
1151VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
1152{
1153 /*
1154 * Validate input.
1155 */
1156 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
1157 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
1158 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
1159 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
1160 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1161 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
1162 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1163 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1164
1165 pgmLock(pVM);
1166
1167 /*
1168 * Find range location and check for conflicts.
1169 * (We don't lock here because the locking by EMT is only required on update.)
1170 */
1171 PPGMRAMRANGE pPrev = NULL;
1172 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1173 while (pRam && GCPhysLast >= pRam->GCPhys)
1174 {
1175 if ( GCPhysLast >= pRam->GCPhys
1176 && GCPhys <= pRam->GCPhysLast)
1177 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
1178 GCPhys, GCPhysLast, pszDesc,
1179 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1180 VERR_PGM_RAM_CONFLICT);
1181
1182 /* next */
1183 pPrev = pRam;
1184 pRam = pRam->pNextR3;
1185 }
1186
1187 /*
1188 * Register it with GMM (the API bitches).
1189 */
1190 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
1191 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
1192 if (RT_FAILURE(rc))
1193 {
1194 pgmUnlock(pVM);
1195 return rc;
1196 }
1197
1198 if ( GCPhys >= _4G
1199 && cPages > 256)
1200 {
1201 /*
1202 * The PGMRAMRANGE structures for the high memory can get very big.
1203 * In order to avoid SUPR3PageAllocEx allocation failures due to the
1204 * allocation size limit there and also to avoid being unable to find
1205 * guest mapping space for them, we split this memory up into 4MB chunks in
1206 * (potential) raw-mode configs and 16MB chunks in forced AMD-V/VT-x
1207 * mode.
1208 *
1209 * The first and last page of each mapping are guard pages and marked
1210 * not-present. So, we've got 4186112 and 16769024 bytes available for
1211 * the PGMRAMRANGE structure.
1212 *
1213 * Note! The sizes used here will influence the saved state.
1214 */
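/* That is: 4 MB = 4194304 bytes less two 4 KB guard pages (8192) = 4186112 bytes,
   and 16 MB = 16777216 bytes less 8192 = 16769024 bytes. */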
1215 uint32_t cbChunk;
1216 uint32_t cPagesPerChunk;
1217 if (VMMIsHwVirtExtForced(pVM))
1218 {
1219 cbChunk = 16U*_1M;
1220 cPagesPerChunk = 1048048; /* max ~1048059 */
1221 AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 1048048 < 16U*_1M - PAGE_SIZE * 2);
1222 }
1223 else
1224 {
1225 cbChunk = 4U*_1M;
1226 cPagesPerChunk = 261616; /* max ~261627 */
1227 AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 261616 < 4U*_1M - PAGE_SIZE * 2);
1228 }
1229 AssertRelease(RT_UOFFSETOF(PGMRAMRANGE, aPages[cPagesPerChunk]) + PAGE_SIZE * 2 <= cbChunk);
1230
1231 RTGCPHYS cPagesLeft = cPages;
1232 RTGCPHYS GCPhysChunk = GCPhys;
1233 uint32_t iChunk = 0;
1234 while (cPagesLeft > 0)
1235 {
1236 uint32_t cPagesInChunk = cPagesLeft;
1237 if (cPagesInChunk > cPagesPerChunk)
1238 cPagesInChunk = cPagesPerChunk;
1239
1240 rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, cbChunk, iChunk, pszDesc, &pPrev);
1241 AssertRCReturn(rc, rc);
1242
1243 /* advance */
1244 GCPhysChunk += (RTGCPHYS)cPagesInChunk << PAGE_SHIFT;
1245 cPagesLeft -= cPagesInChunk;
1246 iChunk++;
1247 }
1248 }
1249 else
1250 {
1251 /*
1252 * Allocate, initialize and link the new RAM range.
1253 */
1254 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
1255 PPGMRAMRANGE pNew;
1256 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
1257 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
1258
1259 pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, NIL_RTRCPTR, NIL_RTR0PTR, pszDesc, pPrev);
1260 }
1261 PGMPhysInvalidatePageMapTLB(pVM);
1262 pgmUnlock(pVM);
1263
1264 /*
1265 * Notify REM.
1266 */
1267 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_RAM);
1268
1269 return VINF_SUCCESS;
1270}
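/* A minimal sketch of how base RAM is typically registered from VM construction code
 * running on the EMT; the split and the descriptions are illustrative:
 *
 *      rc = PGMR3PhysRegisterRam(pVM, 0, cbBelow4GB, "Base RAM");
 *      if (RT_SUCCESS(rc) && cbAbove4GB)
 *          rc = PGMR3PhysRegisterRam(pVM, _4G, cbAbove4GB, "Above 4GB Base RAM");
 *
 * Note that pszDesc is not copied, so it must point at storage that outlives the VM.
 */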
1271
1272
1273/**
1274 * Worker called by PGMR3InitFinalize if we're configured to pre-allocate RAM.
1275 *
1276 * We do this late in the init process so that all the ROM and MMIO ranges have
1277 * been registered already and we don't go wasting memory on them.
1278 *
1279 * @returns VBox status code.
1280 *
1281 * @param pVM Pointer to the shared VM structure.
1282 */
1283int pgmR3PhysRamPreAllocate(PVM pVM)
1284{
1285 Assert(pVM->pgm.s.fRamPreAlloc);
1286 Log(("pgmR3PhysRamPreAllocate: enter\n"));
1287
1288 /*
1289 * Walk the RAM ranges and allocate all RAM pages, halt at
1290 * the first allocation error.
1291 */
1292 uint64_t cPages = 0;
1293 uint64_t NanoTS = RTTimeNanoTS();
1294 pgmLock(pVM);
1295 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
1296 {
1297 PPGMPAGE pPage = &pRam->aPages[0];
1298 RTGCPHYS GCPhys = pRam->GCPhys;
1299 uint32_t cLeft = pRam->cb >> PAGE_SHIFT;
1300 while (cLeft-- > 0)
1301 {
1302 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
1303 {
1304 switch (PGM_PAGE_GET_STATE(pPage))
1305 {
1306 case PGM_PAGE_STATE_ZERO:
1307 {
1308 int rc = pgmPhysAllocPage(pVM, pPage, GCPhys);
1309 if (RT_FAILURE(rc))
1310 {
1311 LogRel(("PGM: RAM Pre-allocation failed at %RGp (in %s) with rc=%Rrc\n", GCPhys, pRam->pszDesc, rc));
1312 pgmUnlock(pVM);
1313 return rc;
1314 }
1315 cPages++;
1316 break;
1317 }
1318
1319 case PGM_PAGE_STATE_BALLOONED:
1320 case PGM_PAGE_STATE_ALLOCATED:
1321 case PGM_PAGE_STATE_WRITE_MONITORED:
1322 case PGM_PAGE_STATE_SHARED:
1323 /* nothing to do here. */
1324 break;
1325 }
1326 }
1327
1328 /* next */
1329 pPage++;
1330 GCPhys += PAGE_SIZE;
1331 }
1332 }
1333 pgmUnlock(pVM);
1334 NanoTS = RTTimeNanoTS() - NanoTS;
1335
1336 LogRel(("PGM: Pre-allocated %llu pages in %llu ms\n", cPages, NanoTS / 1000000));
1337 Log(("pgmR3PhysRamPreAllocate: returns VINF_SUCCESS\n"));
1338 return VINF_SUCCESS;
1339}
1340
1341
1342/**
1343 * Resets (zeros) the RAM.
1344 *
1345 * ASSUMES that the caller owns the PGM lock.
1346 *
1347 * @returns VBox status code.
1348 * @param pVM Pointer to the shared VM structure.
1349 */
1350int pgmR3PhysRamReset(PVM pVM)
1351{
1352 Assert(PGMIsLockOwner(pVM));
1353
1354 /* Reset the memory balloon. */
1355 int rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_RESET, 0);
1356 AssertRC(rc);
1357
1358 /*
1359 * We batch up pages that should be freed instead of calling GMM for
1360 * each and every one of them.
1361 */
1362 uint32_t cPendingPages = 0;
1363 PGMMFREEPAGESREQ pReq;
1364 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1365 AssertLogRelRCReturn(rc, rc);
1366
1367 /*
1368 * Walk the ram ranges.
1369 */
1370 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
1371 {
1372 uint32_t iPage = pRam->cb >> PAGE_SHIFT;
1373 AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
1374
1375 if (!pVM->pgm.s.fRamPreAlloc)
1376 {
1377 /* Replace all RAM pages by ZERO pages. */
1378 while (iPage-- > 0)
1379 {
1380 PPGMPAGE pPage = &pRam->aPages[iPage];
1381 switch (PGM_PAGE_GET_TYPE(pPage))
1382 {
1383 case PGMPAGETYPE_RAM:
1384 /* Do not replace pages part of a 2 MB continuous range with zero pages, but zero them instead. */
1385 if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
1386 {
1387 void *pvPage;
1388 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pvPage);
1389 AssertLogRelRCReturn(rc, rc);
1390 ASMMemZeroPage(pvPage);
1391 }
1392 else
1393 if (PGM_PAGE_IS_BALLOONED(pPage))
1394 {
1395 /* Turn into a zero page; the balloon status is lost when the VM reboots. */
1396 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
1397 }
1398 else
1399 if (!PGM_PAGE_IS_ZERO(pPage))
1400 {
1401 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1402 AssertLogRelRCReturn(rc, rc);
1403 }
1404 break;
1405
1406 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
1407 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1408 break;
1409
1410 case PGMPAGETYPE_MMIO2:
1411 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
1412 case PGMPAGETYPE_ROM:
1413 case PGMPAGETYPE_MMIO:
1414 break;
1415 default:
1416 AssertFailed();
1417 }
1418 } /* for each page */
1419 }
1420 else
1421 {
1422 /* Zero the memory. */
1423 while (iPage-- > 0)
1424 {
1425 PPGMPAGE pPage = &pRam->aPages[iPage];
1426 switch (PGM_PAGE_GET_TYPE(pPage))
1427 {
1428 case PGMPAGETYPE_RAM:
1429 switch (PGM_PAGE_GET_STATE(pPage))
1430 {
1431 case PGM_PAGE_STATE_ZERO:
1432 break;
1433
1434 case PGM_PAGE_STATE_BALLOONED:
1435 /* Turn into a zero page; the balloon status is lost when the VM reboots. */
1436 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
1437 break;
1438
1439 case PGM_PAGE_STATE_SHARED:
1440 case PGM_PAGE_STATE_WRITE_MONITORED:
1441 rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1442 AssertLogRelRCReturn(rc, rc);
1443 /* no break */
1444
1445 case PGM_PAGE_STATE_ALLOCATED:
1446 {
1447 void *pvPage;
1448 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pvPage);
1449 AssertLogRelRCReturn(rc, rc);
1450 ASMMemZeroPage(pvPage);
1451 break;
1452 }
1453 }
1454 break;
1455
1456 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
1457 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1458 break;
1459
1460 case PGMPAGETYPE_MMIO2:
1461 case PGMPAGETYPE_ROM_SHADOW:
1462 case PGMPAGETYPE_ROM:
1463 case PGMPAGETYPE_MMIO:
1464 break;
1465 default:
1466 AssertFailed();
1467
1468 }
1469 } /* for each page */
1470 }
1471
1472 }
1473
1474 /*
1475 * Finish off any pages pending freeing.
1476 */
1477 if (cPendingPages)
1478 {
1479 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
1480 AssertLogRelRCReturn(rc, rc);
1481 }
1482 GMMR3FreePagesCleanup(pReq);
1483
1484 return VINF_SUCCESS;
1485}
1486
1487
1488/**
1489 * This is the interface IOM is using to register an MMIO region.
1490 *
1491 * It will check for conflicts and ensure that a RAM range structure
1492 * is present before calling the PGMR3HandlerPhysicalRegister API to
1493 * register the callbacks.
1494 *
1495 * @returns VBox status code.
1496 *
1497 * @param pVM Pointer to the shared VM structure.
1498 * @param GCPhys The start of the MMIO region.
1499 * @param cb The size of the MMIO region.
1500 * @param pfnHandlerR3 The address of the ring-3 handler. (IOMR3MMIOHandler)
1501 * @param pvUserR3 The user argument for R3.
1502 * @param pfnHandlerR0 The address of the ring-0 handler. (IOMMMIOHandler)
1503 * @param pvUserR0 The user argument for R0.
1504 * @param pfnHandlerRC The address of the RC handler. (IOMMMIOHandler)
1505 * @param pvUserRC The user argument for RC.
1506 * @param pszDesc The description of the MMIO region.
1507 */
1508VMMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb,
1509 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
1510 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
1511 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
1512 R3PTRTYPE(const char *) pszDesc)
1513{
1514 /*
1515 * Assert on some assumption.
1516 */
1517 VM_ASSERT_EMT(pVM);
1518 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1519 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1520 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1521 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
1522
1523 /*
1524 * Make sure there's a RAM range structure for the region.
1525 */
1526 int rc;
1527 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1528 bool fRamExists = false;
1529 PPGMRAMRANGE pRamPrev = NULL;
1530 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1531 while (pRam && GCPhysLast >= pRam->GCPhys)
1532 {
1533 if ( GCPhysLast >= pRam->GCPhys
1534 && GCPhys <= pRam->GCPhysLast)
1535 {
1536 /* Simplification: all within the same range. */
1537 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
1538 && GCPhysLast <= pRam->GCPhysLast,
1539 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
1540 GCPhys, GCPhysLast, pszDesc,
1541 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1542 VERR_PGM_RAM_CONFLICT);
1543
1544 /* Check that it's all RAM or MMIO pages. */
1545 PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1546 uint32_t cLeft = cb >> PAGE_SHIFT;
1547 while (cLeft-- > 0)
1548 {
1549 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
1550 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
1551 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
1552 GCPhys, GCPhysLast, pszDesc, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
1553 VERR_PGM_RAM_CONFLICT);
1554 pPage++;
1555 }
1556
1557 /* Looks good. */
1558 fRamExists = true;
1559 break;
1560 }
1561
1562 /* next */
1563 pRamPrev = pRam;
1564 pRam = pRam->pNextR3;
1565 }
1566 PPGMRAMRANGE pNew;
1567 if (fRamExists)
1568 {
1569 pNew = NULL;
1570
1571 /*
1572 * Make all the pages in the range MMIO/ZERO pages, freeing any
1573 * RAM pages currently mapped here. This might not be 100% correct
1574 * for PCI memory, but we're doing the same thing for MMIO2 pages.
1575 */
1576 rc = pgmLock(pVM);
1577 if (RT_SUCCESS(rc))
1578 {
1579 rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, PGMPAGETYPE_MMIO);
1580 pgmUnlock(pVM);
1581 }
1582 AssertRCReturn(rc, rc);
1583 }
1584 else
1585 {
1586 pgmLock(pVM);
1587
1588 /*
1589 * No RAM range, insert an ad hoc one.
1590 *
1591 * Note that we don't have to tell REM about this range because
1592 * PGMHandlerPhysicalRegisterEx will do that for us.
1593 */
1594 Log(("PGMR3PhysMMIORegister: Adding ad hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));
1595
1596 const uint32_t cPages = cb >> PAGE_SHIFT;
1597 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
1598 rc = MMHyperAlloc(pVM, cbRamRange, 16, MM_TAG_PGM_PHYS, (void **)&pNew);
1599 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
1600
1601 /* Initialize the range. */
1602 pNew->pSelfR0 = MMHyperCCToR0(pVM, pNew);
1603 pNew->pSelfRC = MMHyperCCToRC(pVM, pNew);
1604 pNew->GCPhys = GCPhys;
1605 pNew->GCPhysLast = GCPhysLast;
1606 pNew->cb = cb;
1607 pNew->pszDesc = pszDesc;
1608 pNew->fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO;
1609 pNew->pvR3 = NULL;
1610 pNew->paLSPages = NULL;
1611
1612 uint32_t iPage = cPages;
1613 while (iPage-- > 0)
1614 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
1615 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);
1616
1617 /* update the page count stats. */
1618 pVM->pgm.s.cPureMmioPages += cPages;
1619 pVM->pgm.s.cAllPages += cPages;
1620
1621 /* link it */
1622 pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
1623
1624 pgmUnlock(pVM);
1625 }
1626
1627 /*
1628 * Register the access handler.
1629 */
1630 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_MMIO, GCPhys, GCPhysLast,
1631 pfnHandlerR3, pvUserR3,
1632 pfnHandlerR0, pvUserR0,
1633 pfnHandlerRC, pvUserRC, pszDesc);
1634 if ( RT_FAILURE(rc)
1635 && !fRamExists)
1636 {
1637 pVM->pgm.s.cPureMmioPages -= cb >> PAGE_SHIFT;
1638 pVM->pgm.s.cAllPages -= cb >> PAGE_SHIFT;
1639
1640 /* remove the ad hoc range. */
1641 pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
1642 pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
1643 MMHyperFree(pVM, pNew);
1644 }
1645 PGMPhysInvalidatePageMapTLB(pVM);
1646
1647 return rc;
1648}
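/* A hedged sketch of an ad hoc MMIO registration and its later teardown. The ring-3
 * handler merely illustrates the callback shape used elsewhere in this file (see the
 * pgmR3PhysRomWriteHandler forward declaration); passing NIL handlers/arguments for
 * the R0 and RC contexts is an assumption of this example:
 *
 *      static DECLCALLBACK(int) myMmioHandlerR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys,
 *                                               void *pvBuf, size_t cbBuf,
 *                                               PGMACCESSTYPE enmAccessType, void *pvUser)
 *      {
 *          // decode the access and forward it to the device instance in pvUser
 *          return VINF_SUCCESS;
 *      }
 *
 *      rc = PGMR3PhysMMIORegister(pVM, GCPhysMmio, cbMmio,
 *                                 myMmioHandlerR3, pDevIns,
 *                                 NIL_RTR0PTR, NIL_RTR0PTR,
 *                                 NIL_RTRCPTR, NIL_RTRCPTR, "MyDevice MMIO");
 *      ...
 *      rc = PGMR3PhysMMIODeregister(pVM, GCPhysMmio, cbMmio);
 */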
1649
1650
1651/**
1652 * This is the interface IOM is using to deregister an MMIO region.
1653 *
1654 * It will take care of calling PGMHandlerPhysicalDeregister and clean up
1655 * any ad hoc PGMRAMRANGE left behind.
1656 *
1657 * @returns VBox status code.
1658 * @param pVM Pointer to the shared VM structure.
1659 * @param GCPhys The start of the MMIO region.
1660 * @param cb The size of the MMIO region.
1661 */
1662VMMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
1663{
1664 VM_ASSERT_EMT(pVM);
1665
1666 /*
1667 * First deregister the handler, then check if we should remove the ram range.
1668 */
1669 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
1670 if (RT_SUCCESS(rc))
1671 {
1672 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1673 PPGMRAMRANGE pRamPrev = NULL;
1674 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1675 while (pRam && GCPhysLast >= pRam->GCPhys)
1676 {
1677 /** @todo We're being a bit too careful here. rewrite. */
1678 if ( GCPhysLast == pRam->GCPhysLast
1679 && GCPhys == pRam->GCPhys)
1680 {
1681 Assert(pRam->cb == cb);
1682
1683 /*
1684 * See if all the pages are dead MMIO pages.
1685 */
1686 uint32_t const cPages = cb >> PAGE_SHIFT;
1687 bool fAllMMIO = true;
1688 uint32_t iPage = 0;
1689 uint32_t cLeft = cPages;
1690 while (cLeft-- > 0)
1691 {
1692 PPGMPAGE pPage = &pRam->aPages[iPage];
1693 if ( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO
1694 /*|| not-out-of-action later */)
1695 {
1696 fAllMMIO = false;
1697 Assert(PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1698 AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
1699 break;
1700 }
1701 Assert(PGM_PAGE_IS_ZERO(pPage));
1702 pPage++;
1703 }
1704 if (fAllMMIO)
1705 {
1706 /*
1707 * Ad-hoc range, unlink and free it.
1708 */
1709 Log(("PGMR3PhysMMIODeregister: Freeing ad hoc MMIO range for %RGp-%RGp %s\n",
1710 GCPhys, GCPhysLast, pRam->pszDesc));
1711
1712 pVM->pgm.s.cAllPages -= cPages;
1713 pVM->pgm.s.cPureMmioPages -= cPages;
1714
1715 pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
1716 pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
1717 MMHyperFree(pVM, pRam);
1718 break;
1719 }
1720 }
1721
1722 /*
1723 * Range match? It will all be within one range (see PGMAllHandler.cpp).
1724 */
1725 if ( GCPhysLast >= pRam->GCPhys
1726 && GCPhys <= pRam->GCPhysLast)
1727 {
1728 Assert(GCPhys >= pRam->GCPhys);
1729 Assert(GCPhysLast <= pRam->GCPhysLast);
1730
1731 /*
1732 * Turn the pages back into RAM pages.
1733 */
1734 uint32_t iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1735 uint32_t cLeft = cb >> PAGE_SHIFT;
1736 while (cLeft--)
1737 {
1738 PPGMPAGE pPage = &pRam->aPages[iPage];
1739 AssertMsg(PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
1740 AssertMsg(PGM_PAGE_IS_ZERO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
1741 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
1742 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_RAM);
1743 }
1744 break;
1745 }
1746
1747 /* next */
1748 pRamPrev = pRam;
1749 pRam = pRam->pNextR3;
1750 }
1751 }
1752
1753 PGMPhysInvalidatePageMapTLB(pVM);
1754 return rc;
1755}
1756
1757
1758/**
1759 * Locate a MMIO2 range.
1760 *
1761 * @returns Pointer to the MMIO2 range.
1762 * @param pVM Pointer to the shared VM structure.
1763 * @param pDevIns The device instance owning the region.
1764 * @param iRegion The region.
1765 */
1766DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
1767{
1768 /*
1769 * Search the list.
1770 */
1771 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
1772 if ( pCur->pDevInsR3 == pDevIns
1773 && pCur->iRegion == iRegion)
1774 return pCur;
1775 return NULL;
1776}
1777
1778
1779/**
1780 * Allocate and register an MMIO2 region.
1781 *
1782 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's
1783 * RAM associated with a device. It is also non-shared memory with a
1784 * permanent ring-3 mapping and page backing (presently).
1785 *
1786 * A MMIO2 range may overlap with base memory if a lot of RAM
1787 * is configured for the VM, in which case we'll drop the base
1788 * memory pages. Presently we will make no attempt to preserve
1789 * anything that happens to be present in the base memory that
1790 * is replaced; this is of course incorrect, but it's too much
1791 * effort.
1792 *
1793 * @returns VBox status code.
1794 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the memory.
1795 * @retval VERR_ALREADY_EXISTS if the region already exists.
1796 *
1797 * @param pVM Pointer to the shared VM structure.
1798 * @param pDevIns The device instance owning the region.
1799 * @param iRegion The region number. If the MMIO2 memory is a PCI I/O region
1800 * this number has to be the number of that region. Otherwise
1801 * it can be any number up to UINT8_MAX.
1802 * @param cb The size of the region. Must be page aligned.
1803 * @param fFlags Reserved for future use, must be zero.
1804 * @param ppv Where to store the pointer to the ring-3 mapping of the memory.
1805 * @param pszDesc The description.
1806 */
1807VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc)
1808{
1809 /*
1810 * Validate input.
1811 */
1812 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1813 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1814 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1815 AssertPtrReturn(ppv, VERR_INVALID_POINTER);
1816 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1817 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
1818 AssertReturn(pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
1819 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1820 AssertReturn(cb, VERR_INVALID_PARAMETER);
1821 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
1822
1823 const uint32_t cPages = cb >> PAGE_SHIFT;
1824 AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
1825 AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY);
1826
1827 /*
1828 * For the 2nd+ instance, mangle the description string so it's unique.
1829 */
1830 if (pDevIns->iInstance > 0) /** @todo Move to PDMDevHlp.cpp and use a real string cache. */
1831 {
1832 pszDesc = MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s [%u]", pszDesc, pDevIns->iInstance);
1833 if (!pszDesc)
1834 return VERR_NO_MEMORY;
1835 }
1836
1837 /*
1838 * Try to reserve and allocate the backing memory first, as this is what is
1839 * most likely to fail.
1840 */
1841 int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc);
1842 if (RT_SUCCESS(rc))
1843 {
1844 void *pvPages;
1845 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE));
1846 if (RT_LIKELY(paPages))
1847 rc = SUPR3PageAllocEx(cPages, 0 /*fFlags*/, &pvPages, NULL /*pR0Ptr*/, paPages);
     else
         rc = VERR_NO_TMP_MEMORY; /* RTMemTmpAlloc failed. */
1848 if (RT_SUCCESS(rc))
1849 {
1850 memset(pvPages, 0, cPages * PAGE_SIZE);
1851
1852 /*
1853 * Create the MMIO2 range record for it.
1854 */
1855 const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]);
1856 PPGMMMIO2RANGE pNew;
1857 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
1858 AssertLogRelMsgRC(rc, ("cbRamRange=%zu\n", cbRange));
1859 if (RT_SUCCESS(rc))
1860 {
1861 pNew->pDevInsR3 = pDevIns;
1862 pNew->pvR3 = pvPages;
1863 //pNew->pNext = NULL;
1864 //pNew->fMapped = false;
1865 //pNew->fOverlapping = false;
1866 pNew->iRegion = iRegion;
1867 pNew->idSavedState = UINT8_MAX;
1868 pNew->RamRange.pSelfR0 = MMHyperCCToR0(pVM, &pNew->RamRange);
1869 pNew->RamRange.pSelfRC = MMHyperCCToRC(pVM, &pNew->RamRange);
1870 pNew->RamRange.GCPhys = NIL_RTGCPHYS;
1871 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
1872 pNew->RamRange.pszDesc = pszDesc;
1873 pNew->RamRange.cb = cb;
1874 pNew->RamRange.fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2;
1875 pNew->RamRange.pvR3 = pvPages;
1876 //pNew->RamRange.paLSPages = NULL;
1877
1878 uint32_t iPage = cPages;
1879 while (iPage-- > 0)
1880 {
1881 PGM_PAGE_INIT(&pNew->RamRange.aPages[iPage],
1882 paPages[iPage].Phys, NIL_GMM_PAGEID,
1883 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
1884 }
1885
1886 /* update page count stats */
1887 pVM->pgm.s.cAllPages += cPages;
1888 pVM->pgm.s.cPrivatePages += cPages;
1889
1890 /*
1891 * Link it into the list.
1892 * Since there is no particular order, just push it.
1893 */
1894 pgmLock(pVM);
1895 pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3;
1896 pVM->pgm.s.pMmio2RangesR3 = pNew;
1897 pgmUnlock(pVM);
1898
1899 *ppv = pvPages;
1900 RTMemTmpFree(paPages);
1901 PGMPhysInvalidatePageMapTLB(pVM);
1902 return VINF_SUCCESS;
1903 }
1904
1905 SUPR3PageFreeEx(pvPages, cPages);
1906 }
1907 RTMemTmpFree(paPages);
1908 MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pszDesc);
1909 }
1910 if (pDevIns->iInstance > 0)
1911 MMR3HeapFree((void *)pszDesc);
1912 return rc;
1913}
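/*
 * Illustrative usage sketch (not taken from the sources): a device constructor
 * allocating a 2 MB MMIO2 region for a PCI BAR.  pVM and pDevIns are assumed to
 * come from the device construction context; the region number and the
 * description string are hypothetical.
 *
 *     void *pvMmio2 = NULL;
 *     int rc = PGMR3PhysMMIO2Register(pVM, pDevIns, 0,          // iRegion
 *                                     2 * _1M, 0,               // cb, fFlags
 *                                     &pvMmio2, "Example frame buffer");
 *     if (RT_FAILURE(rc))
 *         return rc;
 *     // pvMmio2 is the permanent ring-3 mapping of the backing pages; the
 *     // guest won't see the region until PGMR3PhysMMIO2Map() is called for it.
 */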
1914
1915
1916/**
1917 * Deregisters and frees an MMIO2 region.
1918 *
1919 * Any physical (and virtual) access handlers registered for the region must
1920 * be deregistered before calling this function.
1921 *
1922 * @returns VBox status code.
1923 * @param pVM Pointer to the shared VM structure.
1924 * @param pDevIns The device instance owning the region.
1925 * @param iRegion The region. If it's UINT32_MAX it'll be a wildcard match.
1926 */
1927VMMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
1928{
1929 /*
1930 * Validate input.
1931 */
1932 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1933 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1934 AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
1935
1936 pgmLock(pVM);
1937 int rc = VINF_SUCCESS;
1938 unsigned cFound = 0;
1939 PPGMMMIO2RANGE pPrev = NULL;
1940 PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3;
1941 while (pCur)
1942 {
1943 if ( pCur->pDevInsR3 == pDevIns
1944 && ( iRegion == UINT32_MAX
1945 || pCur->iRegion == iRegion))
1946 {
1947 cFound++;
1948
1949 /*
1950 * Unmap it if it's mapped.
1951 */
1952 if (pCur->fMapped)
1953 {
1954 int rc2 = PGMR3PhysMMIO2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);
1955 AssertRC(rc2);
1956 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1957 rc = rc2;
1958 }
1959
1960 /*
1961 * Unlink it
1962 */
1963 PPGMMMIO2RANGE pNext = pCur->pNextR3;
1964 if (pPrev)
1965 pPrev->pNextR3 = pNext;
1966 else
1967 pVM->pgm.s.pMmio2RangesR3 = pNext;
1968 pCur->pNextR3 = NULL;
1969
1970 /*
1971 * Free the memory.
1972 */
1973 int rc2 = SUPR3PageFreeEx(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT);
1974 AssertRC(rc2);
1975 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1976 rc = rc2;
1977
1978 uint32_t const cPages = pCur->RamRange.cb >> PAGE_SHIFT;
1979 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pCur->RamRange.pszDesc);
1980 AssertRC(rc2);
1981 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1982 rc = rc2;
1983
1984 /* we're leaking hyper memory here if done at runtime. */
1985#ifdef VBOX_STRICT
1986 VMSTATE const enmState = VMR3GetState(pVM);
1987 AssertMsg( enmState == VMSTATE_POWERING_OFF
1988 || enmState == VMSTATE_POWERING_OFF_LS
1989 || enmState == VMSTATE_OFF
1990 || enmState == VMSTATE_OFF_LS
1991 || enmState == VMSTATE_DESTROYING
1992 || enmState == VMSTATE_TERMINATED
1993 || enmState == VMSTATE_CREATING
1994 , ("%s\n", VMR3GetStateName(enmState)));
1995#endif
1996 /*rc = MMHyperFree(pVM, pCur);
1997 AssertRCReturn(rc, rc); - not safe, see the alloc call. */
1998
1999
2000 /* update page count stats */
2001 pVM->pgm.s.cAllPages -= cPages;
2002 pVM->pgm.s.cPrivatePages -= cPages;
2003
2004 /* next */
2005 pCur = pNext;
2006 }
2007 else
2008 {
2009 pPrev = pCur;
2010 pCur = pCur->pNextR3;
2011 }
2012 }
2013 PGMPhysInvalidatePageMapTLB(pVM);
2014 pgmUnlock(pVM);
2015 return !cFound && iRegion != UINT32_MAX ? VERR_NOT_FOUND : rc;
2016}
2017
2018
2019/**
2020 * Maps a MMIO2 region.
2021 *
2022 * This is done when a guest / the bios / state loading changes the
2023 * PCI config. The replacing of base memory has the same restrictions
2024 * as during registration, of course.
2025 *
2026 * @returns VBox status code.
2027 *
2028 * @param pVM Pointer to the shared VM structure.
2029 * @param   pDevIns     The device instance owning the region.
 * @param   iRegion     The region number.
 * @param   GCPhys      The guest physical address to map the region at.
2030 */
2031VMMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
2032{
2033 /*
2034 * Validate input
2035 */
2036 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2037 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2038 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2039 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
2040 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
2041 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2042
2043 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2044 AssertReturn(pCur, VERR_NOT_FOUND);
2045 AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER);
2046 Assert(pCur->RamRange.GCPhys == NIL_RTGCPHYS);
2047 Assert(pCur->RamRange.GCPhysLast == NIL_RTGCPHYS);
2048
2049 const RTGCPHYS GCPhysLast = GCPhys + pCur->RamRange.cb - 1;
2050 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2051
2052 /*
2053 * Find our location in the ram range list, checking for
2054 * restrictions we don't bother implementing yet (partial overlaps).
2055 */
2056 bool fRamExists = false;
2057 PPGMRAMRANGE pRamPrev = NULL;
2058 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
2059 while (pRam && GCPhysLast >= pRam->GCPhys)
2060 {
2061 if ( GCPhys <= pRam->GCPhysLast
2062 && GCPhysLast >= pRam->GCPhys)
2063 {
2064 /* completely within? */
2065 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
2066 && GCPhysLast <= pRam->GCPhysLast,
2067 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
2068 GCPhys, GCPhysLast, pCur->RamRange.pszDesc,
2069 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
2070 VERR_PGM_RAM_CONFLICT);
2071 fRamExists = true;
2072 break;
2073 }
2074
2075 /* next */
2076 pRamPrev = pRam;
2077 pRam = pRam->pNextR3;
2078 }
2079 if (fRamExists)
2080 {
2081 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2082 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
2083 while (cPagesLeft-- > 0)
2084 {
2085 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
2086 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
2087 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc),
2088 VERR_PGM_RAM_CONFLICT);
2089 pPage++;
2090 }
2091 }
2092 Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n",
2093 GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc));
2094
2095 /*
2096 * Make the changes.
2097 */
2098 pgmLock(pVM);
2099
2100 pCur->RamRange.GCPhys = GCPhys;
2101 pCur->RamRange.GCPhysLast = GCPhysLast;
2102 pCur->fMapped = true;
2103 pCur->fOverlapping = fRamExists;
2104
2105 if (fRamExists)
2106 {
2107/** @todo use pgmR3PhysFreePageRange here. */
2108 uint32_t cPendingPages = 0;
2109 PGMMFREEPAGESREQ pReq;
2110 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2111 AssertLogRelRCReturn(rc, rc);
2112
2113 /* replace the pages, freeing all present RAM pages. */
2114 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
2115 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2116 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
2117 while (cPagesLeft-- > 0)
2118 {
2119 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
2120 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
2121
2122 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
2123 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhys);
2124 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_MMIO2);
2125 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ALLOCATED);
2126
2127 pVM->pgm.s.cZeroPages--;
2128 GCPhys += PAGE_SIZE;
2129 pPageSrc++;
2130 pPageDst++;
2131 }
2132
2133 /* Flush physical page map TLB. */
2134 PGMPhysInvalidatePageMapTLB(pVM);
2135
2136 if (cPendingPages)
2137 {
2138 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2139 AssertLogRelRCReturn(rc, rc);
2140 }
2141 GMMR3FreePagesCleanup(pReq);
2142 pgmUnlock(pVM);
2143 }
2144 else
2145 {
2146 RTGCPHYS cb = pCur->RamRange.cb;
2147
2148 /* link in the ram range */
2149 pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev);
2150 pgmUnlock(pVM);
2151
2152 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2);
2153 }
2154
2155 PGMPhysInvalidatePageMapTLB(pVM);
2156 return VINF_SUCCESS;
2157}
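/*
 * Illustrative mapping sketch (not taken from the sources): typically driven
 * from a PCI region-mapping callback when the guest programs the BAR.
 * GCPhysAddress stands for the page aligned address the guest selected, and
 * region number 0 matches the hypothetical registration example above.
 *
 *     int rc = PGMR3PhysMMIO2Map(pVM, pDevIns, 0, GCPhysAddress);
 *     AssertRCReturn(rc, rc);
 *     // If the BAR is later moved or disabled, the old mapping is removed
 *     // first with PGMR3PhysMMIO2Unmap(pVM, pDevIns, 0, GCPhysOldAddress).
 */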
2158
2159
2160/**
2161 * Unmaps a MMIO2 region.
2162 *
2163 * This is done when a guest / the bios / state loading changes the
2164 * PCI config. The replacing of base memory has the same restrictions
2165 * as during registration, of course.
2166 *
 * @returns VBox status code.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pDevIns     The device instance owning the region.
 * @param   iRegion     The region number.
 * @param   GCPhys      The guest physical address the region is currently mapped at.
 */
2167VMMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
2168{
2169 /*
2170 * Validate input
2171 */
2172 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2173 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2174 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2175 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
2176 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
2177 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2178
2179 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2180 AssertReturn(pCur, VERR_NOT_FOUND);
2181 AssertReturn(pCur->fMapped, VERR_WRONG_ORDER);
2182 AssertReturn(pCur->RamRange.GCPhys == GCPhys, VERR_INVALID_PARAMETER);
2183 Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS);
2184
2185 Log(("PGMR3PhysMMIO2Unmap: %RGp-%RGp %s\n",
2186 pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc));
2187
2188 /*
2189 * Unmap it.
2190 */
2191 pgmLock(pVM);
2192
2193 RTGCPHYS GCPhysRangeREM;
2194 RTGCPHYS cbRangeREM;
2195 bool fInformREM;
2196 if (pCur->fOverlapping)
2197 {
2198 /* Restore the RAM pages we've replaced. */
2199 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
2200         while (pRam->GCPhysLast < pCur->RamRange.GCPhys)
2201 pRam = pRam->pNextR3;
2202
2203 RTHCPHYS const HCPhysZeroPg = pVM->pgm.s.HCPhysZeroPg;
2204 Assert(HCPhysZeroPg != 0 && HCPhysZeroPg != NIL_RTHCPHYS);
2205 PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2206 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
2207 while (cPagesLeft-- > 0)
2208 {
2209 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhysZeroPg);
2210 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_RAM);
2211 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ZERO);
2212 PGM_PAGE_SET_PAGEID(pPageDst, NIL_GMM_PAGEID);
2213 PGM_PAGE_SET_PDE_TYPE(pPageDst, PGM_PAGE_PDE_TYPE_DONTCARE);
2214
2215 pVM->pgm.s.cZeroPages++;
2216 pPageDst++;
2217 }
2218
2219 /* Flush physical page map TLB. */
2220 PGMPhysInvalidatePageMapTLB(pVM);
2221
2222 GCPhysRangeREM = NIL_RTGCPHYS; /* shuts up gcc */
2223 cbRangeREM = RTGCPHYS_MAX; /* ditto */
2224 fInformREM = false;
2225 }
2226 else
2227 {
2228 GCPhysRangeREM = pCur->RamRange.GCPhys;
2229 cbRangeREM = pCur->RamRange.cb;
2230 fInformREM = true;
2231
2232 pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange);
2233 }
2234
2235 pCur->RamRange.GCPhys = NIL_RTGCPHYS;
2236 pCur->RamRange.GCPhysLast = NIL_RTGCPHYS;
2237 pCur->fOverlapping = false;
2238 pCur->fMapped = false;
2239
2240 PGMPhysInvalidatePageMapTLB(pVM);
2241 pgmUnlock(pVM);
2242
2243 if (fInformREM)
2244 REMR3NotifyPhysRamDeregister(pVM, GCPhysRangeREM, cbRangeREM);
2245
2246 return VINF_SUCCESS;
2247}
2248
2249
2250/**
2251 * Checks if the given address is an MMIO2 base address or not.
2252 *
2253 * @returns true/false accordingly.
2254 * @param pVM Pointer to the shared VM structure.
2255 * @param   pDevIns     The owner of the memory.
2256 * @param GCPhys The address to check.
2257 */
2258VMMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
2259{
2260 /*
2261 * Validate input
2262 */
2263 VM_ASSERT_EMT_RETURN(pVM, false);
2264 AssertPtrReturn(pDevIns, false);
2265 AssertReturn(GCPhys != NIL_RTGCPHYS, false);
2266 AssertReturn(GCPhys != 0, false);
2267 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);
2268
2269 /*
2270 * Search the list.
2271 */
2272 pgmLock(pVM);
2273 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
2274 if (pCur->RamRange.GCPhys == GCPhys)
2275 {
2276 Assert(pCur->fMapped);
2277 pgmUnlock(pVM);
2278 return true;
2279 }
2280 pgmUnlock(pVM);
2281 return false;
2282}
2283
2284
2285/**
2286 * Gets the HC physical address of a page in the MMIO2 region.
2287 *
2288 * This API is intended for MMHyper and shouldn't be called
2289 * by anyone else...
2290 *
2291 * @returns VBox status code.
2292 * @param pVM Pointer to the shared VM structure.
2293 * @param   pDevIns     The owner of the memory.
2294 * @param iRegion The region.
2295 * @param   off         The page expressed as an offset into the MMIO2 region.
2296 * @param pHCPhys Where to store the result.
2297 */
2298VMMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
2299{
2300 /*
2301 * Validate input
2302 */
2303 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2304 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2305 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2306
2307 pgmLock(pVM);
2308 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2309 AssertReturn(pCur, VERR_NOT_FOUND);
2310 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2311
2312 PCPGMPAGE pPage = &pCur->RamRange.aPages[off >> PAGE_SHIFT];
2313 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
2314 pgmUnlock(pVM);
2315 return VINF_SUCCESS;
2316}
2317
2318
2319/**
2320 * Maps a portion of an MMIO2 region into kernel space (host).
2321 *
2322 * The kernel mapping will become invalid when the MMIO2 memory is deregistered
2323 * or the VM is terminated.
2324 *
2325 * @return VBox status code.
2326 *
2327 * @param pVM Pointer to the shared VM structure.
2328 * @param pDevIns The device owning the MMIO2 memory.
2329 * @param iRegion The region.
2330 * @param off The offset into the region. Must be page aligned.
2331 * @param cb The number of bytes to map. Must be page aligned.
2332 * @param pszDesc Mapping description.
2333 * @param pR0Ptr Where to store the R0 address.
2334 */
2335VMMR3DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
2336 const char *pszDesc, PRTR0PTR pR0Ptr)
2337{
2338 /*
2339 * Validate input.
2340 */
2341 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2342 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2343 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2344
2345 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2346 AssertReturn(pCur, VERR_NOT_FOUND);
2347 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2348 AssertReturn(cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2349 AssertReturn(off + cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
2350
2351 /*
2352 * Pass the request on to the support library/driver.
2353 */
2354 int rc = SUPR3PageMapKernel(pCur->pvR3, off, cb, 0, pR0Ptr);
2355
2356 return rc;
2357}
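/*
 * Illustrative sketch (not taken from the sources): mapping the first page of
 * the MMIO2 region from the examples above into ring-0, e.g. so the ring-0
 * part of the device can touch it directly.  The offset, size and description
 * are hypothetical.
 *
 *     RTR0PTR R0PtrMmio2 = NIL_RTR0PTR;
 *     int rc = PGMR3PhysMMIO2MapKernel(pVM, pDevIns, 0,         // iRegion
 *                                      0, PAGE_SIZE,            // off, cb
 *                                      "Example R0 view", &R0PtrMmio2);
 */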
2358
2359
2360/**
2361 * Registers a ROM image.
2362 *
2363 * Shadowed ROM images require double the amount of backing memory, so
2364 * don't use them unless you have to. Shadowing of ROM images is a process
2365 * where we can select where the reads go and where the writes go. On real
2366 * hardware the chipset provides means to configure this. We provide
2367 * PGMR3PhysProtectROM() for this purpose.
2368 *
2369 * A read-only copy of the ROM image will always be kept around while we
2370 * will allocate RAM pages for the changes on demand (unless all memory
2371 * is configured to be preallocated).
2372 *
2373 * @returns VBox status.
2374 * @param pVM VM Handle.
2375 * @param pDevIns The device instance owning the ROM.
2376 * @param GCPhys First physical address in the range.
2377 * Must be page aligned!
2378 * @param   cb          The size of the range (in bytes).
2379 * Must be page aligned!
2380 * @param pvBinary Pointer to the binary data backing the ROM image.
2381 * This must be exactly \a cb in size.
2382 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAGS_SHADOWED
2383 * and/or PGMPHYS_ROM_FLAGS_PERMANENT_BINARY.
2384 * @param pszDesc Pointer to description string. This must not be freed.
2385 *
2386 * @remark  There is no way to remove the ROM, either automatically on device cleanup
2387 *          or manually from the device, yet. This isn't difficult in any way, it's
2388 * just not something we expect to be necessary for a while.
2389 */
2390VMMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
2391 const void *pvBinary, uint32_t fFlags, const char *pszDesc)
2392{
2393 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p fFlags=%#x pszDesc=%s\n",
2394 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, fFlags, pszDesc));
2395
2396 /*
2397 * Validate input.
2398 */
2399 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2400 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
2401 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
2402 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2403 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2404 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
2405 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
2406 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAGS_SHADOWED | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
2407 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
2408
2409 const uint32_t cPages = cb >> PAGE_SHIFT;
2410
2411 /*
2412 * Find the ROM location in the ROM list first.
2413 */
2414 PPGMROMRANGE pRomPrev = NULL;
2415 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
2416 while (pRom && GCPhysLast >= pRom->GCPhys)
2417 {
2418 if ( GCPhys <= pRom->GCPhysLast
2419 && GCPhysLast >= pRom->GCPhys)
2420 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
2421 GCPhys, GCPhysLast, pszDesc,
2422 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
2423 VERR_PGM_RAM_CONFLICT);
2424 /* next */
2425 pRomPrev = pRom;
2426 pRom = pRom->pNextR3;
2427 }
2428
2429 /*
2430 * Find the RAM location and check for conflicts.
2431 *
2432 * Conflict detection is a bit different from RAM
2433 * registration since a ROM can be located within a RAM
2434 * range. So, what we have to check for is other memory
2435 * types (other than RAM, that is) and that we don't span
2436 * more than one RAM range (lazy).
2437 */
2438 bool fRamExists = false;
2439 PPGMRAMRANGE pRamPrev = NULL;
2440 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
2441 while (pRam && GCPhysLast >= pRam->GCPhys)
2442 {
2443 if ( GCPhys <= pRam->GCPhysLast
2444 && GCPhysLast >= pRam->GCPhys)
2445 {
2446 /* completely within? */
2447 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
2448 && GCPhysLast <= pRam->GCPhysLast,
2449 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
2450 GCPhys, GCPhysLast, pszDesc,
2451 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
2452 VERR_PGM_RAM_CONFLICT);
2453 fRamExists = true;
2454 break;
2455 }
2456
2457 /* next */
2458 pRamPrev = pRam;
2459 pRam = pRam->pNextR3;
2460 }
2461 if (fRamExists)
2462 {
2463 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2464 uint32_t cPagesLeft = cPages;
2465 while (cPagesLeft-- > 0)
2466 {
2467 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
2468 ("%RGp (%R[pgmpage]) isn't a RAM page - registering %RGp-%RGp (%s).\n",
2469 pRam->GCPhys + ((RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << PAGE_SHIFT),
2470 pPage, GCPhys, GCPhysLast, pszDesc), VERR_PGM_RAM_CONFLICT);
2471 Assert(PGM_PAGE_IS_ZERO(pPage));
2472 pPage++;
2473 }
2474 }
2475
2476 /*
2477 * Update the base memory reservation if necessary.
2478 */
2479 uint32_t cExtraBaseCost = fRamExists ? 0 : cPages;
2480 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
2481 cExtraBaseCost += cPages;
2482 if (cExtraBaseCost)
2483 {
2484 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
2485 if (RT_FAILURE(rc))
2486 return rc;
2487 }
2488
2489 /*
2490 * Allocate memory for the virgin copy of the RAM.
2491 */
2492 PGMMALLOCATEPAGESREQ pReq;
2493 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
2494 AssertRCReturn(rc, rc);
2495
2496 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2497 {
2498 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
2499 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
2500 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
2501 }
2502
2503 pgmLock(pVM);
2504 rc = GMMR3AllocatePagesPerform(pVM, pReq);
2505 pgmUnlock(pVM);
2506 if (RT_FAILURE(rc))
2507 {
2508 GMMR3AllocatePagesCleanup(pReq);
2509 return rc;
2510 }
2511
2512 /*
2513 * Allocate the new ROM range and RAM range (if necessary).
2514 */
2515 PPGMROMRANGE pRomNew;
2516 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
2517 if (RT_SUCCESS(rc))
2518 {
2519 PPGMRAMRANGE pRamNew = NULL;
2520 if (!fRamExists)
2521 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
2522 if (RT_SUCCESS(rc))
2523 {
2524 pgmLock(pVM);
2525
2526 /*
2527 * Initialize and insert the RAM range (if required).
2528 */
2529 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
2530 if (!fRamExists)
2531 {
2532 pRamNew->pSelfR0 = MMHyperCCToR0(pVM, pRamNew);
2533 pRamNew->pSelfRC = MMHyperCCToRC(pVM, pRamNew);
2534 pRamNew->GCPhys = GCPhys;
2535 pRamNew->GCPhysLast = GCPhysLast;
2536 pRamNew->cb = cb;
2537 pRamNew->pszDesc = pszDesc;
2538 pRamNew->fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_ROM;
2539 pRamNew->pvR3 = NULL;
2540 pRamNew->paLSPages = NULL;
2541
2542 PPGMPAGE pPage = &pRamNew->aPages[0];
2543 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
2544 {
2545 PGM_PAGE_INIT(pPage,
2546 pReq->aPages[iPage].HCPhysGCPhys,
2547 pReq->aPages[iPage].idPage,
2548 PGMPAGETYPE_ROM,
2549 PGM_PAGE_STATE_ALLOCATED);
2550
2551 pRomPage->Virgin = *pPage;
2552 }
2553
2554 pVM->pgm.s.cAllPages += cPages;
2555 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
2556 }
2557 else
2558 {
2559 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2560 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
2561 {
2562 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_ROM);
2563 PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
2564 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
2565 PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);
2566
2567 pRomPage->Virgin = *pPage;
2568 }
2569
2570 pRamNew = pRam;
2571
2572 pVM->pgm.s.cZeroPages -= cPages;
2573 }
2574 pVM->pgm.s.cPrivatePages += cPages;
2575
2576 /* Flush physical page map TLB. */
2577 PGMPhysInvalidatePageMapTLB(pVM);
2578
2579 pgmUnlock(pVM);
2580
2581
2582 /*
2583 * !HACK ALERT! REM + (Shadowed) ROM ==> mess.
2584 *
2585 * If it's shadowed we'll register the handler after the ROM notification
2586 * so we get the access handler callbacks that we should. If it isn't
2587 * shadowed we'll do it the other way around to make REM use the built-in
2588 * ROM behavior and not the handler behavior (which is to route all access
2589 * to PGM atm).
2590 */
2591 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
2592 {
2593 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, true /* fShadowed */);
2594 rc = PGMR3HandlerPhysicalRegister(pVM,
2595 fFlags & PGMPHYS_ROM_FLAGS_SHADOWED
2596 ? PGMPHYSHANDLERTYPE_PHYSICAL_ALL
2597 : PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
2598 GCPhys, GCPhysLast,
2599 pgmR3PhysRomWriteHandler, pRomNew,
2600 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
2601 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
2602 }
2603 else
2604 {
2605 rc = PGMR3HandlerPhysicalRegister(pVM,
2606 fFlags & PGMPHYS_ROM_FLAGS_SHADOWED
2607 ? PGMPHYSHANDLERTYPE_PHYSICAL_ALL
2608 : PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
2609 GCPhys, GCPhysLast,
2610 pgmR3PhysRomWriteHandler, pRomNew,
2611 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
2612 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
2613 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false /* fShadowed */);
2614 }
2615 if (RT_SUCCESS(rc))
2616 {
2617 pgmLock(pVM);
2618
2619 /*
2620 * Copy the image over to the virgin pages.
2621 * This must be done after linking in the RAM range.
2622 */
2623 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
2624 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
2625 {
2626 void *pvDstPage;
2627 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pvDstPage);
2628 if (RT_FAILURE(rc))
2629 {
2630 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
2631 break;
2632 }
2633 memcpy(pvDstPage, (const uint8_t *)pvBinary + (iPage << PAGE_SHIFT), PAGE_SIZE);
2634 }
2635 if (RT_SUCCESS(rc))
2636 {
2637 /*
2638 * Initialize the ROM range.
2639 * Note that the Virgin member of the pages has already been initialized above.
2640 */
2641 pRomNew->GCPhys = GCPhys;
2642 pRomNew->GCPhysLast = GCPhysLast;
2643 pRomNew->cb = cb;
2644 pRomNew->fFlags = fFlags;
2645 pRomNew->idSavedState = UINT8_MAX;
2646 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY ? pvBinary : NULL;
2647 pRomNew->pszDesc = pszDesc;
2648
2649 for (unsigned iPage = 0; iPage < cPages; iPage++)
2650 {
2651 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
2652 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
2653 PGM_PAGE_INIT_ZERO(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
2654 }
2655
2656 /* update the page count stats for the shadow pages. */
2657 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
2658 {
2659 pVM->pgm.s.cZeroPages += cPages;
2660 pVM->pgm.s.cAllPages += cPages;
2661 }
2662
2663 /*
2664 * Insert the ROM range, tell REM and return successfully.
2665 */
2666 pRomNew->pNextR3 = pRom;
2667 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
2668 pRomNew->pNextRC = pRom ? MMHyperCCToRC(pVM, pRom) : NIL_RTRCPTR;
2669
2670 if (pRomPrev)
2671 {
2672 pRomPrev->pNextR3 = pRomNew;
2673 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
2674 pRomPrev->pNextRC = MMHyperCCToRC(pVM, pRomNew);
2675 }
2676 else
2677 {
2678 pVM->pgm.s.pRomRangesR3 = pRomNew;
2679 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
2680 pVM->pgm.s.pRomRangesRC = MMHyperCCToRC(pVM, pRomNew);
2681 }
2682
2683 PGMPhysInvalidatePageMapTLB(pVM);
2684 GMMR3AllocatePagesCleanup(pReq);
2685 pgmUnlock(pVM);
2686 return VINF_SUCCESS;
2687 }
2688
2689 /* bail out */
2690
2691 pgmUnlock(pVM);
2692 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
2693 AssertRC(rc2);
2694 pgmLock(pVM);
2695 }
2696
2697 if (!fRamExists)
2698 {
2699 pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
2700 MMHyperFree(pVM, pRamNew);
2701 }
2702 }
2703 MMHyperFree(pVM, pRomNew);
2704 }
2705
2706 /** @todo Purge the mapping cache or something... */
2707 GMMR3FreeAllocatedPages(pVM, pReq);
2708 GMMR3AllocatePagesCleanup(pReq);
2709 pgmUnlock(pVM);
2710 return rc;
2711}
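/*
 * Illustrative registration sketch (not taken from the sources): a 128 KB
 * shadowed system BIOS image registered at the top of the 32-bit address space
 * during VM construction.  pvBios, the address and the description are
 * hypothetical; with PGMPHYS_ROM_FLAGS_PERMANENT_BINARY the image buffer must
 * stay valid for the life of the VM since pvOriginal keeps pointing at it.
 *
 *     int rc = PGMR3PhysRomRegister(pVM, pDevIns, UINT32_C(0xfffe0000),
 *                                   128 * _1K, pvBios,
 *                                   PGMPHYS_ROM_FLAGS_SHADOWED
 *                                   | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY,
 *                                   "Example System BIOS");
 */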
2712
2713
2714/**
2715 * \#PF Handler callback for ROM write accesses.
2716 *
2717 * @returns VINF_SUCCESS if the handler has carried out the operation.
2718 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
2719 * @param pVM VM Handle.
2720 * @param GCPhys The physical address the guest is writing to.
2721 * @param pvPhys The HC mapping of that address.
2722 * @param pvBuf What the guest is reading/writing.
2723 * @param cbBuf How much it's reading/writing.
2724 * @param enmAccessType The access type.
2725 * @param pvUser User argument.
2726 */
2727static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
2728{
2729 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
2730 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
2731 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
2732 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2733 Log5(("pgmR3PhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
2734
2735 if (enmAccessType == PGMACCESSTYPE_READ)
2736 {
2737 switch (pRomPage->enmProt)
2738 {
2739 /*
2740 * Take the default action.
2741 */
2742 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
2743 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
2744 case PGMROMPROT_READ_ROM_WRITE_RAM:
2745 case PGMROMPROT_READ_RAM_WRITE_RAM:
2746 return VINF_PGM_HANDLER_DO_DEFAULT;
2747
2748 default:
2749 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
2750 pRom->aPages[iPage].enmProt, iPage, GCPhys),
2751 VERR_INTERNAL_ERROR);
2752 }
2753 }
2754 else
2755 {
2756 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
2757 switch (pRomPage->enmProt)
2758 {
2759 /*
2760 * Ignore writes.
2761 */
2762 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
2763 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
2764 return VINF_SUCCESS;
2765
2766 /*
2767 * Write to the ram page.
2768 */
2769 case PGMROMPROT_READ_ROM_WRITE_RAM:
2770 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
2771 {
2772                 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
2773 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
2774
2775 /*
2776 * Take the lock, do lazy allocation, map the page and copy the data.
2777 *
2778 * Note that we have to bypass the mapping TLB since it works on
2779 * guest physical addresses and entering the shadow page would
2780 * kind of screw things up...
2781 */
2782 int rc = pgmLock(pVM);
2783 AssertRC(rc);
2784
2785 PPGMPAGE pShadowPage = &pRomPage->Shadow;
2786 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
2787 {
2788 pShadowPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
2789 AssertLogRelReturn(pShadowPage, VERR_INTERNAL_ERROR);
2790 }
2791
2792 void *pvDstPage;
2793 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
2794 if (RT_SUCCESS(rc))
2795 {
2796 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
2797 pRomPage->LiveSave.fWrittenTo = true;
2798 }
2799
2800 pgmUnlock(pVM);
2801 return rc;
2802 }
2803
2804 default:
2805 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
2806 pRom->aPages[iPage].enmProt, iPage, GCPhys),
2807 VERR_INTERNAL_ERROR);
2808 }
2809 }
2810}
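/*
 * Summary of the protection modes handled above, as implied by the two switch
 * statements and the PGMROMPROT_* names (informational only):
 *
 *     READ_ROM_WRITE_IGNORE   reads hit the virgin ROM copy,  writes are dropped.
 *     READ_RAM_WRITE_IGNORE   reads hit the shadow RAM copy,  writes are dropped.
 *     READ_ROM_WRITE_RAM      reads hit the virgin ROM copy,  writes go to the shadow RAM copy.
 *     READ_RAM_WRITE_RAM      reads hit the shadow RAM copy,  writes go to the shadow RAM copy.
 */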
2811
2812
2813/**
2814 * Called by PGMR3Reset to reset the shadow, switch to the virgin,
2815 * and verify that the virgin part is untouched.
2816 *
2817 * This is done after the normal memory has been cleared.
2818 *
2819 * ASSUMES that the caller owns the PGM lock.
2820 *
2821 * @param pVM The VM handle.
2822 */
2823int pgmR3PhysRomReset(PVM pVM)
2824{
2825 Assert(PGMIsLockOwner(pVM));
2826 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2827 {
2828 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
2829
2830 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
2831 {
2832 /*
2833 * Reset the physical handler.
2834 */
2835 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
2836 AssertRCReturn(rc, rc);
2837
2838 /*
2839 * What we do with the shadow pages depends on the memory
2840 * preallocation option. If not enabled, we'll just throw
2841 * out all the dirty pages and replace them by the zero page.
2842 */
2843 if (!pVM->pgm.s.fRamPreAlloc)
2844 {
2845 /* Free the dirty pages. */
2846 uint32_t cPendingPages = 0;
2847 PGMMFREEPAGESREQ pReq;
2848 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2849 AssertRCReturn(rc, rc);
2850
2851 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2852 if ( !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow)
2853 && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow))
2854 {
2855 Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) == PGM_PAGE_STATE_ALLOCATED);
2856 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRom->aPages[iPage].Shadow, pRom->GCPhys + (iPage << PAGE_SHIFT));
2857 AssertLogRelRCReturn(rc, rc);
2858 }
2859
2860 if (cPendingPages)
2861 {
2862 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2863 AssertLogRelRCReturn(rc, rc);
2864 }
2865 GMMR3FreePagesCleanup(pReq);
2866 }
2867 else
2868 {
2869 /* clear all the shadow pages. */
2870 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2871 {
2872 Assert(!PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow));
2873 void *pvDstPage;
2874 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
2875 rc = pgmPhysPageMakeWritableAndMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pvDstPage);
2876 if (RT_FAILURE(rc))
2877 break;
2878 ASMMemZeroPage(pvDstPage);
2879 }
2880 AssertRCReturn(rc, rc);
2881 }
2882 }
2883
2884#ifdef VBOX_STRICT
2885 /*
2886 * Verify that the virgin page is unchanged if possible.
2887 */
2888 if (pRom->pvOriginal)
2889 {
2890 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
2891 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbSrcPage += PAGE_SIZE)
2892 {
2893 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
2894 void const *pvDstPage;
2895 int rc = pgmPhysPageMapReadOnly(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pvDstPage);
2896 if (RT_FAILURE(rc))
2897 break;
2898 if (memcmp(pvDstPage, pbSrcPage, PAGE_SIZE))
2899 LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
2900 GCPhys, pRom->pszDesc));
2901 }
2902 }
2903#endif
2904 }
2905
2906 return VINF_SUCCESS;
2907}
2908
2909
2910/**
2911 * Change the shadowing of a range of ROM pages.
2912 *
2913 * This is intended for implementing chipset specific memory registers
2914 * and will not be very strict about the input. It will silently ignore
2915 * any pages that are not the part of a shadowed ROM.
2916 *
2917 * @returns VBox status code.
2918 * @retval VINF_PGM_SYNC_CR3
2919 *
2920 * @param pVM Pointer to the shared VM structure.
2921 * @param GCPhys Where to start. Page aligned.
2922 * @param cb How much to change. Page aligned.
2923 * @param enmProt The new ROM protection.
2924 */
2925VMMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
2926{
2927 /*
2928 * Check input
2929 */
2930 if (!cb)
2931 return VINF_SUCCESS;
2932 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2933 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2934 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2935 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2936 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
2937
2938 /*
2939 * Process the request.
2940 */
2941 pgmLock(pVM);
2942 int rc = VINF_SUCCESS;
2943 bool fFlushTLB = false;
2944 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2945 {
2946 if ( GCPhys <= pRom->GCPhysLast
2947 && GCPhysLast >= pRom->GCPhys
2948 && (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
2949 {
2950 /*
2951             * Iterate the relevant pages and make the necessary changes.
2952 */
2953 bool fChanges = false;
2954 uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
2955 ? pRom->cb >> PAGE_SHIFT
2956 : (GCPhysLast - pRom->GCPhys + 1) >> PAGE_SHIFT;
2957 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
2958 iPage < cPages;
2959 iPage++)
2960 {
2961 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2962 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
2963 {
2964 fChanges = true;
2965
2966 /* flush references to the page. */
2967 PPGMPAGE pRamPage = pgmPhysGetPage(&pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));
2968 int rc2 = pgmPoolTrackFlushGCPhys(pVM, pRom->GCPhys + (iPage << PAGE_SHIFT), pRamPage, &fFlushTLB);
2969 if (rc2 != VINF_SUCCESS && (rc == VINF_SUCCESS || RT_FAILURE(rc2)))
2970 rc = rc2;
2971
2972 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
2973 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
2974
2975 *pOld = *pRamPage;
2976 *pRamPage = *pNew;
2977 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
2978 }
2979 pRomPage->enmProt = enmProt;
2980 }
2981
2982 /*
2983 * Reset the access handler if we made changes, no need
2984 * to optimize this.
2985 */
2986 if (fChanges)
2987 {
2988 int rc2 = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
2989 if (RT_FAILURE(rc2))
2990 {
2991 pgmUnlock(pVM);
2992 AssertRC(rc);
2993 return rc2;
2994 }
2995 }
2996
2997 /* Advance - cb isn't updated. */
2998 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
2999 }
3000 }
3001 pgmUnlock(pVM);
3002 if (fFlushTLB)
3003 PGM_INVL_ALL_VCPU_TLBS(pVM);
3004
3005 return rc;
3006}
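/*
 * Illustrative sketch (not taken from the sources): how a chipset device could
 * use PGMR3PhysRomProtect to let the guest copy and then lock a shadowed BIOS
 * range.  The address and size match the hypothetical ROM registration example
 * earlier.
 *
 *     // Open the shadow RAM for both reads and writes while the guest copies.
 *     int rc = PGMR3PhysRomProtect(pVM, UINT32_C(0xfffe0000), 128 * _1K,
 *                                  PGMROMPROT_READ_RAM_WRITE_RAM);
 *     ...
 *     // Write protect it again once the copy is done.
 *     rc = PGMR3PhysRomProtect(pVM, UINT32_C(0xfffe0000), 128 * _1K,
 *                              PGMROMPROT_READ_RAM_WRITE_IGNORE);
 */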
3007
3008
3009/**
3010 * Sets the Address Gate 20 state.
3011 *
3012 * @param pVCpu The VCPU to operate on.
3013 * @param fEnable True if the gate should be enabled.
3014 * False if the gate should be disabled.
3015 */
3016VMMDECL(void) PGMR3PhysSetA20(PVMCPU pVCpu, bool fEnable)
3017{
3018 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVCpu->pgm.s.fA20Enabled));
3019 if (pVCpu->pgm.s.fA20Enabled != fEnable)
3020 {
3021 pVCpu->pgm.s.fA20Enabled = fEnable;
3022 pVCpu->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
3023 REMR3A20Set(pVCpu->pVMR3, pVCpu, fEnable);
3024 /** @todo we're not handling this correctly for VT-x / AMD-V. See #2911 */
3025 }
3026}
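/*
 * Worked example of the A20 mask arithmetic above: !fEnable evaluates to 0 when
 * the gate is enabled and 1 when it is disabled, so
 *
 *     fEnable == true:   GCPhysA20Mask = ~(RTGCPHYS)(0 << 20) = ~(RTGCPHYS)0        (no bits masked)
 *     fEnable == false:  GCPhysA20Mask = ~(RTGCPHYS)(1 << 20) = ~(RTGCPHYS)0x100000 (bit 20 forced to 0)
 *
 * i.e. the mask can be ANDed into guest physical addresses elsewhere in PGM to
 * emulate the wrapping of the A20 line.
 */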
3027
3028
3029/**
3030 * Tree enumeration callback for dealing with age rollover.
3031 * It will perform a simple compression of the current age.
3032 */
3033static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
3034{
3035 Assert(PGMIsLockOwner((PVM)pvUser));
3036 /* Age compression - ASSUMES iNow == 4. */
3037 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
3038 if (pChunk->iAge >= UINT32_C(0xffffff00))
3039 pChunk->iAge = 3;
3040 else if (pChunk->iAge >= UINT32_C(0xfffff000))
3041 pChunk->iAge = 2;
3042 else if (pChunk->iAge)
3043 pChunk->iAge = 1;
3044 else /* iAge = 0 */
3045 pChunk->iAge = 4;
3046
3047 /* reinsert */
3048 PVM pVM = (PVM)pvUser;
3049 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
3050 pChunk->AgeCore.Key = pChunk->iAge;
3051 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
3052 return 0;
3053}
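/*
 * Worked example of the age compression above (assuming iNow has just wrapped
 * and restarts at 4): a chunk aged 0xffffff42 becomes 3, one aged 0xfffff123
 * becomes 2, any other stale chunk becomes 1, and a chunk that was in active
 * use (iAge == 0) becomes 4.  The coarse relative ordering of the age tree is
 * thereby preserved across the rollover.
 */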
3054
3055
3056/**
3057 * Tree enumeration callback that updates the chunks that have
3058 * been used since the last ageing pass.
3059 */
3060static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
3061{
3062 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
3063 if (!pChunk->iAge)
3064 {
3065 PVM pVM = (PVM)pvUser;
3066 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
3067 pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
3068 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
3069 }
3070
3071 return 0;
3072}
3073
3074
3075/**
3076 * Performs ageing of the ring-3 chunk mappings.
3077 *
3078 * @param pVM The VM handle.
3079 */
3080VMMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
3081{
3082 pgmLock(pVM);
3083 pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
3084 pVM->pgm.s.ChunkR3Map.iNow++;
3085 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
3086 {
3087 pVM->pgm.s.ChunkR3Map.iNow = 4;
3088 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
3089 }
3090 else
3091 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
3092 pgmUnlock(pVM);
3093}
3094
3095
3096/**
3097 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
3098 */
3099typedef struct PGMR3PHYSCHUNKUNMAPCB
3100{
3101 PVM pVM; /**< The VM handle. */
3102 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
3103} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
3104
3105
3106/**
3107 * Callback used to find the mapping that's been unused for
3108 * the longest time.
3109 */
3110static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
3111{
3112 do
3113 {
3114 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
3115 if ( pChunk->iAge
3116 && !pChunk->cRefs)
3117 {
3118 /*
3119 * Check that it's not in any of the TLBs.
3120 */
3121 PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
3122 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
3123 if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
3124 {
3125 pChunk = NULL;
3126 break;
3127 }
3128 if (pChunk)
3129 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
3130 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
3131 {
3132 pChunk = NULL;
3133 break;
3134 }
3135 if (pChunk)
3136 {
3137 ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
3138 return 1; /* done */
3139 }
3140 }
3141
3142 /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
3143 pNode = pNode->pList;
3144 } while (pNode);
3145 return 0;
3146}
3147
3148
3149/**
3150 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
3151 *
3152 * The candidate will not be part of any TLBs, so no need to flush
3153 * anything afterwards.
3154 *
3155 * @returns Chunk id.
3156 * @param pVM The VM handle.
3157 */
3158static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
3159{
3160 Assert(PGMIsLockOwner(pVM));
3161
3162 /*
3163 * Do tree ageing first?
3164 */
3165 if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
3166 PGMR3PhysChunkAgeing(pVM);
3167
3168 /*
3169 * Enumerate the age tree starting with the left most node.
3170 */
3171 PGMR3PHYSCHUNKUNMAPCB Args;
3172 Args.pVM = pVM;
3173 Args.pChunk = NULL;
3174     if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
3175 return Args.pChunk->Core.Key;
3176 return INT32_MAX;
3177}
3178
3179
3180/**
3181 * Maps the given chunk into the ring-3 mapping cache.
3182 *
3183 * This will call ring-0.
3184 *
3185 * @returns VBox status code.
3186 * @param pVM The VM handle.
3187 * @param idChunk The chunk in question.
3188 * @param ppChunk Where to store the chunk tracking structure.
3189 *
3190 * @remarks Called from within the PGM critical section.
3191 */
3192int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
3193{
3194 int rc;
3195
3196 Assert(PGMIsLockOwner(pVM));
3197 /*
3198 * Allocate a new tracking structure first.
3199 */
3200#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
3201 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
3202#else
3203 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3UkHeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk), NULL);
3204#endif
3205 AssertReturn(pChunk, VERR_NO_MEMORY);
3206 pChunk->Core.Key = idChunk;
3207 pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
3208 pChunk->iAge = 0;
3209 pChunk->cRefs = 0;
3210 pChunk->cPermRefs = 0;
3211 pChunk->pv = NULL;
3212
3213 /*
3214 * Request the ring-0 part to map the chunk in question and if
3215 * necessary unmap another one to make space in the mapping cache.
3216 */
3217 GMMMAPUNMAPCHUNKREQ Req;
3218 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
3219 Req.Hdr.cbReq = sizeof(Req);
3220 Req.pvR3 = NULL;
3221 Req.idChunkMap = idChunk;
3222 Req.idChunkUnmap = NIL_GMM_CHUNKID;
3223 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
3224 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
3225/** @todo This is wrong. Any thread in the VM process should be able to do this,
3226 * there are dependencies on this. What currently saves the day is that
3227 * we don't unmap anything and that all non-zero memory will therefore
3228 * be present when non-EMTs try to access it. */
3229 rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
3230 if (RT_SUCCESS(rc))
3231 {
3232 /*
3233 * Update the tree.
3234 */
3235 /* insert the new one. */
3236 AssertPtr(Req.pvR3);
3237 pChunk->pv = Req.pvR3;
3238 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
3239 AssertRelease(fRc);
3240 pVM->pgm.s.ChunkR3Map.c++;
3241
3242 fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
3243 AssertRelease(fRc);
3244
3245 /* remove the unmapped one. */
3246 if (Req.idChunkUnmap != NIL_GMM_CHUNKID)
3247 {
3248 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
3249 AssertRelease(pUnmappedChunk);
3250 pUnmappedChunk->pv = NULL;
3251 pUnmappedChunk->Core.Key = UINT32_MAX;
3252#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
3253 MMR3HeapFree(pUnmappedChunk);
3254#else
3255 MMR3UkHeapFree(pVM, pUnmappedChunk, MM_TAG_PGM_CHUNK_MAPPING);
3256#endif
3257 pVM->pgm.s.ChunkR3Map.c--;
3258
3259 /* Chunk removed, so clear the page map TBL as well (might still be referenced). */
3260 PGMPhysInvalidatePageMapTLB(pVM);
3261 }
3262 }
3263 else
3264 {
3265 AssertRC(rc);
3266#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
3267 MMR3HeapFree(pChunk);
3268#else
3269 MMR3UkHeapFree(pVM, pChunk, MM_TAG_PGM_CHUNK_MAPPING);
3270#endif
3271 pChunk = NULL;
3272 }
3273
3274 *ppChunk = pChunk;
3275 return rc;
3276}
3277
3278
3279/**
3280 * For VMMCALLRING3_PGM_MAP_CHUNK, considered internal.
3281 *
3282 * @returns see pgmR3PhysChunkMap.
3283 * @param pVM The VM handle.
3284 * @param idChunk The chunk to map.
3285 */
3286VMMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
3287{
3288 PPGMCHUNKR3MAP pChunk;
3289 int rc;
3290
3291 pgmLock(pVM);
3292 rc = pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
3293 pgmUnlock(pVM);
3294 return rc;
3295}
3296
3297
3298/**
3299 * Invalidates the TLB for the ring-3 mapping cache.
3300 *
3301 * @param pVM The VM handle.
3302 */
3303VMMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
3304{
3305 pgmLock(pVM);
3306 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
3307 {
3308 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
3309 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
3310 }
3311 /* The page map TLB references chunks, so invalidate that one too. */
3312 PGMPhysInvalidatePageMapTLB(pVM);
3313 pgmUnlock(pVM);
3314}
3315
3316
3317/**
3318 * Response to VMMCALLRING3_PGM_ALLOCATE_LARGE_PAGE to allocate a large (2MB) page
3319 * for use with a nested paging PDE.
3320 *
3321 * @returns The following VBox status codes.
3322 * @retval VINF_SUCCESS on success.
3323 * @retval VINF_EM_NO_MEMORY if we're out of memory.
3324 *
3325 * @param pVM The VM handle.
3326 * @param GCPhys GC physical start address of the 2 MB range
3327 */
3328VMMR3DECL(int) PGMR3PhysAllocateLargeHandyPage(PVM pVM, RTGCPHYS GCPhys)
3329{
3330 pgmLock(pVM);
3331
3332 STAM_PROFILE_START(&pVM->pgm.s.StatAllocLargePage, a);
3333 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE, 0, NULL);
3334 STAM_PROFILE_STOP(&pVM->pgm.s.StatAllocLargePage, a);
3335 if (RT_SUCCESS(rc))
3336 {
3337 Assert(pVM->pgm.s.cLargeHandyPages == 1);
3338
3339 uint32_t idPage = pVM->pgm.s.aLargeHandyPage[0].idPage;
3340 RTHCPHYS HCPhys = pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys;
3341
3342 void *pv;
3343
3344 /* Map the large page into our address space.
3345 *
3346 * Note: assuming that within the 2 MB range:
3347 * - GCPhys + PAGE_SIZE = HCPhys + PAGE_SIZE (whole point of this exercise)
3348 * - user space mapping is continuous as well
3349 * - page id (GCPhys) + 1 = page id (GCPhys + PAGE_SIZE)
3350 */
3351 rc = pgmPhysPageMapByPageID(pVM, idPage, HCPhys, &pv);
3352 AssertLogRelMsg(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc", idPage, HCPhys, rc));
3353
3354 if (RT_SUCCESS(rc))
3355 {
3356 /*
3357 * Clear the pages.
3358 */
3359 STAM_PROFILE_START(&pVM->pgm.s.StatClearLargePage, b);
3360 for (unsigned i = 0; i < _2M/PAGE_SIZE; i++)
3361 {
3362 ASMMemZeroPage(pv);
3363
3364 PPGMPAGE pPage;
3365 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
3366 AssertRC(rc);
3367
3368 Assert(PGM_PAGE_IS_ZERO(pPage));
3369 STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
3370 pVM->pgm.s.cZeroPages--;
3371
3372 /*
3373 * Do the PGMPAGE modifications.
3374 */
3375 pVM->pgm.s.cPrivatePages++;
3376 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
3377 PGM_PAGE_SET_PAGEID(pPage, idPage);
3378 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
3379 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PDE);
3380
3381 /* Somewhat dirty assumption that page ids are increasing. */
3382 idPage++;
3383
3384 HCPhys += PAGE_SIZE;
3385 GCPhys += PAGE_SIZE;
3386
3387 pv = (void *)((uintptr_t)pv + PAGE_SIZE);
3388
3389 Log3(("PGMR3PhysAllocateLargePage: idPage=%#x HCPhys=%RGp\n", idPage, HCPhys));
3390 }
3391 STAM_PROFILE_STOP(&pVM->pgm.s.StatClearLargePage, b);
3392
3393 /* Flush all TLBs. */
3394 PGM_INVL_ALL_VCPU_TLBS(pVM);
3395 PGMPhysInvalidatePageMapTLB(pVM);
3396 }
3397 pVM->pgm.s.cLargeHandyPages = 0;
3398 }
3399
3400 pgmUnlock(pVM);
3401 return rc;
3402}
3403
3404
3405/**
3406 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES.
3407 *
3408 * This function will also work the VM_FF_PGM_NO_MEMORY force action flag, to
3409 * signal and clear the out-of-memory condition. When contracted, this API is
3410 * used to try to clear the condition when the user wants to resume.
3411 *
3412 * @returns The following VBox status codes.
3413 * @retval VINF_SUCCESS on success. FFs cleared.
3414 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in
3415 * this case and it gets accompanied by VM_FF_PGM_NO_MEMORY.
3416 *
3417 * @param pVM The VM handle.
3418 *
3419 * @remarks The VINF_EM_NO_MEMORY status is for the benefit of the FF processing
3420 * in EM.cpp and shouldn't be propagated outside TRPM, HWACCM, EM and
3421 * pgmPhysEnsureHandyPage. There is one exception to this in the \#PF
3422 * handler.
3423 */
3424VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
3425{
3426 pgmLock(pVM);
3427
3428 /*
3429 * Allocate more pages, noting down the index of the first new page.
3430 */
3431 uint32_t iClear = pVM->pgm.s.cHandyPages;
3432 AssertMsgReturn(iClear <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d", iClear), VERR_INTERNAL_ERROR);
3433 Log(("PGMR3PhysAllocateHandyPages: %d -> %d\n", iClear, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
3434 int rcAlloc = VINF_SUCCESS;
3435 int rcSeed = VINF_SUCCESS;
3436 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
3437 while (rc == VERR_GMM_SEED_ME)
3438 {
3439 void *pvChunk;
3440 rcAlloc = rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
3441 if (RT_SUCCESS(rc))
3442 {
3443 rcSeed = rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
3444 if (RT_FAILURE(rc))
3445 SUPR3PageFree(pvChunk, GMM_CHUNK_SIZE >> PAGE_SHIFT);
3446 }
3447 if (RT_SUCCESS(rc))
3448 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
3449 }
3450
3451 if (RT_SUCCESS(rc))
3452 {
3453 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
3454 Assert(pVM->pgm.s.cHandyPages > 0);
3455 VM_FF_CLEAR(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
3456 VM_FF_CLEAR(pVM, VM_FF_PGM_NO_MEMORY);
3457
3458 /*
3459 * Clear the pages.
3460 */
3461 while (iClear < pVM->pgm.s.cHandyPages)
3462 {
3463 PGMMPAGEDESC pPage = &pVM->pgm.s.aHandyPages[iClear];
3464 void *pv;
3465 rc = pgmPhysPageMapByPageID(pVM, pPage->idPage, pPage->HCPhysGCPhys, &pv);
3466 AssertLogRelMsgBreak(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc", pPage->idPage, pPage->HCPhysGCPhys, rc));
3467 ASMMemZeroPage(pv);
3468 iClear++;
3469 Log3(("PGMR3PhysAllocateHandyPages: idPage=%#x HCPhys=%RGp\n", pPage->idPage, pPage->HCPhysGCPhys));
3470 }
3471 }
3472 else
3473 {
3474 /*
3475 * We should never get here unless there is a genuine shortage of
3476 * memory (or some internal error). Flag the error so the VM can be
3477 * suspended ASAP and the user informed. If we're totally out of
3478 * handy pages we will return failure.
3479 */
3480 /* Report the failure. */
3481 LogRel(("PGM: Failed to procure handy pages; rc=%Rrc rcAlloc=%Rrc rcSeed=%Rrc cHandyPages=%#x\n"
3482 " cAllPages=%#x cPrivatePages=%#x cSharedPages=%#x cZeroPages=%#x\n",
3483 rc, rcAlloc, rcSeed,
3484 pVM->pgm.s.cHandyPages,
3485 pVM->pgm.s.cAllPages,
3486 pVM->pgm.s.cPrivatePages,
3487 pVM->pgm.s.cSharedPages,
3488 pVM->pgm.s.cZeroPages));
3489 if ( rc != VERR_NO_MEMORY
3490 && rc != VERR_LOCK_FAILED)
3491 {
3492 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
3493 {
3494 LogRel(("PGM: aHandyPages[#%#04x] = {.HCPhysGCPhys=%RHp, .idPage=%#08x, .idSharedPage=%#08x}\n",
3495 i, pVM->pgm.s.aHandyPages[i].HCPhysGCPhys, pVM->pgm.s.aHandyPages[i].idPage,
3496 pVM->pgm.s.aHandyPages[i].idSharedPage));
3497 uint32_t const idPage = pVM->pgm.s.aHandyPages[i].idPage;
3498 if (idPage != NIL_GMM_PAGEID)
3499 {
3500 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
3501 pRam;
3502 pRam = pRam->pNextR3)
3503 {
3504 uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
3505 for (uint32_t iPage = 0; iPage < cPages; iPage++)
3506 if (PGM_PAGE_GET_PAGEID(&pRam->aPages[iPage]) == idPage)
3507 LogRel(("PGM: Used by %RGp %R[pgmpage] (%s)\n",
3508 pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pRam->aPages[iPage], pRam->pszDesc));
3509 }
3510 }
3511 }
3512 }
3513
3514 /* Set the FFs and adjust rc. */
3515 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
3516 VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
3517 if ( rc == VERR_NO_MEMORY
3518 || rc == VERR_LOCK_FAILED)
3519 rc = VINF_EM_NO_MEMORY;
3520 }
3521
3522 pgmUnlock(pVM);
3523 return rc;
3524}
3525
3526
3527/**
3528 * Frees the specified RAM page and replaces it with the ZERO page.
3529 *
3530 * This is used by ballooning, remapping MMIO2 and RAM reset.
3531 *
3532 * @param pVM Pointer to the shared VM structure.
3533 * @param   pReq            Pointer to the request.
 * @param   pcPendingPages  Where the number of pages pending in the request is kept.
3534 * @param pPage Pointer to the page structure.
3535 * @param GCPhys The guest physical address of the page, if applicable.
3536 *
3537 * @remarks The caller must own the PGM lock.
3538 */
3539static int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys)
3540{
3541 /*
3542 * Assert sanity.
3543 */
3544 Assert(PGMIsLockOwner(pVM));
3545 if (RT_UNLIKELY( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
3546 && PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW))
3547 {
3548 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
3549 return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
3550 }
3551
3552 if ( PGM_PAGE_IS_ZERO(pPage)
3553 || PGM_PAGE_IS_BALLOONED(pPage))
3554 return VINF_SUCCESS;
3555
3556 const uint32_t idPage = PGM_PAGE_GET_PAGEID(pPage);
3557    Log3(("pgmPhysFreePage: idPage=%#x GCPhys=%RGp pPage=%R[pgmpage]\n", idPage, GCPhys, pPage));
3558 if (RT_UNLIKELY( idPage == NIL_GMM_PAGEID
3559 || idPage > GMM_PAGEID_LAST
3560 || PGM_PAGE_GET_CHUNKID(pPage) == NIL_GMM_CHUNKID))
3561 {
3562 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
3563        return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, idPage);
3564 }
3565
3566 /* update page count stats. */
3567 if (PGM_PAGE_IS_SHARED(pPage))
3568 pVM->pgm.s.cSharedPages--;
3569 else
3570 pVM->pgm.s.cPrivatePages--;
3571 pVM->pgm.s.cZeroPages++;
3572
3573 /* Deal with write monitored pages. */
3574 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
3575 {
3576 PGM_PAGE_SET_WRITTEN_TO(pPage);
3577 pVM->pgm.s.cWrittenToPages++;
3578 }
3579
3580 /*
3581 * pPage = ZERO page.
3582 */
3583 PGM_PAGE_SET_HCPHYS(pPage, pVM->pgm.s.HCPhysZeroPg);
3584 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
3585 PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
3586 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_DONTCARE);
3587
3588 /* Flush physical page map TLB entry. */
3589 PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
3590
3591 /*
3592 * Make sure it's not in the handy page array.
3593 */
3594 for (uint32_t i = pVM->pgm.s.cHandyPages; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
3595 {
3596 if (pVM->pgm.s.aHandyPages[i].idPage == idPage)
3597 {
3598 pVM->pgm.s.aHandyPages[i].idPage = NIL_GMM_PAGEID;
3599 break;
3600 }
3601 if (pVM->pgm.s.aHandyPages[i].idSharedPage == idPage)
3602 {
3603 pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
3604 break;
3605 }
3606 }
3607
3608 /*
3609 * Push it onto the page array.
3610 */
3611 uint32_t iPage = *pcPendingPages;
3612 Assert(iPage < PGMPHYS_FREE_PAGE_BATCH_SIZE);
3613 *pcPendingPages += 1;
3614
3615 pReq->aPages[iPage].idPage = idPage;
3616
3617 if (iPage + 1 < PGMPHYS_FREE_PAGE_BATCH_SIZE)
3618 return VINF_SUCCESS;
3619
3620 /*
3621 * Flush the pages.
3622 */
3623 int rc = GMMR3FreePagesPerform(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE);
3624 if (RT_SUCCESS(rc))
3625 {
3626 GMMR3FreePagesRePrep(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
3627 *pcPendingPages = 0;
3628 }
3629 return rc;
3630}
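
#if 0 /* Illustrative sketch only -- not built.  A minimal example of the
       * batch-free pattern pgmPhysFreePage is designed for: the function
       * flushes and re-preps full batches itself, so the caller only has to
       * flush the final partial batch.  The GMMR3FreePagesPrepare/Cleanup
       * helpers and the simple range walk are assumptions; error handling is
       * simplified. */
static int pgmR3PhysFreeRamRangeExample(PVM pVM, PPGMRAMRANGE pRam)
{
    PGMMFREEPAGESREQ pReq;
    uint32_t         cPendingPages = 0;
    int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    AssertLogRelRCReturn(rc, rc);

    pgmLock(pVM);                       /* pgmPhysFreePage requires the PGM lock. */
    uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
    for (uint32_t iPage = 0; iPage < cPages && RT_SUCCESS(rc); iPage++)
        if (PGM_PAGE_GET_TYPE(&pRam->aPages[iPage]) == PGMPAGETYPE_RAM)
            rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRam->aPages[iPage],
                                 pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));

    if (RT_SUCCESS(rc) && cPendingPages)
        rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages); /* flush the last partial batch */
    pgmUnlock(pVM);

    GMMR3FreePagesCleanup(pReq);
    return rc;
}
#endif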
3631
3632
3633/**
3634 * Converts a GC physical address to a HC ring-3 pointer, with some
3635 * additional checks.
3636 *
3637 * @returns VBox status code.
3638 * @retval VINF_SUCCESS on success.
3639 * @retval VINF_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3640 * access handler of some kind.
3641 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3642 * accesses or is odd in any way.
3643 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3644 *
3645 * @param pVM The VM handle.
3646 * @param GCPhys The GC physical address to convert.
3647 * @param fWritable Whether write access is required.
3648 * @param ppv Where to store the pointer corresponding to GCPhys on
3649 * success.
3650 */
3651VMMR3DECL(int) PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
3652{
3653 pgmLock(pVM);
3654
3655 PPGMRAMRANGE pRam;
3656 PPGMPAGE pPage;
3657 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
3658 if (RT_SUCCESS(rc))
3659 {
3660 if (PGM_PAGE_IS_BALLOONED(pPage))
3661 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
3662 else if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
3663 rc = VINF_SUCCESS;
3664 else
3665 {
3666 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3667 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3668 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
3669 {
3670 /** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
3671 * in -norawr0 mode. */
3672 if (fWritable)
3673 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
3674 }
3675 else
3676 {
3677                /* Temporarily disabled physical handler(s): since the recompiler
3678                   doesn't get notified when a handler is reset, we have to pretend
3679                   it's operating normally. */
3680 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
3681 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3682 else
3683 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
3684 }
3685 }
3686 if (RT_SUCCESS(rc))
3687 {
3688 int rc2;
3689
3690 /* Make sure what we return is writable. */
3691 if (fWritable && rc != VINF_PGM_PHYS_TLB_CATCH_WRITE)
3692 switch (PGM_PAGE_GET_STATE(pPage))
3693 {
3694 case PGM_PAGE_STATE_ALLOCATED:
3695 break;
3696 case PGM_PAGE_STATE_BALLOONED:
3697 AssertFailed();
3698 break;
3699 case PGM_PAGE_STATE_ZERO:
3700 case PGM_PAGE_STATE_SHARED:
3701 case PGM_PAGE_STATE_WRITE_MONITORED:
3702 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
3703 AssertLogRelRCReturn(rc2, rc2);
3704 break;
3705 }
3706
3707 /* Get a ring-3 mapping of the address. */
3708 PPGMPAGER3MAPTLBE pTlbe;
3709 rc2 = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
3710 AssertLogRelRCReturn(rc2, rc2);
3711 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
3712 /** @todo mapping/locking hell; this isn't horribly efficient since
3713 * pgmPhysPageLoadIntoTlb will repeat the lookup we've done here. */
3714
3715 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
3716 }
3717 else
3718 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
3719
3720 /* else: handler catching all access, no pointer returned. */
3721 }
3722 else
3723 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
3724
3725 pgmUnlock(pVM);
3726 return rc;
3727}
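
#if 0 /* Illustrative sketch only -- not built.  Shows one way an external
       * caller might use PGMR3PhysTlbGCPhys2Ptr for a read that stays within
       * a single page, falling back to the generic access path when the page
       * is covered by a catch-all handler or is unassigned.  The fallback
       * signature is an assumption based on the PGMR3PhysReadExternal EMT
       * worker declared at the top of this file. */
static int pgmR3PhysExampleReadViaTlb(PVM pVM, RTGCPHYS GCPhys, void *pvDst, size_t cb)
{
    Assert(PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) >= cb);     /* single page only */

    void *pv;
    int rc = PGMR3PhysTlbGCPhys2Ptr(pVM, GCPhys, false /*fWritable*/, &pv);
    if (RT_SUCCESS(rc))     /* VINF_SUCCESS or VINF_PGM_PHYS_TLB_CATCH_WRITE: reading thru the pointer is fine. */
        memcpy(pvDst, pv, cb);
    else                    /* VERR_PGM_PHYS_TLB_CATCH_ALL / VERR_PGM_PHYS_TLB_UNASSIGNED: take the slow path. */
        rc = PGMR3PhysReadExternal(pVM, GCPhys, pvDst, cb);
    return rc;
}
#endif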
3728