VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp @ 17622

Last change on this file since 17622 was 17538, checked in by vboxsync, 16 years ago

REM,PGM: Added two mutually exclusive flags to REMR3NotifyPhysRamRegister to indicate whether it's MMIO2 or RAM that's being registered. This is for the new code only, the old one sticks to the MM_RAM_FLAGS.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 107.7 KB
1/* $Id: PGMPhys.cpp 17538 2009-03-08 05:32:49Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/pgm.h>
28#include <VBox/cpum.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/rem.h>
34#include <VBox/csam.h>
35#include "PGMInternal.h"
36#include <VBox/vm.h>
37#include <VBox/dbg.h>
38#include <VBox/param.h>
39#include <VBox/err.h>
40#include <iprt/assert.h>
41#include <iprt/alloc.h>
42#include <iprt/asm.h>
43#include <VBox/log.h>
44#include <iprt/thread.h>
45#include <iprt/string.h>
46
47
48/*******************************************************************************
49* Defined Constants And Macros *
50*******************************************************************************/
51/** The number of pages to free in one batch. */
52#define PGMPHYS_FREE_PAGE_BATCH_SIZE 128
53
54
55/*******************************************************************************
56* Internal Functions *
57*******************************************************************************/
58static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
59static int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys);
60
61
62/*
63 * PGMR3PhysReadU8-64
64 * PGMR3PhysWriteU8-64
65 */
66#define PGMPHYSFN_READNAME PGMR3PhysReadU8
67#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU8
68#define PGMPHYS_DATASIZE 1
69#define PGMPHYS_DATATYPE uint8_t
70#include "PGMPhysRWTmpl.h"
71
72#define PGMPHYSFN_READNAME PGMR3PhysReadU16
73#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU16
74#define PGMPHYS_DATASIZE 2
75#define PGMPHYS_DATATYPE uint16_t
76#include "PGMPhysRWTmpl.h"
77
78#define PGMPHYSFN_READNAME PGMR3PhysReadU32
79#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU32
80#define PGMPHYS_DATASIZE 4
81#define PGMPHYS_DATATYPE uint32_t
82#include "PGMPhysRWTmpl.h"
83
84#define PGMPHYSFN_READNAME PGMR3PhysReadU64
85#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU64
86#define PGMPHYS_DATASIZE 8
87#define PGMPHYS_DATATYPE uint64_t
88#include "PGMPhysRWTmpl.h"
89
90
91/**
92 * EMT worker for PGMR3PhysReadExternal.
93 */
94static DECLCALLBACK(int) pgmR3PhysReadExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, void *pvBuf, size_t cbRead)
95{
96 PGMPhysRead(pVM, *pGCPhys, pvBuf, cbRead);
97 return VINF_SUCCESS;
98}
99
100
101/**
102 * Read from physical memory, external users.
103 *
104 * @returns VBox status code.
105 * @retval VINF_SUCCESS.
106 *
107 * @param pVM VM Handle.
108 * @param GCPhys Physical address to read from.
109 * @param pvBuf Where to put the bytes read.
110 * @param cbRead How many bytes to read.
111 *
112 * @thread Any but EMTs.
113 */
114VMMR3DECL(int) PGMR3PhysReadExternal(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
115{
116 VM_ASSERT_OTHER_THREAD(pVM);
117
118 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
119 LogFlow(("PGMR3PhysReadExternal: %RGp %d\n", GCPhys, cbRead));
120
121 pgmLock(pVM);
122
123 /*
124 * Copy loop on ram ranges.
125 */
126 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
127 for (;;)
128 {
129 /* Find range. */
130 while (pRam && GCPhys > pRam->GCPhysLast)
131 pRam = pRam->CTX_SUFF(pNext);
132 /* Inside range or not? */
133 if (pRam && GCPhys >= pRam->GCPhys)
134 {
135 /*
136 * Must work our way thru this page by page.
137 */
138 RTGCPHYS off = GCPhys - pRam->GCPhys;
139 while (off < pRam->cb)
140 {
141 unsigned iPage = off >> PAGE_SHIFT;
142 PPGMPAGE pPage = &pRam->aPages[iPage];
143
144 /*
145 * If the page has an ALL access handler, we'll have to
146 * delegate the job to EMT.
147 */
148 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
149 {
150 pgmUnlock(pVM);
151
152 PVMREQ pReq = NULL;
153 int rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT,
154 (PFNRT)pgmR3PhysReadExternalEMT, 4, pVM, &GCPhys, pvBuf, cbRead);
155 if (RT_SUCCESS(rc))
156 {
157 rc = pReq->iStatus;
158 VMR3ReqFree(pReq);
159 }
160 return rc;
161 }
162 Assert(!PGM_PAGE_IS_MMIO(pPage));
163
164 /*
165 * Simple stuff, go ahead.
166 */
167 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
168 if (cb > cbRead)
169 cb = cbRead;
170 const void *pvSrc;
171 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
172 if (RT_SUCCESS(rc))
173 memcpy(pvBuf, pvSrc, cb);
174 else
175 {
176 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
177 pRam->GCPhys + off, pPage, rc));
178 memset(pvBuf, 0xff, cb);
179 }
180
181 /* next page */
182 if (cb >= cbRead)
183 {
184 pgmUnlock(pVM);
185 return VINF_SUCCESS;
186 }
187 cbRead -= cb;
188 off += cb;
189 GCPhys += cb;
190 pvBuf = (char *)pvBuf + cb;
191 } /* walk pages in ram range. */
192 }
193 else
194 {
195 LogFlow(("PGMR3PhysReadExternal: Unassigned %RGp size=%u\n", GCPhys, cbRead));
196
197 /*
198 * Unassigned address space.
199 */
200 if (!pRam)
201 break;
202 size_t cb = pRam->GCPhys - GCPhys;
203 if (cb >= cbRead)
204 {
205 memset(pvBuf, 0xff, cbRead);
206 break;
207 }
208 memset(pvBuf, 0xff, cb);
209
210 cbRead -= cb;
211 pvBuf = (char *)pvBuf + cb;
212 GCPhys += cb;
213 }
214 } /* Ram range walk */
215
216 pgmUnlock(pVM);
217
218 return VINF_SUCCESS;
219}
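/*
 * Editor's usage sketch (not part of the original file): how a non-EMT caller,
 * e.g. a device's async I/O thread, might use PGMR3PhysReadExternal.  The
 * guest-physical address GCPhysDma and the buffer are hypothetical.
 *
 *     uint8_t abSector[512];
 *     int rc = PGMR3PhysReadExternal(pVM, GCPhysDma, abSector, sizeof(abSector));
 *     if (RT_FAILURE(rc))
 *         LogRel(("Read of %RGp failed: %Rrc\n", GCPhysDma, rc));
 */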
220
221
222/**
223 * EMT worker for PGMR3PhysWriteExternal.
224 */
225static DECLCALLBACK(int) pgmR3PhysWriteExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, const void *pvBuf, size_t cbWrite)
226{
227 /** @todo VERR_EM_NO_MEMORY */
228 PGMPhysWrite(pVM, *pGCPhys, pvBuf, cbWrite);
229 return VINF_SUCCESS;
230}
231
232
233/**
234 * Write to physical memory, external users.
235 *
236 * @returns VBox status code.
237 * @retval VINF_SUCCESS.
238 * @retval VERR_EM_NO_MEMORY.
239 *
240 * @param pVM VM Handle.
241 * @param GCPhys Physical address to write to.
242 * @param pvBuf What to write.
243 * @param cbWrite How many bytes to write.
244 *
245 * @thread Any but EMTs.
246 */
247VMMDECL(int) PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
248{
249 VM_ASSERT_OTHER_THREAD(pVM);
250
251 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMR3PhysWriteExternal after pgmR3Save()!\n"));
252 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
253 LogFlow(("PGMR3PhysWriteExternal: %RGp %d\n", GCPhys, cbWrite));
254
255 pgmLock(pVM);
256
257 /*
258 * Copy loop on ram ranges, stop when we hit something difficult.
259 */
260 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
261 for (;;)
262 {
263 /* Find range. */
264 while (pRam && GCPhys > pRam->GCPhysLast)
265 pRam = pRam->CTX_SUFF(pNext);
266 /* Inside range or not? */
267 if (pRam && GCPhys >= pRam->GCPhys)
268 {
269 /*
270 * Must work our way thru this page by page.
271 */
272 RTGCPTR off = GCPhys - pRam->GCPhys;
273 while (off < pRam->cb)
274 {
275 RTGCPTR iPage = off >> PAGE_SHIFT;
276 PPGMPAGE pPage = &pRam->aPages[iPage];
277
278 /*
279 * If the page is in any way problematic, we have to
280 * do the work on the EMT. Anything that needs to be made
281 * writable or involves access handlers is problematic.
282 */
283 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
284 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
285 {
286 pgmUnlock(pVM);
287
288 PVMREQ pReq = NULL;
289 int rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT,
290 (PFNRT)pgmR3PhysWriteExternalEMT, 4, pVM, &GCPhys, pvBuf, cbWrite);
291 if (RT_SUCCESS(rc))
292 {
293 rc = pReq->iStatus;
294 VMR3ReqFree(pReq);
295 }
296 return rc;
297 }
298 Assert(!PGM_PAGE_IS_MMIO(pPage));
299
300 /*
301 * Simple stuff, go ahead.
302 */
303 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
304 if (cb > cbWrite)
305 cb = cbWrite;
306 void *pvDst;
307 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
308 if (RT_SUCCESS(rc))
309 memcpy(pvDst, pvBuf, cb);
310 else
311 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
312 pRam->GCPhys + off, pPage, rc));
313
314 /* next page */
315 if (cb >= cbWrite)
316 {
317 pgmUnlock(pVM);
318 return VINF_SUCCESS;
319 }
320
321 cbWrite -= cb;
322 off += cb;
323 GCPhys += cb;
324 pvBuf = (const char *)pvBuf + cb;
325 } /* walk pages in ram range */
326 }
327 else
328 {
329 /*
330 * Unassigned address space, skip it.
331 */
332 if (!pRam)
333 break;
334 size_t cb = pRam->GCPhys - GCPhys;
335 if (cb >= cbWrite)
336 break;
337 cbWrite -= cb;
338 pvBuf = (const char *)pvBuf + cb;
339 GCPhys += cb;
340 }
341 } /* Ram range walk */
342
343 pgmUnlock(pVM);
344 return VINF_SUCCESS;
345}
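/*
 * Editor's usage sketch (not part of the original file): the write-side
 * counterpart, again from a non-EMT thread completing a DMA transfer into
 * guest RAM.  GCPhysDst and abData are hypothetical.
 *
 *     int rc = PGMR3PhysWriteExternal(pVM, GCPhysDst, abData, sizeof(abData));
 *     AssertLogRelMsg(RT_SUCCESS(rc), ("write at %RGp -> %Rrc\n", GCPhysDst, rc));
 */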
346
347
348
349/**
350 * Links a new RAM range into the list.
351 *
352 * @param pVM Pointer to the shared VM structure.
353 * @param pNew Pointer to the new list entry.
354 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
355 */
356static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
357{
358 pgmLock(pVM);
359
360 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
361 pNew->pNextR3 = pRam;
362 pNew->pNextR0 = pRam ? MMHyperCCToR0(pVM, pRam) : NIL_RTR0PTR;
363 pNew->pNextRC = pRam ? MMHyperCCToRC(pVM, pRam) : NIL_RTRCPTR;
364
365 if (pPrev)
366 {
367 pPrev->pNextR3 = pNew;
368 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
369 pPrev->pNextRC = MMHyperCCToRC(pVM, pNew);
370 }
371 else
372 {
373 pVM->pgm.s.pRamRangesR3 = pNew;
374 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
375 pVM->pgm.s.pRamRangesRC = MMHyperCCToRC(pVM, pNew);
376 }
377
378 pgmUnlock(pVM);
379}
380
381
382/**
383 * Unlink an existing RAM range from the list.
384 *
385 * @param pVM Pointer to the shared VM structure.
386 * @param pRam Pointer to the list entry to unlink.
387 * @param pPrev Pointer to the previous list entry. NULL if pRam is the head.
388 */
389static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
390{
391 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
392
393 pgmLock(pVM);
394
395 PPGMRAMRANGE pNext = pRam->pNextR3;
396 if (pPrev)
397 {
398 pPrev->pNextR3 = pNext;
399 pPrev->pNextR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
400 pPrev->pNextRC = pNext ? MMHyperCCToRC(pVM, pNext) : NIL_RTRCPTR;
401 }
402 else
403 {
404 Assert(pVM->pgm.s.pRamRangesR3 == pRam);
405 pVM->pgm.s.pRamRangesR3 = pNext;
406 pVM->pgm.s.pRamRangesR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
407 pVM->pgm.s.pRamRangesRC = pNext ? MMHyperCCToRC(pVM, pNext) : NIL_RTRCPTR;
408 }
409
410 pgmUnlock(pVM);
411}
412
413
414/**
415 * Unlink an existing RAM range from the list.
416 *
417 * @param pVM Pointer to the shared VM structure.
418 * @param pRam Pointer to the list entry to unlink.
419 */
420static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
421{
422 /* find prev. */
423 PPGMRAMRANGE pPrev = NULL;
424 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
425 while (pCur != pRam)
426 {
427 pPrev = pCur;
428 pCur = pCur->pNextR3;
429 }
430 AssertFatal(pCur);
431
432 pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
433}
434
435
436/**
437 * Sets up a RAM range.
438 *
439 * This will check for conflicting registrations, make a resource
440 * reservation for the memory (with GMM), and set up the per-page
441 * tracking structures (PGMPAGE).
442 *
443 * @returns VBox status code.
444 * @param pVM Pointer to the shared VM structure.
445 * @param GCPhys The physical address of the RAM.
446 * @param cb The size of the RAM.
447 * @param pszDesc The description - not copied, so don't free or change it.
448 */
449VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
450{
451 /*
452 * Validate input.
453 */
454 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
455 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
456 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
457 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
458 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
459 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
460 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
461 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
462
463 /*
464 * Find range location and check for conflicts.
465 * (We don't lock here because the locking by EMT is only required on update.)
466 */
467 PPGMRAMRANGE pPrev = NULL;
468 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
469 while (pRam && GCPhysLast >= pRam->GCPhys)
470 {
471 if ( GCPhysLast >= pRam->GCPhys
472 && GCPhys <= pRam->GCPhysLast)
473 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
474 GCPhys, GCPhysLast, pszDesc,
475 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
476 VERR_PGM_RAM_CONFLICT);
477
478 /* next */
479 pPrev = pRam;
480 pRam = pRam->pNextR3;
481 }
482
483 /*
484 * Register it with GMM (the API bitches).
485 */
486 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
487 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
488 if (RT_FAILURE(rc))
489 return rc;
490
491 /*
492 * Allocate RAM range.
493 */
494 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
495 PPGMRAMRANGE pNew;
496 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
497 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
498
499 /*
500 * Initialize the range.
501 */
502 pNew->GCPhys = GCPhys;
503 pNew->GCPhysLast = GCPhysLast;
504 pNew->pszDesc = pszDesc;
505 pNew->cb = cb;
506 pNew->fFlags = 0;
507
508 pNew->pvR3 = NULL;
509#ifndef VBOX_WITH_NEW_PHYS_CODE
510 pNew->paChunkR3Ptrs = NULL;
511
512 /* Allocate memory for chunk to HC ptr lookup array. */
513 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
514 AssertRCReturn(rc, rc);
515 pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
516
517#endif
518 RTGCPHYS iPage = cPages;
519 while (iPage-- > 0)
520 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
521
522 /* Update the page count stats. */
523 pVM->pgm.s.cZeroPages += cPages;
524 pVM->pgm.s.cAllPages += cPages;
525
526 /*
527 * Insert the new RAM range.
528 */
529 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
530
531 /*
532 * Notify REM.
533 */
534#ifdef VBOX_WITH_NEW_PHYS_CODE
535 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_RAM);
536#else
537 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, MM_RAM_FLAGS_DYNAMIC_ALLOC);
538#endif
539
540 return VINF_SUCCESS;
541}
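/*
 * Editor's usage sketch (not part of the original file): registering base RAM
 * during VM construction.  The 128 MB size is only an example value.
 *
 *     int rc = PGMR3PhysRegisterRam(pVM, 0 /*GCPhys*/, 128U * _1M, "Base RAM");
 *     AssertRCReturn(rc, rc);
 */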
542
543
544/**
545 * Resets (zeros) the RAM.
546 *
547 * ASSUMES that the caller owns the PGM lock.
548 *
549 * @returns VBox status code.
550 * @param pVM Pointer to the shared VM structure.
551 */
552int pgmR3PhysRamReset(PVM pVM)
553{
554#ifdef VBOX_WITH_NEW_PHYS_CODE
555 /*
556 * We batch up pages before freeing them.
557 */
558 uint32_t cPendingPages = 0;
559 PGMMFREEPAGESREQ pReq;
560 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
561 AssertLogRelRCReturn(rc, rc);
562#endif
563
564 /*
565 * Walk the ram ranges.
566 */
567 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
568 {
569 uint32_t iPage = pRam->cb >> PAGE_SHIFT; Assert((RTGCPHYS)iPage << PAGE_SHIFT == pRam->cb);
570#ifdef VBOX_WITH_NEW_PHYS_CODE
571 if (!pVM->pgm.s.fRamPreAlloc)
572 {
573 /* Replace all RAM pages by ZERO pages. */
574 while (iPage-- > 0)
575 {
576 PPGMPAGE pPage = &pRam->aPages[iPage];
577 switch (PGM_PAGE_GET_TYPE(pPage))
578 {
579 case PGMPAGETYPE_RAM:
580 if (!PGM_PAGE_IS_ZERO(pPage))
581 {
582 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
583 AssertLogRelRCReturn(rc, rc);
584 }
585 break;
586
587 case PGMPAGETYPE_MMIO2:
588 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
589 case PGMPAGETYPE_ROM:
590 case PGMPAGETYPE_MMIO:
591 break;
592 default:
593 AssertFailed();
594 }
595 } /* for each page */
596 }
597 else
598#endif
599 {
600 /* Zero the memory. */
601 while (iPage-- > 0)
602 {
603 PPGMPAGE pPage = &pRam->aPages[iPage];
604 switch (PGM_PAGE_GET_TYPE(pPage))
605 {
606#ifndef VBOX_WITH_NEW_PHYS_CODE
607 case PGMPAGETYPE_INVALID:
608 case PGMPAGETYPE_RAM:
609 if (pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)) /** @todo PAGE FLAGS */
610 {
611 /* shadow ram is reloaded elsewhere. */
612 Log4(("PGMR3Reset: not clearing phys page %RGp due to flags %RHp\n", pRam->GCPhys + (iPage << PAGE_SHIFT), pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO))); /** @todo PAGE FLAGS */
613 continue;
614 }
615 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
616 {
617 unsigned iChunk = iPage >> (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
618 if (pRam->paChunkR3Ptrs[iChunk])
619 ASMMemZero32((char *)pRam->paChunkR3Ptrs[iChunk] + ((iPage << PAGE_SHIFT) & PGM_DYNAMIC_CHUNK_OFFSET_MASK), PAGE_SIZE);
620 }
621 else
622 ASMMemZero32((char *)pRam->pvR3 + (iPage << PAGE_SHIFT), PAGE_SIZE);
623 break;
624#else /* VBOX_WITH_NEW_PHYS_CODE */
625 case PGMPAGETYPE_RAM:
626 switch (PGM_PAGE_GET_STATE(pPage))
627 {
628 case PGM_PAGE_STATE_ZERO:
629 break;
630 case PGM_PAGE_STATE_SHARED:
631 case PGM_PAGE_STATE_WRITE_MONITORED:
632 rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
633 AssertLogRelRCReturn(rc, rc);
634 case PGM_PAGE_STATE_ALLOCATED:
635 {
636 void *pvPage;
637 PPGMPAGEMAP pMapIgnored;
638 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pMapIgnored, &pvPage);
639 AssertLogRelRCReturn(rc, rc);
640 ASMMemZeroPage(pvPage);
641 break;
642 }
643 }
644 break;
645#endif /* VBOX_WITH_NEW_PHYS_CODE */
646
647 case PGMPAGETYPE_MMIO2:
648 case PGMPAGETYPE_ROM_SHADOW:
649 case PGMPAGETYPE_ROM:
650 case PGMPAGETYPE_MMIO:
651 break;
652 default:
653 AssertFailed();
654
655 }
656 } /* for each page */
657 }
658
659 }
660
661#ifdef VBOX_WITH_NEW_PHYS_CODE
662 /*
663 * Finish off any pages pending freeing.
664 */
665 if (cPendingPages)
666 {
667 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
668 AssertLogRelRCReturn(rc, rc);
669 }
670 GMMR3FreePagesCleanup(pReq);
671#endif
672
673
674 return VINF_SUCCESS;
675}
676
677
678/**
679 * This is the interface IOM is using to register an MMIO region.
680 *
681 * It will check for conflicts and ensure that a RAM range structure
682 * is present before calling the PGMR3HandlerPhysicalRegister API to
683 * register the callbacks.
684 *
685 * @returns VBox status code.
686 *
687 * @param pVM Pointer to the shared VM structure.
688 * @param GCPhys The start of the MMIO region.
689 * @param cb The size of the MMIO region.
690 * @param pfnHandlerR3 The address of the ring-3 handler. (IOMR3MMIOHandler)
691 * @param pvUserR3 The user argument for R3.
692 * @param pfnHandlerR0 The address of the ring-0 handler. (IOMMMIOHandler)
693 * @param pvUserR0 The user argument for R0.
694 * @param pfnHandlerRC The address of the RC handler. (IOMMMIOHandler)
695 * @param pvUserRC The user argument for RC.
696 * @param pszDesc The description of the MMIO region.
697 */
698VMMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb,
699 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
700 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
701 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
702 R3PTRTYPE(const char *) pszDesc)
703{
704 /*
705 * Assert on some assumptions.
706 */
707 VM_ASSERT_EMT(pVM);
708 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
709 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
710 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
711 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
712
713 /*
714 * Make sure there's a RAM range structure for the region.
715 */
716 int rc;
717 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
718 bool fRamExists = false;
719 PPGMRAMRANGE pRamPrev = NULL;
720 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
721 while (pRam && GCPhysLast >= pRam->GCPhys)
722 {
723 if ( GCPhysLast >= pRam->GCPhys
724 && GCPhys <= pRam->GCPhysLast)
725 {
726 /* Simplification: all within the same range. */
727 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
728 && GCPhysLast <= pRam->GCPhysLast,
729 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
730 GCPhys, GCPhysLast, pszDesc,
731 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
732 VERR_PGM_RAM_CONFLICT);
733
734 /* Check that it's all RAM or MMIO pages. */
735 PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
736 uint32_t cLeft = cb >> PAGE_SHIFT;
737 while (cLeft-- > 0)
738 {
739 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
740 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
741 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
742 GCPhys, GCPhysLast, pszDesc, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
743 VERR_PGM_RAM_CONFLICT);
744 pPage++;
745 }
746
747 /* Looks good. */
748 fRamExists = true;
749 break;
750 }
751
752 /* next */
753 pRamPrev = pRam;
754 pRam = pRam->pNextR3;
755 }
756 PPGMRAMRANGE pNew;
757 if (fRamExists)
758 pNew = NULL;
759 else
760 {
761 /*
762 * No RAM range, insert an ad-hoc one.
763 *
764 * Note that we don't have to tell REM about this range because
765 * PGMHandlerPhysicalRegisterEx will do that for us.
766 */
767 Log(("PGMR3PhysMMIORegister: Adding ad-hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));
768
769 const uint32_t cPages = cb >> PAGE_SHIFT;
770 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
771 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), 16, MM_TAG_PGM_PHYS, (void **)&pNew);
772 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
773
774 /* Initialize the range. */
775 pNew->GCPhys = GCPhys;
776 pNew->GCPhysLast = GCPhysLast;
777 pNew->pszDesc = pszDesc;
778 pNew->cb = cb;
779 pNew->fFlags = 0; /* Some MMIO flag here? */
780
781 pNew->pvR3 = NULL;
782#ifndef VBOX_WITH_NEW_PHYS_CODE
783 pNew->paChunkR3Ptrs = NULL;
784#endif
785
786 uint32_t iPage = cPages;
787 while (iPage-- > 0)
788 PGM_PAGE_INIT_ZERO_REAL(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
789 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);
790
791 /* update the page count stats. */
792 pVM->pgm.s.cZeroPages += cPages;
793 pVM->pgm.s.cAllPages += cPages;
794
795 /* link it */
796 pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
797 }
798
799 /*
800 * Register the access handler.
801 */
802 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_MMIO, GCPhys, GCPhysLast,
803 pfnHandlerR3, pvUserR3,
804 pfnHandlerR0, pvUserR0,
805 pfnHandlerRC, pvUserRC, pszDesc);
806 if ( RT_FAILURE(rc)
807 && !fRamExists)
808 {
809 pVM->pgm.s.cZeroPages -= cb >> PAGE_SHIFT;
810 pVM->pgm.s.cAllPages -= cb >> PAGE_SHIFT;
811
812 /* remove the ad-hoc range. */
813 pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
814 pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
815 MMHyperFree(pVM, pNew);
816 }
817
818 return rc;
819}
820
821
822/**
823 * This is the interface IOM is using to deregister an MMIO region.
824 *
825 * It will take care of calling PGMHandlerPhysicalDeregister and clean up
826 * any ad-hoc PGMRAMRANGE left behind.
827 *
828 * @returns VBox status code.
829 * @param pVM Pointer to the shared VM structure.
830 * @param GCPhys The start of the MMIO region.
831 * @param cb The size of the MMIO region.
832 */
833VMMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
834{
835 VM_ASSERT_EMT(pVM);
836
837 /*
838 * First deregister the handler, then check if we should remove the ram range.
839 */
840 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
841 if (RT_SUCCESS(rc))
842 {
843 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
844 PPGMRAMRANGE pRamPrev = NULL;
845 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
846 while (pRam && GCPhysLast >= pRam->GCPhys)
847 {
848 /*if ( GCPhysLast >= pRam->GCPhys
849 && GCPhys <= pRam->GCPhysLast) - later */
850 if ( GCPhysLast == pRam->GCPhysLast
851 && GCPhys == pRam->GCPhys)
852 {
853 Assert(pRam->cb == cb);
854
855 /*
856 * See if all the pages are dead MMIO pages.
857 */
858 bool fAllMMIO = true;
859 PPGMPAGE pPage = &pRam->aPages[0];
860 uint32_t const cPages = cb >> PAGE_SHIFT;
861 uint32_t cLeft = cPages;
862 while (cLeft-- > 0)
863 {
864 if ( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO
865 /*|| not-out-of-action later */)
866 {
867 fAllMMIO = false;
868 Assert(PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO2_ALIAS_MMIO);
869 break;
870 }
871 Assert(PGM_PAGE_IS_ZERO(pPage));
872 pPage++;
873 }
874
875 /*
876 * Unlink it and free if it's all MMIO.
877 */
878 if (fAllMMIO)
879 {
880 Log(("PGMR3PhysMMIODeregister: Freeing ad-hoc MMIO range for %RGp-%RGp %s\n",
881 GCPhys, GCPhysLast, pRam->pszDesc));
882
883 pVM->pgm.s.cAllPages -= cPages;
884 pVM->pgm.s.cZeroPages -= cPages;
885
886 pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
887 pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
888 MMHyperFree(pVM, pRam);
889 }
890 break;
891 }
892
893 /* next */
894 pRamPrev = pRam;
895 pRam = pRam->pNextR3;
896 }
897 }
898
899 return rc;
900}
901
902
903/**
904 * Locate an MMIO2 range.
905 *
906 * @returns Pointer to the MMIO2 range.
907 * @param pVM Pointer to the shared VM structure.
908 * @param pDevIns The device instance owning the region.
909 * @param iRegion The region.
910 */
911DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
912{
913 /*
914 * Search the list.
915 */
916 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
917 if ( pCur->pDevInsR3 == pDevIns
918 && pCur->iRegion == iRegion)
919 return pCur;
920 return NULL;
921}
922
923
924/**
925 * Allocate and register an MMIO2 region.
926 *
927 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's
928 * RAM associated with a device. It is also non-shared memory with a
929 * permanent ring-3 mapping and page backing (presently).
930 *
931 * An MMIO2 range may overlap with base memory if a lot of RAM
932 * is configured for the VM, in which case we'll drop the base
933 * memory pages. Presently we will make no attempt to preserve
934 * anything that happens to be present in the base memory that
935 * is replaced; this is of course incorrect, but it's too much
936 * effort.
937 *
938 * @returns VBox status code.
939 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the memory.
940 * @retval VERR_ALREADY_EXISTS if the region already exists.
941 *
942 * @param pVM Pointer to the shared VM structure.
943 * @param pDevIns The device instance owning the region.
944 * @param iRegion The region number. If the MMIO2 memory is a PCI I/O region
945 * this number has to be the number of that region. Otherwise
946 * it can be any number up to UINT8_MAX.
947 * @param cb The size of the region. Must be page aligned.
948 * @param fFlags Reserved for future use, must be zero.
949 * @param ppv Where to store the pointer to the ring-3 mapping of the memory.
950 * @param pszDesc The description.
951 */
952VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc)
953{
954 /*
955 * Validate input.
956 */
957 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
958 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
959 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
960 AssertPtrReturn(ppv, VERR_INVALID_POINTER);
961 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
962 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
963 AssertReturn(pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
964 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
965 AssertReturn(cb, VERR_INVALID_PARAMETER);
966 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
967
968 const uint32_t cPages = cb >> PAGE_SHIFT;
969 AssertLogRelReturn((RTGCPHYS)cPages << PAGE_SHIFT == cb, VERR_INVALID_PARAMETER);
970 AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY);
971
972 /*
973 * Try reserve and allocate the backing memory first as this is what is
974 * most likely to fail.
975 */
976 int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc);
977 if (RT_FAILURE(rc))
978 return rc;
979
980 void *pvPages;
981 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE));
982 if (RT_SUCCESS(rc))
983 rc = SUPR3PageAllocEx(cPages, 0 /*fFlags*/, &pvPages, NULL /*pR0Ptr*/, paPages);
984 if (RT_SUCCESS(rc))
985 {
986 memset(pvPages, 0, cPages * PAGE_SIZE);
987
988 /*
989 * Create the MMIO2 range record for it.
990 */
991 const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]);
992 PPGMMMIO2RANGE pNew;
993 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
994 AssertLogRelMsgRC(rc, ("cbRamRange=%zu\n", cbRange));
995 if (RT_SUCCESS(rc))
996 {
997 pNew->pDevInsR3 = pDevIns;
998 pNew->pvR3 = pvPages;
999 //pNew->pNext = NULL;
1000 //pNew->fMapped = false;
1001 //pNew->fOverlapping = false;
1002 pNew->iRegion = iRegion;
1003 pNew->RamRange.GCPhys = NIL_RTGCPHYS;
1004 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
1005 pNew->RamRange.pszDesc = pszDesc;
1006 pNew->RamRange.cb = cb;
1007 //pNew->RamRange.fFlags = 0;
1008
1009 pNew->RamRange.pvR3 = pvPages; ///@todo remove this [new phys code]
1010#ifndef VBOX_WITH_NEW_PHYS_CODE
1011 pNew->RamRange.paChunkR3Ptrs = NULL; ///@todo remove this [new phys code]
1012#endif
1013
1014 uint32_t iPage = cPages;
1015 while (iPage-- > 0)
1016 {
1017 PGM_PAGE_INIT(&pNew->RamRange.aPages[iPage],
1018 paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
1019 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
1020 }
1021
1022 /* update page count stats */
1023 pVM->pgm.s.cAllPages += cPages;
1024 pVM->pgm.s.cPrivatePages += cPages;
1025
1026 /*
1027 * Link it into the list.
1028 * Since there is no particular order, just push it.
1029 */
1030 pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3;
1031 pVM->pgm.s.pMmio2RangesR3 = pNew;
1032
1033 *ppv = pvPages;
1034 RTMemTmpFree(paPages);
1035 return VINF_SUCCESS;
1036 }
1037
1038 SUPR3PageFreeEx(pvPages, cPages);
1039 }
1040 RTMemTmpFree(paPages);
1041 MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pszDesc);
1042 return rc;
1043}
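/*
 * Editor's usage sketch (not part of the original file): a graphics device
 * allocating its VRAM as MMIO2 from its constructor.  The region number and
 * size are hypothetical; pvVRam becomes the permanent ring-3 mapping.
 *
 *     void *pvVRam = NULL;
 *     int rc = PGMR3PhysMMIO2Register(pVM, pDevIns, 0 /*iRegion*/, 8U * _1M,
 *                                     0 /*fFlags*/, &pvVRam, "VGA VRam");
 *     AssertRCReturn(rc, rc);
 */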
1044
1045
1046/**
1047 * Deregisters and frees an MMIO2 region.
1048 *
1049 * Any physical (and virtual) access handlers registered for the region must
1050 * be deregistered before calling this function.
1051 *
1052 * @returns VBox status code.
1053 * @param pVM Pointer to the shared VM structure.
1054 * @param pDevIns The device instance owning the region.
1055 * @param iRegion The region. If it's UINT32_MAX it'll be a wildcard match.
1056 */
1057VMMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
1058{
1059 /*
1060 * Validate input.
1061 */
1062 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1063 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1064 AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
1065
1066 int rc = VINF_SUCCESS;
1067 unsigned cFound = 0;
1068 PPGMMMIO2RANGE pPrev = NULL;
1069 PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3;
1070 while (pCur)
1071 {
1072 if ( pCur->pDevInsR3 == pDevIns
1073 && ( iRegion == UINT32_MAX
1074 || pCur->iRegion == iRegion))
1075 {
1076 cFound++;
1077
1078 /*
1079 * Unmap it if it's mapped.
1080 */
1081 if (pCur->fMapped)
1082 {
1083 int rc2 = PGMR3PhysMMIO2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);
1084 AssertRC(rc2);
1085 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1086 rc = rc2;
1087 }
1088
1089 /*
1090 * Unlink it
1091 */
1092 PPGMMMIO2RANGE pNext = pCur->pNextR3;
1093 if (pPrev)
1094 pPrev->pNextR3 = pNext;
1095 else
1096 pVM->pgm.s.pMmio2RangesR3 = pNext;
1097 pCur->pNextR3 = NULL;
1098
1099 /*
1100 * Free the memory.
1101 */
1102 int rc2 = SUPR3PageFreeEx(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT);
1103 AssertRC(rc2);
1104 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1105 rc = rc2;
1106
1107 uint32_t const cPages = pCur->RamRange.cb >> PAGE_SHIFT;
1108 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pCur->RamRange.pszDesc);
1109 AssertRC(rc2);
1110 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1111 rc = rc2;
1112
1113 /* we're leaking hyper memory here if done at runtime. */
1114 Assert( VMR3GetState(pVM) == VMSTATE_OFF
1115 || VMR3GetState(pVM) == VMSTATE_DESTROYING
1116 || VMR3GetState(pVM) == VMSTATE_TERMINATED
1117 || VMR3GetState(pVM) == VMSTATE_CREATING);
1118 /*rc = MMHyperFree(pVM, pCur);
1119 AssertRCReturn(rc, rc); - not safe, see the alloc call. */
1120
1121
1122 /* update page count stats */
1123 pVM->pgm.s.cAllPages -= cPages;
1124 pVM->pgm.s.cPrivatePages -= cPages;
1125
1126 /* next */
1127 pCur = pNext;
1128 }
1129 else
1130 {
1131 pPrev = pCur;
1132 pCur = pCur->pNextR3;
1133 }
1134 }
1135
1136 return !cFound && iRegion != UINT32_MAX ? VERR_NOT_FOUND : rc;
1137}
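/*
 * Editor's usage sketch (not part of the original file): a device destructor
 * dropping all of its MMIO2 regions in one go via the wildcard region value.
 *
 *     int rc = PGMR3PhysMMIO2Deregister(pVM, pDevIns, UINT32_MAX /*all regions*/);
 *     AssertRC(rc);
 */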
1138
1139
1140/**
1141 * Maps an MMIO2 region.
1142 *
1143 * This is done when a guest / the bios / state loading changes the
1144 * PCI config. The replacing of base memory has the same restrictions
1145 * as during registration, of course.
1146 *
1147 * @returns VBox status code.
1148 *
1149 * @param pVM Pointer to the shared VM structure.
1150 * @param pDevIns The device instance owning the region.
1151 */
1152VMMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
1153{
1154 /*
1155 * Validate input
1156 */
1157 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1158 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1159 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1160 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
1161 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
1162 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1163
1164 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1165 AssertReturn(pCur, VERR_NOT_FOUND);
1166 AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER);
1167 Assert(pCur->RamRange.GCPhys == NIL_RTGCPHYS);
1168 Assert(pCur->RamRange.GCPhysLast == NIL_RTGCPHYS);
1169
1170 const RTGCPHYS GCPhysLast = GCPhys + pCur->RamRange.cb - 1;
1171 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1172
1173 /*
1174 * Find our location in the ram range list, checking for
1175 * restrictions we don't bother implementing yet (partially overlapping).
1176 */
1177 bool fRamExists = false;
1178 PPGMRAMRANGE pRamPrev = NULL;
1179 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1180 while (pRam && GCPhysLast >= pRam->GCPhys)
1181 {
1182 if ( GCPhys <= pRam->GCPhysLast
1183 && GCPhysLast >= pRam->GCPhys)
1184 {
1185 /* completely within? */
1186 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
1187 && GCPhysLast <= pRam->GCPhysLast,
1188 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
1189 GCPhys, GCPhysLast, pCur->RamRange.pszDesc,
1190 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1191 VERR_PGM_RAM_CONFLICT);
1192 fRamExists = true;
1193 break;
1194 }
1195
1196 /* next */
1197 pRamPrev = pRam;
1198 pRam = pRam->pNextR3;
1199 }
1200 if (fRamExists)
1201 {
1202 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1203 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
1204 while (cPagesLeft-- > 0)
1205 {
1206 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
1207 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
1208 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc),
1209 VERR_PGM_RAM_CONFLICT);
1210 pPage++;
1211 }
1212 }
1213 Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n",
1214 GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc));
1215
1216 /*
1217 * Make the changes.
1218 */
1219 pgmLock(pVM);
1220
1221 pCur->RamRange.GCPhys = GCPhys;
1222 pCur->RamRange.GCPhysLast = GCPhysLast;
1223 pCur->fMapped = true;
1224 pCur->fOverlapping = fRamExists;
1225
1226 if (fRamExists)
1227 {
1228 uint32_t cPendingPages = 0;
1229 PGMMFREEPAGESREQ pReq;
1230 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1231 AssertLogRelRCReturn(rc, rc);
1232
1233 /* replace the pages, freeing all present RAM pages. */
1234 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
1235 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1236 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
1237 while (cPagesLeft-- > 0)
1238 {
1239 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
1240 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
1241
1242 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
1243 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhys);
1244 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_MMIO2);
1245 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ALLOCATED);
1246
1247 pVM->pgm.s.cZeroPages--;
1248 GCPhys += PAGE_SIZE;
1249 pPageSrc++;
1250 pPageDst++;
1251 }
1252
1253 if (cPendingPages)
1254 {
1255 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
1256 AssertLogRelRCReturn(rc, rc);
1257 }
1258 GMMR3FreePagesCleanup(pReq);
1259 }
1260 else
1261 {
1262 /* link in the ram range */
1263 pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev);
1264 REMR3NotifyPhysRamRegister(pVM, GCPhys, pCur->RamRange.cb, REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2);
1265 }
1266
1267 pgmUnlock(pVM);
1268
1269 return VINF_SUCCESS;
1270}
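/*
 * Editor's usage sketch (not part of the original file): mapping a registered
 * MMIO2 region when the guest programs the corresponding PCI BAR.  GCPhysBar
 * is the hypothetical address handed to the PCI mapping callback; the same
 * address is later passed to PGMR3PhysMMIO2Unmap when the BAR is moved or
 * disabled.
 *
 *     int rc = PGMR3PhysMMIO2Map(pVM, pDevIns, 0 /*iRegion*/, GCPhysBar);
 *     AssertRCReturn(rc, rc);
 */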
1271
1272
1273/**
1274 * Unmaps an MMIO2 region.
1275 *
1276 * This is done when a guest / the bios / state loading changes the
1277 * PCI config. The replacing of base memory has the same restrictions
1278 * as during registration, of course.
1279 */
1280VMMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
1281{
1282 /*
1283 * Validate input
1284 */
1285 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1286 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1287 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1288 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
1289 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
1290 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1291
1292 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1293 AssertReturn(pCur, VERR_NOT_FOUND);
1294 AssertReturn(pCur->fMapped, VERR_WRONG_ORDER);
1295 AssertReturn(pCur->RamRange.GCPhys == GCPhys, VERR_INVALID_PARAMETER);
1296 Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS);
1297
1298 Log(("PGMR3PhysMMIO2Unmap: %RGp-%RGp %s\n",
1299 pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc));
1300
1301 /*
1302 * Unmap it.
1303 */
1304 pgmLock(pVM);
1305
1306 if (pCur->fOverlapping)
1307 {
1308 /* Restore the RAM pages we've replaced. */
1309 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1310 while (pRam->GCPhys > pCur->RamRange.GCPhysLast)
1311 pRam = pRam->pNextR3;
1312
1313 RTHCPHYS const HCPhysZeroPg = pVM->pgm.s.HCPhysZeroPg;
1314 Assert(HCPhysZeroPg != 0 && HCPhysZeroPg != NIL_RTHCPHYS);
1315 PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1316 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
1317 while (cPagesLeft-- > 0)
1318 {
1319 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhysZeroPg);
1320 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_RAM);
1321 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ZERO);
1322 PGM_PAGE_SET_PAGEID(pPageDst, NIL_GMM_PAGEID);
1323
1324 pVM->pgm.s.cZeroPages++;
1325 pPageDst++;
1326 }
1327 }
1328 else
1329 {
1330 REMR3NotifyPhysRamDeregister(pVM, pCur->RamRange.GCPhys, pCur->RamRange.cb);
1331 pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange);
1332 }
1333
1334 pCur->RamRange.GCPhys = NIL_RTGCPHYS;
1335 pCur->RamRange.GCPhysLast = NIL_RTGCPHYS;
1336 pCur->fOverlapping = false;
1337 pCur->fMapped = false;
1338
1339 pgmUnlock(pVM);
1340
1341 return VINF_SUCCESS;
1342}
1343
1344
1345/**
1346 * Checks if the given address is an MMIO2 base address or not.
1347 *
1348 * @returns true/false accordingly.
1349 * @param pVM Pointer to the shared VM structure.
1350 * @param pDevIns The owner of the memory, optional.
1351 * @param GCPhys The address to check.
1352 */
1353VMMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
1354{
1355 /*
1356 * Validate input
1357 */
1358 VM_ASSERT_EMT_RETURN(pVM, false);
1359 AssertPtrReturn(pDevIns, false);
1360 AssertReturn(GCPhys != NIL_RTGCPHYS, false);
1361 AssertReturn(GCPhys != 0, false);
1362 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);
1363
1364 /*
1365 * Search the list.
1366 */
1367 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
1368 if (pCur->RamRange.GCPhys == GCPhys)
1369 {
1370 Assert(pCur->fMapped);
1371 return true;
1372 }
1373 return false;
1374}
1375
1376
1377/**
1378 * Gets the HC physical address of a page in the MMIO2 region.
1379 *
1380 * This API is intended for MMHyper and shouldn't be called
1381 * by anyone else...
1382 *
1383 * @returns VBox status code.
1384 * @param pVM Pointer to the shared VM structure.
1385 * @param pDevIns The owner of the memory, optional.
1386 * @param iRegion The region.
1387 * @param off The page expressed as an offset into the MMIO2 region.
1388 * @param pHCPhys Where to store the result.
1389 */
1390VMMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
1391{
1392 /*
1393 * Validate input
1394 */
1395 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1396 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1397 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1398
1399 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1400 AssertReturn(pCur, VERR_NOT_FOUND);
1401 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
1402
1403 PCPGMPAGE pPage = &pCur->RamRange.aPages[off >> PAGE_SHIFT];
1404 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1405 return VINF_SUCCESS;
1406}
1407
1408
1409/**
1410 * Maps a portion of an MMIO2 region into kernel space (host).
1411 *
1412 * The kernel mapping will become invalid when the MMIO2 memory is deregistered
1413 * or the VM is terminated.
1414 *
1415 * @return VBox status code.
1416 *
1417 * @param pVM Pointer to the shared VM structure.
1418 * @param pDevIns The device owning the MMIO2 memory.
1419 * @param iRegion The region.
1420 * @param off The offset into the region. Must be page aligned.
1421 * @param cb The number of bytes to map. Must be page aligned.
1422 * @param pszDesc Mapping description.
1423 * @param pR0Ptr Where to store the R0 address.
1424 */
1425VMMR3DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
1426 const char *pszDesc, PRTR0PTR pR0Ptr)
1427{
1428 /*
1429 * Validate input.
1430 */
1431 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1432 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1433 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1434
1435 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1436 AssertReturn(pCur, VERR_NOT_FOUND);
1437 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
1438 AssertReturn(cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
1439 AssertReturn(off + cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
1440
1441 /*
1442 * Pass the request on to the support library/driver.
1443 */
1444 int rc = SUPR3PageMapKernel(pCur->pvR3, off, cb, 0, pR0Ptr);
1445
1446 return rc;
1447}
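/*
 * Editor's usage sketch (not part of the original file): giving a ring-0
 * device part direct access to the first 64 KB of an MMIO2 region.  Region
 * number, size and description are hypothetical.
 *
 *     RTR0PTR R0PtrVRam = NIL_RTR0PTR;
 *     int rc = PGMR3PhysMMIO2MapKernel(pVM, pDevIns, 0 /*iRegion*/, 0 /*off*/,
 *                                      _64K, "VGA VRam (R0)", &R0PtrVRam);
 *     AssertRCReturn(rc, rc);
 */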
1448
1449
1450/**
1451 * Registers a ROM image.
1452 *
1453 * Shadowed ROM images require double the amount of backing memory, so
1454 * don't use that unless you have to. Shadowing of ROM images is a process
1455 * where we can select where the reads go and where the writes go. On real
1456 * hardware the chipset provides means to configure this. We provide
1457 * PGMR3PhysProtectROM() for this purpose.
1458 *
1459 * A read-only copy of the ROM image will always be kept around while we
1460 * will allocate RAM pages for the changes on demand (unless all memory
1461 * is configured to be preallocated).
1462 *
1463 * @returns VBox status.
1464 * @param pVM VM Handle.
1465 * @param pDevIns The device instance owning the ROM.
1466 * @param GCPhys First physical address in the range.
1467 * Must be page aligned!
1468 * @param cb The size of the range (in bytes).
1469 * Must be page aligned!
1470 * @param pvBinary Pointer to the binary data backing the ROM image.
1471 * This must be exactly \a cb in size.
1472 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAG_SHADOWED
1473 * and/or PGMPHYS_ROM_FLAG_PERMANENT_BINARY.
1474 * @param pszDesc Pointer to description string. This must not be freed.
1475 *
1476 * @remark There is no way to remove the ROM yet, either automatically on device
1477 * cleanup or manually from the device. This isn't difficult in any way, it's
1478 * just not something we expect to be necessary for a while.
1479 */
1480VMMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
1481 const void *pvBinary, uint32_t fFlags, const char *pszDesc)
1482{
1483 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p fFlags=%#x pszDesc=%s\n",
1484 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, fFlags, pszDesc));
1485
1486 /*
1487 * Validate input.
1488 */
1489 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1490 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
1491 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
1492 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1493 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1494 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
1495 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1496 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAG_SHADOWED | PGMPHYS_ROM_FLAG_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
1497 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
1498
1499 const uint32_t cPages = cb >> PAGE_SHIFT;
1500
1501 /*
1502 * Find the ROM location in the ROM list first.
1503 */
1504 PPGMROMRANGE pRomPrev = NULL;
1505 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
1506 while (pRom && GCPhysLast >= pRom->GCPhys)
1507 {
1508 if ( GCPhys <= pRom->GCPhysLast
1509 && GCPhysLast >= pRom->GCPhys)
1510 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
1511 GCPhys, GCPhysLast, pszDesc,
1512 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
1513 VERR_PGM_RAM_CONFLICT);
1514 /* next */
1515 pRomPrev = pRom;
1516 pRom = pRom->pNextR3;
1517 }
1518
1519 /*
1520 * Find the RAM location and check for conflicts.
1521 *
1522 * Conflict detection is a bit different than for RAM
1523 * registration since a ROM can be located within a RAM
1524 * range. So, what we have to check for is other memory
1525 * types (other than RAM that is) and that we don't span
1526 * more than one RAM range (lazy).
1527 */
1528 bool fRamExists = false;
1529 PPGMRAMRANGE pRamPrev = NULL;
1530 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1531 while (pRam && GCPhysLast >= pRam->GCPhys)
1532 {
1533 if ( GCPhys <= pRam->GCPhysLast
1534 && GCPhysLast >= pRam->GCPhys)
1535 {
1536 /* completely within? */
1537 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
1538 && GCPhysLast <= pRam->GCPhysLast,
1539 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
1540 GCPhys, GCPhysLast, pszDesc,
1541 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1542 VERR_PGM_RAM_CONFLICT);
1543 fRamExists = true;
1544 break;
1545 }
1546
1547 /* next */
1548 pRamPrev = pRam;
1549 pRam = pRam->pNextR3;
1550 }
1551 if (fRamExists)
1552 {
1553 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1554 uint32_t cPagesLeft = cPages;
1555 while (cPagesLeft-- > 0)
1556 {
1557 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
1558 ("%RGp isn't a RAM page (%d) - registering %RGp-%RGp (%s).\n",
1559 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pszDesc),
1560 VERR_PGM_RAM_CONFLICT);
1561 Assert(PGM_PAGE_IS_ZERO(pPage));
1562 pPage++;
1563 }
1564 }
1565
1566 /*
1567 * Update the base memory reservation if necessary.
1568 */
1569 uint32_t cExtraBaseCost = fRamExists ? cPages : 0;
1570 if (fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
1571 cExtraBaseCost += cPages;
1572 if (cExtraBaseCost)
1573 {
1574 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
1575 if (RT_FAILURE(rc))
1576 return rc;
1577 }
1578
1579 /*
1580 * Allocate memory for the virgin copy of the RAM.
1581 */
1582 PGMMALLOCATEPAGESREQ pReq;
1583 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
1584 AssertRCReturn(rc, rc);
1585
1586 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1587 {
1588 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
1589 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
1590 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
1591 }
1592
1593 pgmLock(pVM);
1594 rc = GMMR3AllocatePagesPerform(pVM, pReq);
1595 pgmUnlock(pVM);
1596 if (RT_FAILURE(rc))
1597 {
1598 GMMR3AllocatePagesCleanup(pReq);
1599 return rc;
1600 }
1601
1602 /*
1603 * Allocate the new ROM range and RAM range (if necessary).
1604 */
1605 PPGMROMRANGE pRomNew;
1606 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
1607 if (RT_SUCCESS(rc))
1608 {
1609 PPGMRAMRANGE pRamNew = NULL;
1610 if (!fRamExists)
1611 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
1612 if (RT_SUCCESS(rc))
1613 {
1614 pgmLock(pVM);
1615
1616 /*
1617 * Initialize and insert the RAM range (if required).
1618 */
1619 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
1620 if (!fRamExists)
1621 {
1622 pRamNew->GCPhys = GCPhys;
1623 pRamNew->GCPhysLast = GCPhysLast;
1624 pRamNew->pszDesc = pszDesc;
1625 pRamNew->cb = cb;
1626 pRamNew->fFlags = 0;
1627 pRamNew->pvR3 = NULL;
1628
1629 PPGMPAGE pPage = &pRamNew->aPages[0];
1630 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
1631 {
1632 PGM_PAGE_INIT(pPage,
1633 pReq->aPages[iPage].HCPhysGCPhys,
1634 pReq->aPages[iPage].idPage,
1635 PGMPAGETYPE_ROM,
1636 PGM_PAGE_STATE_ALLOCATED);
1637
1638 pRomPage->Virgin = *pPage;
1639 }
1640
1641 pVM->pgm.s.cAllPages += cPages;
1642 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
1643 }
1644 else
1645 {
1646 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1647 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
1648 {
1649 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_ROM);
1650 PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
1651 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1652 PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);
1653
1654 pRomPage->Virgin = *pPage;
1655 }
1656
1657 pRamNew = pRam;
1658
1659 pVM->pgm.s.cZeroPages -= cPages;
1660 }
1661 pVM->pgm.s.cPrivatePages += cPages;
1662
1663 pgmUnlock(pVM);
1664
1665
1666 /*
1667 * !HACK ALERT! REM + (Shadowed) ROM ==> mess.
1668 *
1669 * If it's shadowed we'll register the handler after the ROM notification
1670 * so we get the access handler callbacks that we should. If it isn't
1671 * shadowed we'll do it the other way around to make REM use the built-in
1672 * ROM behavior and not the handler behavior (which is to route all access
1673 * to PGM atm).
1674 */
1675 if (fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
1676 {
1677 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, true /* fShadowed */);
1678 rc = PGMR3HandlerPhysicalRegister(pVM,
1679 fFlags & PGMPHYS_ROM_FLAG_SHADOWED
1680 ? PGMPHYSHANDLERTYPE_PHYSICAL_ALL
1681 : PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
1682 GCPhys, GCPhysLast,
1683 pgmR3PhysRomWriteHandler, pRomNew,
1684 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
1685 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
1686 }
1687 else
1688 {
1689 rc = PGMR3HandlerPhysicalRegister(pVM,
1690 fFlags & PGMPHYS_ROM_FLAG_SHADOWED
1691 ? PGMPHYSHANDLERTYPE_PHYSICAL_ALL
1692 : PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
1693 GCPhys, GCPhysLast,
1694 pgmR3PhysRomWriteHandler, pRomNew,
1695 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
1696 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
1697 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false /* fShadowed */);
1698 }
1699 if (RT_SUCCESS(rc))
1700 {
1701 pgmLock(pVM);
1702
1703 /*
1704 * Copy the image over to the virgin pages.
1705 * This must be done after linking in the RAM range.
1706 */
1707 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
1708 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
1709 {
1710 void *pvDstPage;
1711 PPGMPAGEMAP pMapIgnored;
1712 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pMapIgnored, &pvDstPage);
1713 if (RT_FAILURE(rc))
1714 {
1715 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
1716 break;
1717 }
1718 memcpy(pvDstPage, (const uint8_t *)pvBinary + (iPage << PAGE_SHIFT), PAGE_SIZE);
1719 }
1720 if (RT_SUCCESS(rc))
1721 {
1722 /*
1723 * Initialize the ROM range.
1724 * Note that the Virgin member of the pages has already been initialized above.
1725 */
1726 pRomNew->GCPhys = GCPhys;
1727 pRomNew->GCPhysLast = GCPhysLast;
1728 pRomNew->cb = cb;
1729 pRomNew->fFlags = fFlags;
1730 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAG_PERMANENT_BINARY ? pvBinary : NULL;
1731 pRomNew->pszDesc = pszDesc;
1732
1733 for (unsigned iPage = 0; iPage < cPages; iPage++)
1734 {
1735 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
1736 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
1737 PGM_PAGE_INIT_ZERO_REAL(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
1738 }
1739
1740 /* update the page count stats */
1741 pVM->pgm.s.cZeroPages += cPages;
1742 pVM->pgm.s.cAllPages += cPages;
1743
1744 /*
1745 * Insert the ROM range, tell REM and return successfully.
1746 */
1747 pRomNew->pNextR3 = pRom;
1748 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
1749 pRomNew->pNextRC = pRom ? MMHyperCCToRC(pVM, pRom) : NIL_RTRCPTR;
1750
1751 if (pRomPrev)
1752 {
1753 pRomPrev->pNextR3 = pRomNew;
1754 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
1755 pRomPrev->pNextRC = MMHyperCCToRC(pVM, pRomNew);
1756 }
1757 else
1758 {
1759 pVM->pgm.s.pRomRangesR3 = pRomNew;
1760 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
1761 pVM->pgm.s.pRomRangesRC = MMHyperCCToRC(pVM, pRomNew);
1762 }
1763
1764 GMMR3AllocatePagesCleanup(pReq);
1765 pgmUnlock(pVM);
1766 return VINF_SUCCESS;
1767 }
1768
1769 /* bail out */
1770
1771 pgmUnlock(pVM);
1772 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
1773 AssertRC(rc2);
1774 pgmLock(pVM);
1775 }
1776
1777 if (!fRamExists)
1778 {
1779 pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
1780 MMHyperFree(pVM, pRamNew);
1781 }
1782 }
1783 MMHyperFree(pVM, pRomNew);
1784 }
1785
1786 /** @todo Purge the mapping cache or something... */
1787 GMMR3FreeAllocatedPages(pVM, pReq);
1788 GMMR3AllocatePagesCleanup(pReq);
1789 pgmUnlock(pVM);
1790 return rc;
1791}
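/*
 * Editor's usage sketch (not part of the original file): registering a 128 KB
 * system BIOS image at the top of the 4GB area.  Address, size and the
 * pvBiosBinary buffer are hypothetical example values.
 *
 *     int rc = PGMR3PhysRomRegister(pVM, pDevIns, UINT32_C(0xfffe0000), 128U * _1K,
 *                                   pvBiosBinary, PGMPHYS_ROM_FLAG_PERMANENT_BINARY,
 *                                   "PC BIOS");
 *     AssertRCReturn(rc, rc);
 */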
1792
1793
1794/**
1795 * \#PF Handler callback for ROM write accesses.
1796 *
1797 * @returns VINF_SUCCESS if the handler has carried out the operation.
1798 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1799 * @param pVM VM Handle.
1800 * @param GCPhys The physical address the guest is writing to.
1801 * @param pvPhys The HC mapping of that address.
1802 * @param pvBuf What the guest is reading/writing.
1803 * @param cbBuf How much it's reading/writing.
1804 * @param enmAccessType The access type.
1805 * @param pvUser User argument.
1806 */
1807static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1808{
1809 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
1810 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
1811 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
1812 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
1813 switch (pRomPage->enmProt)
1814 {
1815 /*
1816 * Ignore.
1817 */
1818 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
1819 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
1820 return VINF_SUCCESS;
1821
1822 /*
1823 * Write to the ram page.
1824 */
1825 case PGMROMPROT_READ_ROM_WRITE_RAM:
1826 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
1827 {
1828 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
1829 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
1830
1831 /*
1832 * Take the lock, do lazy allocation, map the page and copy the data.
1833 *
1834 * Note that we have to bypass the mapping TLB since it works on
1835 * guest physical addresses and entering the shadow page would
1836 * kind of screw things up...
1837 */
1838 int rc = pgmLock(pVM);
1839 AssertRC(rc);
1840
1841 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pRomPage->Shadow) != PGM_PAGE_STATE_ALLOCATED))
1842 {
1843 rc = pgmPhysPageMakeWritable(pVM, &pRomPage->Shadow, GCPhys);
1844 if (RT_FAILURE(rc))
1845 {
1846 pgmUnlock(pVM);
1847 return rc;
1848 }
1849 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1850 }
1851
1852 void *pvDstPage;
1853 PPGMPAGEMAP pMapIgnored;
1854 int rc2 = pgmPhysPageMap(pVM, &pRomPage->Shadow, GCPhys & X86_PTE_PG_MASK, &pMapIgnored, &pvDstPage);
1855 if (RT_SUCCESS(rc2))
1856 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
1857 else
1858 rc = rc2;
1859
1860 pgmUnlock(pVM);
1861 return rc;
1862 }
1863
1864 default:
1865 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
1866 pRom->aPages[iPage].enmProt, iPage, GCPhys),
1867 VERR_INTERNAL_ERROR);
1868 }
1869}
1870
1871
1872/**
1873 * Called by PGMR3Reset to reset the shadow, switch to the virgin,
1874 * and verify that the virgin part is untouched.
1875 *
1876 * This is done after the normal memory has been cleared.
1877 *
1878 * ASSUMES that the caller owns the PGM lock.
1879 *
1880 * @param pVM The VM handle.
1881 */
1882int pgmR3PhysRomReset(PVM pVM)
1883{
1884 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
1885 {
1886 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
1887
1888 if (pRom->fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
1889 {
1890 /*
1891 * Reset the physical handler.
1892 */
1893 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
1894 AssertRCReturn(rc, rc);
1895
1896 /*
1897 * What we do with the shadow pages depends on the memory
1898 * preallocation option. If not enabled, we'll just throw
1899 * out all the dirty pages and replace them by the zero page.
1900 */
1901 if (!pVM->pgm.s.fRamPreAlloc)
1902 {
1903 /* Count dirty shadow pages. */
1904 uint32_t cDirty = 0;
1905 uint32_t iPage = cPages;
1906 while (iPage-- > 0)
1907 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1908 cDirty++;
1909 if (cDirty)
1910 {
1911 /* Free the dirty pages. */
1912 PGMMFREEPAGESREQ pReq;
1913 rc = GMMR3FreePagesPrepare(pVM, &pReq, cDirty, GMMACCOUNT_BASE);
1914 AssertRCReturn(rc, rc);
1915
1916 uint32_t iReqPage = 0;
1917 for (iPage = 0; iPage < cPages; iPage++)
1918 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1919 {
1920 Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) == PGM_PAGE_STATE_ALLOCATED);
1921 pReq->aPages[iReqPage].idPage = PGM_PAGE_GET_PAGEID(&pRom->aPages[iPage].Shadow);
1922 iReqPage++;
1923 }
1924
1925 rc = GMMR3FreePagesPerform(pVM, pReq, cDirty);
1926 GMMR3FreePagesCleanup(pReq);
1927 AssertRCReturn(rc, rc);
1928
1929 /* setup the zero page. */
1930 for (iPage = 0; iPage < cPages; iPage++)
1931 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1932 PGM_PAGE_INIT_ZERO_REAL(&pRom->aPages[iPage].Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
1933
1934 /* update the page count stats. */
1935 pVM->pgm.s.cPrivatePages -= cDirty;
1936 pVM->pgm.s.cZeroPages += cDirty;
1937 }
1938 }
1939 else
1940 {
1941 /* clear all the pages. */
1942 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1943 {
1944 Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO);
1945
1946 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
1947 rc = pgmPhysPageMakeWritable(pVM, &pRom->aPages[iPage].Shadow, GCPhys);
1948 if (RT_FAILURE(rc))
1949 break;
1950
1951 void *pvDstPage;
1952 PPGMPAGEMAP pMapIgnored;
1953 rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pMapIgnored, &pvDstPage);
1954 if (RT_FAILURE(rc))
1955 break;
1956 ASMMemZeroPage(pvDstPage);
1957 }
1958 AssertRCReturn(rc, rc);
1959 }
1960 }
1961
1962#ifdef VBOX_STRICT
1963 /*
1964 * Verify that the virgin page is unchanged if possible.
1965 */
1966 if (pRom->pvOriginal)
1967 {
1968 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
1969 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbSrcPage += PAGE_SIZE)
1970 {
1971 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
1972 PPGMPAGEMAP pMapIgnored;
1973 void *pvDstPage;
1974 int rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pMapIgnored, &pvDstPage);
1975 if (RT_FAILURE(rc))
1976 break;
1977 if (memcmp(pvDstPage, pbSrcPage, PAGE_SIZE))
1978 LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
1979 GCPhys, pRom->pszDesc));
1980 }
1981 }
1982#endif
1983 }
1984
1985 return VINF_SUCCESS;
1986}
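/**
 * Usage sketch (illustrative only, not part of this file): pgmR3PhysRomReset
 * is intended to be called from the VM reset path after ordinary RAM has been
 * cleared, with the caller owning the PGM lock, roughly like this:
 *
 * @code
 *     pgmLock(pVM);
 *     int rc = pgmR3PhysRomReset(pVM);    // reset shadow pages, verify the virgin copies
 *     pgmUnlock(pVM);
 *     AssertRC(rc);
 * @endcode
 */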
1987
1988
1989/**
1990 * Change the shadowing of a range of ROM pages.
1991 *
1992 * This is intended for implementing chipset specific memory registers
1993 * and will not be very strict about the input. It will silently ignore
1994 * any pages that are not part of a shadowed ROM.
1995 *
1996 * @returns VBox status code.
1997 * @param pVM Pointer to the shared VM structure.
1998 * @param GCPhys Where to start. Page aligned.
1999 * @param cb How much to change. Page aligned.
2000 * @param enmProt The new ROM protection.
2001 */
2002VMMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
2003{
2004 /*
2005 * Check input
2006 */
2007 if (!cb)
2008 return VINF_SUCCESS;
2009 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2010 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2011 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2012 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2013 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
2014
2015 /*
2016 * Process the request.
2017 */
2018 bool fFlushedPool = false;
2019 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2020 if ( GCPhys <= pRom->GCPhysLast
2021 && GCPhysLast >= pRom->GCPhys)
2022 {
2023 /*
2024 * Iterate the relevant pages and make the necessary changes.
2025 */
2026 bool fChanges = false;
2027 uint32_t const cPages = pRom->GCPhysLast > GCPhysLast
2028 ? pRom->cb >> PAGE_SHIFT
2029 : (GCPhysLast - pRom->GCPhys) >> PAGE_SHIFT;
2030 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
2031 iPage < cPages;
2032 iPage++)
2033 {
2034 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2035 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
2036 {
2037 fChanges = true;
2038
2039 /* flush the page pool first so we don't leave any usage references dangling. */
2040 if (!fFlushedPool)
2041 {
2042 pgmPoolFlushAll(pVM);
2043 fFlushedPool = true;
2044 }
2045
2046 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
2047 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
2048 PPGMPAGE pRamPage = pgmPhysGetPage(&pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));
2049
2050 *pOld = *pRamPage;
2051 *pRamPage = *pNew;
2052 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
2053 }
2054 }
2055
2056 /*
2057 * Reset the access handler if we made changes, no need
2058 * to optimize this.
2059 */
2060 if (fChanges)
2061 {
2062 int rc = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
2063 AssertRCReturn(rc, rc);
2064 }
2065
2066 /* Advance - cb isn't updated. */
2067 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
2068 }
2069
2070 return VINF_SUCCESS;
2071}
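/**
 * Usage sketch (illustrative; the addresses and sizes are hypothetical): a
 * chipset device implementing a shadow-RAM control register might flip the
 * BIOS area between the protections like this:
 *
 * @code
 *     // Let writes go to the shadow copy while the guest copies the BIOS down.
 *     rc = PGMR3PhysRomProtect(pVM, 0x000f0000, 0x10000, PGMROMPROT_READ_ROM_WRITE_RAM);
 *     AssertRC(rc);
 *     ...
 *     // Later: read from the RAM copy and silently ignore further writes.
 *     rc = PGMR3PhysRomProtect(pVM, 0x000f0000, 0x10000, PGMROMPROT_READ_RAM_WRITE_IGNORE);
 *     AssertRC(rc);
 * @endcode
 */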
2072
2073#ifndef VBOX_WITH_NEW_PHYS_CODE
2074
2075/**
2076 * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
2077 * registration APIs call to inform PGM about memory registrations.
2078 *
2079 * It registers the physical memory range with PGM. MM is responsible
2080 * for the toplevel things - allocation and locking - while PGM is taking
2081 * care of all the details and implements the physical address space virtualization.
2082 *
2083 * @returns VBox status.
2084 * @param pVM The VM handle.
2085 * @param pvRam HC virtual address of the RAM range. (page aligned)
2086 * @param GCPhys GC physical address of the RAM range. (page aligned)
2087 * @param cb Size of the RAM range. (page aligned)
2088 * @param fFlags Flags, MM_RAM_*.
2089 * @param paPages Pointer to an array of physical page descriptors.
2090 * @param pszDesc Description string.
2091 */
2092VMMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
2093{
2094 /*
2095 * Validate input.
2096 * (Not so important because callers are only MMR3PhysRegister()
2097 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
2098 */
2099 Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
2100
2101 Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
2102 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
2103 Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
2104 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
2105 Assert(!(fFlags & ~0xfff));
2106 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
2107 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
2108 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
2109 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2110 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2111 if (GCPhysLast < GCPhys)
2112 {
2113 AssertMsgFailed(("The range wraps! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2114 return VERR_INVALID_PARAMETER;
2115 }
2116
2117 /*
2118 * Find range location and check for conflicts.
2119 */
2120 PPGMRAMRANGE pPrev = NULL;
2121 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
2122 while (pCur)
2123 {
2124 if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
2125 {
2126 AssertMsgFailed(("Conflict! This cannot happen!\n"));
2127 return VERR_PGM_RAM_CONFLICT;
2128 }
2129 if (GCPhysLast < pCur->GCPhys)
2130 break;
2131
2132 /* next */
2133 pPrev = pCur;
2134 pCur = pCur->pNextR3;
2135 }
2136
2137 /*
2138 * Allocate RAM range.
2139 * Small ranges are allocated from the heap, big ones have separate mappings.
2140 */
2141 size_t cbRam = RT_OFFSETOF(PGMRAMRANGE, aPages[cb >> PAGE_SHIFT]);
2142 PPGMRAMRANGE pNew;
2143 int rc = VERR_NO_MEMORY;
2144 if (cbRam > PAGE_SIZE / 2)
2145 { /* large */
2146 cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
2147 rc = MMR3HyperAllocOnceNoRel(pVM, cbRam, PAGE_SIZE, MM_TAG_PGM_PHYS, (void **)&pNew);
2148 AssertMsgRC(rc, ("MMR3HyperAllocOnceNoRel(,%#x,,) -> %Rrc\n", cbRam, rc));
2149 }
2150 else
2151 { /* small */
2152 rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
2153 AssertMsgRC(rc, ("MMHyperAlloc(,%#x,,,) -> %Rrc\n", cbRam, rc));
2154 }
2155 if (RT_SUCCESS(rc))
2156 {
2157 /*
2158 * Initialize the range.
2159 */
2160 pNew->pvR3 = pvRam;
2161 pNew->GCPhys = GCPhys;
2162 pNew->GCPhysLast = GCPhysLast;
2163 pNew->cb = cb;
2164 pNew->fFlags = fFlags;
2165 pNew->paChunkR3Ptrs = NULL;
2166
2167 unsigned iPage = (unsigned)(cb >> PAGE_SHIFT);
2168 if (paPages)
2169 {
2170 while (iPage-- > 0)
2171 {
2172 PGM_PAGE_INIT(&pNew->aPages[iPage], paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
2173 fFlags & MM_RAM_FLAGS_MMIO2 ? PGMPAGETYPE_MMIO2 : PGMPAGETYPE_RAM,
2174 PGM_PAGE_STATE_ALLOCATED);
2175 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
2176 }
2177 }
2178 else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
2179 {
2180 /* Allocate memory for chunk to HC ptr lookup array. */
2181 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
2182 AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Rrc\n", cbRam, rc), rc);
2183
2184 /* Physical memory will be allocated on demand. */
2185 while (iPage-- > 0)
2186 {
2187 PGM_PAGE_INIT(&pNew->aPages[iPage], 0, NIL_GMM_PAGEID, PGMPAGETYPE_RAM, PGM_PAGE_STATE_ZERO);
2188 pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
2189 }
2190 }
2191 else
2192 {
2193 Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
2194 RTHCPHYS HCPhysDummyPage = MMR3PageDummyHCPhys(pVM);
2195 while (iPage-- > 0)
2196 {
2197 PGM_PAGE_INIT(&pNew->aPages[iPage], HCPhysDummyPage, NIL_GMM_PAGEID, PGMPAGETYPE_MMIO, PGM_PAGE_STATE_ZERO);
2198 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
2199 }
2200 }
2201
2202 /*
2203 * Insert the new RAM range.
2204 */
2205 pgmLock(pVM);
2206 pNew->pNextR3 = pCur;
2207 pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : NIL_RTR0PTR;
2208 pNew->pNextRC = pCur ? MMHyperCCToRC(pVM, pCur) : NIL_RTRCPTR;
2209 if (pPrev)
2210 {
2211 pPrev->pNextR3 = pNew;
2212 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
2213 pPrev->pNextRC = MMHyperCCToRC(pVM, pNew);
2214 }
2215 else
2216 {
2217 pVM->pgm.s.pRamRangesR3 = pNew;
2218 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
2219 pVM->pgm.s.pRamRangesRC = MMHyperCCToRC(pVM, pNew);
2220 }
2221 pgmUnlock(pVM);
2222 }
2223 return rc;
2224}
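/**
 * Usage sketch (illustrative; the buffer, address and size are hypothetical):
 * an MMR3PhysRegister() style caller hands a page aligned host buffer and a
 * matching page descriptor array to PGM:
 *
 * @code
 *     rc = PGMR3PhysRegister(pVM, pvRam, GCPhys, cb, 0, paPages, "Main Memory");   // fFlags = 0 for plain RAM
 *     AssertRCReturn(rc, rc);
 * @endcode
 */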
2225
2226
2227/**
2228 * Register a chunk of the physical memory range with PGM. MM is responsible
2229 * for the toplevel things - allocation and locking - while PGM is taking
2230 * care of all the details and implements the physical address space virtualization.
2231 *
2232 *
2233 * @returns VBox status.
2234 * @param pVM The VM handle.
2235 * @param pvRam HC virtual address of the RAM range. (page aligned)
2236 * @param GCPhys GC physical address of the RAM range. (page aligned)
2237 * @param cb Size of the RAM range. (page aligned)
2238 * @param fFlags Flags, MM_RAM_*.
2239 * @param paPages Pointer to an array of physical page descriptors.
2240 * @param pszDesc Description string.
2241 */
2242VMMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
2243{
2244 NOREF(pszDesc);
2245
2246 /*
2247 * Validate input.
2248 * (Not so important because callers are only MMR3PhysRegister()
2249 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
2250 */
2251 Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
2252
2253 Assert(paPages);
2254 Assert(pvRam);
2255 Assert(!(fFlags & ~0xfff));
2256 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
2257 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
2258 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
2259 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2260 Assert(VM_IS_EMT(pVM));
2261 Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
2262 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
2263
2264 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2265 if (GCPhysLast < GCPhys)
2266 {
2267 AssertMsgFailed(("The range wraps! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2268 return VERR_INVALID_PARAMETER;
2269 }
2270
2271 /*
2272 * Find existing range location.
2273 */
2274 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2275 while (pRam)
2276 {
2277 RTGCPHYS off = GCPhys - pRam->GCPhys;
2278 if ( off < pRam->cb
2279 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
2280 break;
2281
2282 pRam = pRam->CTX_SUFF(pNext);
2283 }
2284 AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
2285
2286 unsigned off = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2287 unsigned iPage = (unsigned)(cb >> PAGE_SHIFT);
2288 if (paPages)
2289 {
2290 while (iPage-- > 0)
2291 pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
2292 }
2293 off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
2294 pRam->paChunkR3Ptrs[off] = (uintptr_t)pvRam;
2295
2296 /* Notify the recompiler. */
2297 REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);
2298
2299 return VINF_SUCCESS;
2300}
2301
2302
2303/**
2304 * Allocate missing physical pages for an existing guest RAM range.
2305 *
2306 * @returns VBox status.
2307 * @param pVM The VM handle.
2308 * @param pGCPhys Pointer to the GC physical address within the RAM range. (page aligned)
2309 */
2310VMMR3DECL(int) PGM3PhysGrowRange(PVM pVM, PCRTGCPHYS pGCPhys)
2311{
2312 RTGCPHYS GCPhys = *pGCPhys;
2313
2314 /*
2315 * Walk range list.
2316 */
2317 pgmLock(pVM);
2318
2319 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2320 while (pRam)
2321 {
2322 RTGCPHYS off = GCPhys - pRam->GCPhys;
2323 if ( off < pRam->cb
2324 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
2325 {
2326 bool fRangeExists = false;
2327 unsigned off = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
2328
2329 /* Note: A request made from another thread may end up in EMT after somebody else has already allocated the range. */
2330 if (pRam->paChunkR3Ptrs[off])
2331 fRangeExists = true;
2332
2333 pgmUnlock(pVM);
2334 if (fRangeExists)
2335 return VINF_SUCCESS;
2336 return pgmr3PhysGrowRange(pVM, GCPhys);
2337 }
2338
2339 pRam = pRam->CTX_SUFF(pNext);
2340 }
2341 pgmUnlock(pVM);
2342 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2343}
2344
2345
2346/**
2347 * Allocate missing physical pages for an existing guest RAM range.
2348 *
2349 * @returns VBox status.
2350 * @param pVM The VM handle.
2351 * @param GCPhys GC physical address within the dynamic RAM range to grow.
2352 * (Rounded down to a chunk boundary by this function.)
2353 */
2354int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
2355{
2356 void *pvRam;
2357 int rc;
2358
2359 /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
2360 if (!VM_IS_EMT(pVM))
2361 {
2362 PVMREQ pReq;
2363 const RTGCPHYS GCPhysParam = GCPhys;
2364
2365 AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));
2366
2367 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, &GCPhysParam);
2368 if (RT_SUCCESS(rc))
2369 {
2370 rc = pReq->iStatus;
2371 VMR3ReqFree(pReq);
2372 }
2373 return rc;
2374 }
2375
2376 /* Round down to chunk boundary */
2377 GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;
2378
2379 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DynRamGrow);
2380 STAM_COUNTER_ADD(&pVM->pgm.s.StatR3DynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));
2381
2382 Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %RGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));
2383
2384 unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;
2385
2386 for (;;)
2387 {
2388 rc = SUPPageAlloc(cPages, &pvRam);
2389 if (RT_SUCCESS(rc))
2390 {
2391 rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
2392 if (RT_SUCCESS(rc))
2393 return rc;
2394
2395 SUPPageFree(pvRam, cPages);
2396 }
2397
2398 VMSTATE enmVMState = VMR3GetState(pVM);
2399 if (enmVMState != VMSTATE_RUNNING)
2400 {
2401 AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %RGp!\n", GCPhys));
2402 LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %RGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
2403 return rc;
2404 }
2405
2406 LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));
2407
2408 /* Pause first, then inform Main. */
2409 rc = VMR3SuspendNoSave(pVM);
2410 AssertRC(rc);
2411
2412 VMSetRuntimeError(pVM, false, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM");
2413
2414 /* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
2415 rc = VMR3WaitForResume(pVM);
2416
2417 /* Retry */
2418 LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
2419 }
2420}
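/**
 * Usage sketch (illustrative, simplified): the on-demand allocation path ends
 * up here when a guest access hits a dynamic chunk without host backing; the
 * lookup mirrors the one in PGM3PhysGrowRange() above:
 *
 * @code
 *     unsigned idxChunk = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
 *     if (!pRam->paChunkR3Ptrs[idxChunk])
 *         rc = pgmr3PhysGrowRange(pVM, GCPhys);   // allocates and registers the whole chunk
 * @endcode
 */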
2421
2422
2423/**
2424 * Interface that MMR3RomRegister() and MMR3PhysReserve() call to update the
2425 * flags of existing RAM ranges.
2426 *
2427 * @returns VBox status.
2428 * @param pVM The VM handle.
2429 * @param GCPhys GC physical address of the RAM range. (page aligned)
2430 * @param cb Size of the RAM range. (page aligned)
2431 * @param fFlags The OR flags, MM_RAM_* \#defines.
2432 * @param fMask The AND mask for the flags.
2433 */
2434VMMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
2435{
2436 Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));
2437
2438 /*
2439 * Validate input.
2440 * (Not so important because caller is always MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
2441 */
2442 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
2443 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
2444 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2445 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2446 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2447
2448 /*
2449 * Lookup the range.
2450 */
2451 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2452 while (pRam && GCPhys > pRam->GCPhysLast)
2453 pRam = pRam->CTX_SUFF(pNext);
2454 if ( !pRam
2455 || GCPhys > pRam->GCPhysLast
2456 || GCPhysLast < pRam->GCPhys)
2457 {
2458 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
2459 return VERR_INVALID_PARAMETER;
2460 }
2461
2462 /*
2463 * Update the requested flags.
2464 */
2465 RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
2466 | fMask;
2467 unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
2468 unsigned iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2469 for ( ; iPage < iPageEnd; iPage++)
2470 pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */
2471
2472 return VINF_SUCCESS;
2473}
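/**
 * Usage sketch (illustrative; range and flag choice are hypothetical): an
 * MMR3PhysReserve() style caller would OR in MM_RAM_FLAGS_RESERVED on an
 * already registered range while keeping the other flag bits:
 *
 * @code
 *     rc = PGMR3PhysSetFlags(pVM, GCPhys, cb, MM_RAM_FLAGS_RESERVED, ~0U);   // fMask keeps everything else
 *     AssertRC(rc);
 * @endcode
 */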
2474
2475#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2476
2477/**
2478 * Sets the Address Gate 20 state.
2479 *
2480 * @param pVM VM handle.
2481 * @param fEnable True if the gate should be enabled.
2482 * False if the gate should be disabled.
2483 */
2484VMMDECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable)
2485{
2486 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVM->pgm.s.fA20Enabled));
2487 if (pVM->pgm.s.fA20Enabled != (RTUINT)fEnable)
2488 {
2489 pVM->pgm.s.fA20Enabled = fEnable;
2490 pVM->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
2491 REMR3A20Set(pVM, fEnable);
2492 /** @todo we're not handling this correctly for VT-x / AMD-V. See #2911 */
2493 }
2494}
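/**
 * Usage sketch (illustrative; u8PortVal is a hypothetical port value): the
 * keyboard controller / system control port A (0x92) emulation forwards guest
 * A20 gate changes here, e.g.:
 *
 * @code
 *     PGMR3PhysSetA20(pVM, RT_BOOL(u8PortVal & RT_BIT(1)));   // bit 1 of port 0x92 is the A20 gate
 * @endcode
 */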
2495
2496
2497/**
2498 * Tree enumeration callback for dealing with age rollover.
2499 * It will perform a simple compression of the current age.
2500 */
2501static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
2502{
2503 /* Age compression - ASSUMES iNow == 4. */
2504 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
2505 if (pChunk->iAge >= UINT32_C(0xffffff00))
2506 pChunk->iAge = 3;
2507 else if (pChunk->iAge >= UINT32_C(0xfffff000))
2508 pChunk->iAge = 2;
2509 else if (pChunk->iAge)
2510 pChunk->iAge = 1;
2511 else /* iAge = 0 */
2512 pChunk->iAge = 4;
2513
2514 /* reinsert */
2515 PVM pVM = (PVM)pvUser;
2516 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
2517 pChunk->AgeCore.Key = pChunk->iAge;
2518 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2519 return 0;
2520}
2521
2522
2523/**
2524 * Tree enumeration callback that updates the chunks that have
2525 * been used since the last ageing pass.
2526 */
2527static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
2528{
2529 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
2530 if (!pChunk->iAge)
2531 {
2532 PVM pVM = (PVM)pvUser;
2533 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
2534 pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
2535 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2536 }
2537
2538 return 0;
2539}
2540
2541
2542/**
2543 * Performs ageing of the ring-3 chunk mappings.
2544 *
2545 * @param pVM The VM handle.
2546 */
2547VMMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
2548{
2549 pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
2550 pVM->pgm.s.ChunkR3Map.iNow++;
2551 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
2552 {
2553 pVM->pgm.s.ChunkR3Map.iNow = 4;
2554 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
2555 }
2556 else
2557 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
2558}
2559
2560
2561/**
2562 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
2563 */
2564typedef struct PGMR3PHYSCHUNKUNMAPCB
2565{
2566 PVM pVM; /**< The VM handle. */
2567 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
2568} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
2569
2570
2571/**
2572 * Callback used to find the mapping that's been unused for
2573 * the longest time.
2574 */
2575static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
2576{
2577 do
2578 {
2579 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
2580 if ( pChunk->iAge
2581 && !pChunk->cRefs)
2582 {
2583 /*
2584 * Check that it's not in any of the TLBs.
2585 */
2586 PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
2587 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
2588 if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
2589 {
2590 pChunk = NULL;
2591 break;
2592 }
2593 if (pChunk)
2594 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
2595 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
2596 {
2597 pChunk = NULL;
2598 break;
2599 }
2600 if (pChunk)
2601 {
2602 ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
2603 return 1; /* done */
2604 }
2605 }
2606
2607 /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
2608 pNode = pNode->pList;
2609 } while (pNode);
2610 return 0;
2611}
2612
2613
2614/**
2615 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
2616 *
2617 * The candidate will not be part of any TLBs, so no need to flush
2618 * anything afterwards.
2619 *
2620 * @returns Chunk id.
2621 * @param pVM The VM handle.
2622 */
2623static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
2624{
2625 /*
2626 * Do tree ageing first?
2627 */
2628 if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
2629 PGMR3PhysChunkAgeing(pVM);
2630
2631 /*
2632 * Enumerate the age tree starting with the left most node.
2633 */
2634 PGMR3PHYSCHUNKUNMAPCB Args;
2635 Args.pVM = pVM;
2636 Args.pChunk = NULL;
2637 if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
2638 return Args.pChunk->Core.Key;
2639 return INT32_MAX;
2640}
2641
2642
2643/**
2644 * Maps the given chunk into the ring-3 mapping cache.
2645 *
2646 * This will call ring-0.
2647 *
2648 * @returns VBox status code.
2649 * @param pVM The VM handle.
2650 * @param idChunk The chunk in question.
2651 * @param ppChunk Where to store the chunk tracking structure.
2652 *
2653 * @remarks Called from within the PGM critical section.
2654 */
2655int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
2656{
2657 int rc;
2658 /*
2659 * Allocate a new tracking structure first.
2660 */
2661#if 0 /* for later when we've got a separate mapping method for ring-0. */
2662 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
2663 AssertReturn(pChunk, VERR_NO_MEMORY);
2664#else
2665 PPGMCHUNKR3MAP pChunk;
2666 rc = MMHyperAlloc(pVM, sizeof(*pChunk), 0, MM_TAG_PGM_CHUNK_MAPPING, (void **)&pChunk);
2667 AssertRCReturn(rc, rc);
2668#endif
2669 pChunk->Core.Key = idChunk;
2670 pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
2671 pChunk->iAge = 0;
2672 pChunk->cRefs = 0;
2673 pChunk->cPermRefs = 0;
2674 pChunk->pv = NULL;
2675
2676 /*
2677 * Request the ring-0 part to map the chunk in question and if
2678 * necessary unmap another one to make space in the mapping cache.
2679 */
2680 GMMMAPUNMAPCHUNKREQ Req;
2681 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
2682 Req.Hdr.cbReq = sizeof(Req);
2683 Req.pvR3 = NULL;
2684 Req.idChunkMap = idChunk;
2685 Req.idChunkUnmap = NIL_GMM_CHUNKID;
2686 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
2687 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
2688 rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
2689 if (RT_SUCCESS(rc))
2690 {
2691 /*
2692 * Update the tree.
2693 */
2694 /* insert the new one. */
2695 AssertPtr(Req.pvR3);
2696 pChunk->pv = Req.pvR3;
2697 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
2698 AssertRelease(fRc);
2699 pVM->pgm.s.ChunkR3Map.c++;
2700
2701 fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2702 AssertRelease(fRc);
2703
2704 /* remove the unmapped one. */
2705 if (Req.idChunkUnmap != NIL_GMM_CHUNKID)
2706 {
2707 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
2708 AssertRelease(pUnmappedChunk);
2709 pUnmappedChunk->pv = NULL;
2710 pUnmappedChunk->Core.Key = UINT32_MAX;
2711#if 0 /* for later when we've got a separate mapping method for ring-0. */
2712 MMR3HeapFree(pUnmappedChunk);
2713#else
2714 MMHyperFree(pVM, pUnmappedChunk);
2715#endif
2716 pVM->pgm.s.ChunkR3Map.c--;
2717 }
2718 }
2719 else
2720 {
2721 AssertRC(rc);
2722#if 0 /* for later when we've got a separate mapping method for ring-0. */
2723 MMR3HeapFree(pChunk);
2724#else
2725 MMHyperFree(pVM, pChunk);
2726#endif
2727 pChunk = NULL;
2728 }
2729
2730 *ppChunk = pChunk;
2731 return rc;
2732}
2733
2734
2735/**
2736 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
2737 *
2738 * @returns see pgmR3PhysChunkMap.
2739 * @param pVM The VM handle.
2740 * @param idChunk The chunk to map.
2741 */
2742VMMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
2743{
2744 PPGMCHUNKR3MAP pChunk;
2745 return pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
2746}
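/**
 * Usage sketch (illustrative; the idChunkToMap source is hypothetical): the
 * ring-3 service loop for VMMCALLHOST_PGM_MAP_CHUNK simply forwards the chunk
 * id requested by ring-0/raw-mode code:
 *
 * @code
 *     case VMMCALLHOST_PGM_MAP_CHUNK:
 *         rc = PGMR3PhysChunkMap(pVM, idChunkToMap);
 *         break;
 * @endcode
 */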
2747
2748
2749/**
2750 * Invalidates the TLB for the ring-3 mapping cache.
2751 *
2752 * @param pVM The VM handle.
2753 */
2754VMMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
2755{
2756 pgmLock(pVM);
2757 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
2758 {
2759 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
2760 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
2761 }
2762 pgmUnlock(pVM);
2763}
2764
2765
2766/**
2767 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
2768 *
2769 * @returns The following VBox status codes.
2770 * @retval VINF_SUCCESS on success. FF cleared.
2771 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
2772 *
2773 * @param pVM The VM handle.
2774 */
2775VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
2776{
2777 pgmLock(pVM);
2778
2779 /*
2780 * Allocate more pages, noting down the index of the first new page.
2781 */
2782 uint32_t iClear = pVM->pgm.s.cHandyPages;
2783 AssertMsgReturn(iClear <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d", iClear), VERR_INTERNAL_ERROR);
2784 Log(("PGMR3PhysAllocateHandyPages: %d -> %d\n", iClear, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
2785 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
2786 while (rc == VERR_GMM_SEED_ME)
2787 {
2788 void *pvChunk;
2789 rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
2790 if (RT_SUCCESS(rc))
2791 {
2792 rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
2793 if (RT_FAILURE(rc))
2794 SUPPageFree(pvChunk, GMM_CHUNK_SIZE >> PAGE_SHIFT);
2795 }
2796 if (RT_SUCCESS(rc))
2797 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
2798 }
2799
2800 /*
2801 * Clear the pages.
2802 */
2803 if (RT_SUCCESS(rc))
2804 {
2805 while (iClear < pVM->pgm.s.cHandyPages)
2806 {
2807 PGMMPAGEDESC pPage = &pVM->pgm.s.aHandyPages[iClear];
2808 void *pv;
2809 rc = pgmPhysPageMapByPageID(pVM, pPage->idPage, pPage->HCPhysGCPhys, &pv);
2810 AssertLogRelMsgBreak(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc", pPage->idPage, pPage->HCPhysGCPhys, rc));
2811 ASMMemZeroPage(pv);
2812 iClear++;
2813 }
2814
2815 VM_FF_CLEAR(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
2816 }
2817 else
2818 {
2819 LogRel(("PGM: Failed to procure handy pages, rc=%Rrc cHandyPages=%u\n",
2820 rc, pVM->pgm.s.cHandyPages));
2821 rc = VERR_EM_NO_MEMORY;
2822 //rc = VINF_EM_NO_MEMORY;
2823 //VM_FF_SET(pVM, VM_FF_PGM_WE_ARE_SCREWED?);
2824 }
2825
2826/** @todo Do proper VERR_EM_NO_MEMORY reporting. */
2827 AssertMsg( pVM->pgm.s.cHandyPages == RT_ELEMENTS(pVM->pgm.s.aHandyPages)
2828 || rc != VINF_SUCCESS, ("%d rc=%Rrc\n", pVM->pgm.s.cHandyPages, rc));
2829 pgmUnlock(pVM);
2830 Assert(rc == VINF_SUCCESS || rc == VINF_EM_NO_MEMORY || rc == VERR_EM_NO_MEMORY);
2831 return rc;
2832}
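/**
 * Usage sketch (illustrative): the execution loop responds to
 * VM_FF_PGM_NEED_HANDY_PAGES by refilling the array before resuming the guest:
 *
 * @code
 *     if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
 *     {
 *         rc = PGMR3PhysAllocateHandyPages(pVM);
 *         if (RT_FAILURE(rc) || rc == VINF_EM_NO_MEMORY)
 *             return VINF_EM_NO_MEMORY;   // back off, the host is out of memory
 *     }
 * @endcode
 */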
2833
2834
2835/**
2836 * Frees the specified RAM page and replaces it with the ZERO page.
2837 *
2838 * This is used by ballooning, remapping MMIO2 and RAM reset.
2839 *
2840 * @param pVM Pointer to the shared VM structure.
2841 * @param pReq Pointer to the free-pages request; *pcPendingPages tracks how many entries it currently holds.
2842 * @param pPage Pointer to the page structure.
2843 * @param GCPhys The guest physical address of the page, if applicable.
2844 *
2845 * @remarks The caller must own the PGM lock.
2846 */
2847static int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys)
2848{
2849 /*
2850 * Assert sanity.
2851 */
2852 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
2853 if (RT_UNLIKELY(PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM))
2854 {
2855 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
2856 return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
2857 }
2858
2859 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO)
2860 return VINF_SUCCESS;
2861
2862 const uint32_t idPage = PGM_PAGE_GET_PAGEID(pPage);
2863 if (RT_UNLIKELY( idPage == NIL_GMM_PAGEID
2864 || idPage > GMM_PAGEID_LAST
2865 || PGM_PAGE_GET_CHUNKID(pPage) == NIL_GMM_CHUNKID))
2866 {
2867 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
2868 return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, idPage);
2869 }
2870
2871 /* update page count stats. */
2872 if (PGM_PAGE_IS_SHARED(pPage))
2873 pVM->pgm.s.cSharedPages--;
2874 else
2875 pVM->pgm.s.cPrivatePages--;
2876 pVM->pgm.s.cZeroPages++;
2877
2878 /*
2879 * pPage = ZERO page.
2880 */
2881 PGM_PAGE_SET_HCPHYS(pPage, pVM->pgm.s.HCPhysZeroPg);
2882 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
2883 PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
2884
2885 /*
2886 * Make sure it's not in the handy page array.
2887 */
2888 uint32_t i = pVM->pgm.s.cHandyPages;
2889 while (i < RT_ELEMENTS(pVM->pgm.s.aHandyPages))
2890 {
2891 if (pVM->pgm.s.aHandyPages[i].idPage == idPage)
2892 {
2893 pVM->pgm.s.aHandyPages[i].idPage = NIL_GMM_PAGEID;
2894 break;
2895 }
2896 if (pVM->pgm.s.aHandyPages[i].idSharedPage == idPage)
2897 {
2898 pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
2899 break;
2900 }
2901 i++;
2902 }
2903
2904 /*
2905 * Push it onto the page array.
2906 */
2907 uint32_t iPage = *pcPendingPages;
2908 Assert(iPage < PGMPHYS_FREE_PAGE_BATCH_SIZE);
2909 *pcPendingPages += 1;
2910
2911 pReq->aPages[iPage].idPage = idPage;
2912
2913 if (iPage + 1 < PGMPHYS_FREE_PAGE_BATCH_SIZE)
2914 return VINF_SUCCESS;
2915
2916 /*
2917 * Flush the pages.
2918 */
2919 int rc = GMMR3FreePagesPerform(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE);
2920 if (RT_SUCCESS(rc))
2921 {
2922 GMMR3FreePagesRePrep(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2923 *pcPendingPages = 0;
2924 }
2925 return rc;
2926}
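/**
 * Usage sketch (illustrative; mirrors the batching done elsewhere in this
 * file): callers prepare a free-pages request, feed pages through
 * pgmPhysFreePage() and flush whatever is still pending at the end:
 *
 * @code
 *     PGMMFREEPAGESREQ pReq;
 *     uint32_t cPendingPages = 0;
 *     rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
 *     AssertRCReturn(rc, rc);
 *     // ... for each page to free:
 *     rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys);
 *     // ... then flush the remainder:
 *     if (RT_SUCCESS(rc) && cPendingPages)
 *         rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
 *     GMMR3FreePagesCleanup(pReq);
 * @endcode
 */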
2927
2928
2929/**
2930 * Converts a GC physical address to a HC ring-3 pointer, with some
2931 * additional checks.
2932 *
2933 * @returns VBox status code.
2934 * @retval VINF_SUCCESS on success.
2935 * @retval VINF_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
2936 * access handler of some kind.
2937 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
2938 * accesses or is odd in any way.
2939 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
2940 *
2941 * @param pVM The VM handle.
2942 * @param GCPhys The GC physical address to convert.
2943 * @param fWritable Whether write access is required.
2944 * @param ppv Where to store the pointer corresponding to GCPhys on
2945 * success.
2946 */
2947VMMR3DECL(int) PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
2948{
2949 pgmLock(pVM);
2950
2951 PPGMRAMRANGE pRam;
2952 PPGMPAGE pPage;
2953 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
2954 if (RT_SUCCESS(rc))
2955 {
2956#ifdef VBOX_WITH_NEW_PHYS_CODE
2957 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
2958 rc = VINF_SUCCESS;
2959 else
2960 {
2961 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
2962 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
2963 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2964 {
2965 /** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
2966 * in -norawr0 mode. */
2967 if (fWritable)
2968 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
2969 }
2970 else
2971 {
2972 /* Temporarily disabled physical handler(s); since the recompiler
2973 doesn't get notified when it's reset, we'll have to pretend it's
2974 operating normally. */
2975 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
2976 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
2977 else
2978 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
2979 }
2980 }
2981 if (RT_SUCCESS(rc))
2982 {
2983 int rc2;
2984
2985 /* Make sure what we return is writable. */
2986 if (fWritable && rc != VINF_PGM_PHYS_TLB_CATCH_WRITE)
2987 switch (PGM_PAGE_GET_STATE(pPage))
2988 {
2989 case PGM_PAGE_STATE_ALLOCATED:
2990 break;
2991 case PGM_PAGE_STATE_ZERO:
2992 case PGM_PAGE_STATE_SHARED:
2993 case PGM_PAGE_STATE_WRITE_MONITORED:
2994 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
2995 AssertLogRelRCReturn(rc2, rc2);
2996 break;
2997 }
2998
2999 /* Get a ring-3 mapping of the address. */
3000 PPGMPAGER3MAPTLBE pTlbe;
3001 rc2 = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
3002 AssertLogRelRCReturn(rc2, rc2);
3003 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
3004 /** @todo mapping/locking hell; this isn't horribly efficient since
3005 * pgmPhysPageLoadIntoTlb will repeat the lookup we've done here. */
3006
3007 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
3008 }
3009 else
3010 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
3011
3012 /* else: handler catching all access, no pointer returned. */
3013
3014#else
3015 if (0)
3016 /* nothing */;
3017 else if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
3018 {
3019 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3020 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3021 else if (fWritable && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
3022 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
3023 else
3024 {
3025 /* Temporarily disabled physical handler(s); since the recompiler
3026 doesn't get notified when it's reset, we'll have to pretend it's
3027 operating normally. */
3028 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
3029 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3030 else
3031 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
3032 }
3033 }
3034 else
3035 rc = VINF_SUCCESS;
3036 if (RT_SUCCESS(rc))
3037 {
3038 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
3039 {
3040 AssertMsg(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM, ("GCPhys=%RGp type=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage)));
3041 RTGCPHYS off = GCPhys - pRam->GCPhys;
3042 unsigned iChunk = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
3043 *ppv = (void *)(pRam->paChunkR3Ptrs[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
3044 }
3045 else if (RT_LIKELY(pRam->pvR3))
3046 {
3047 AssertMsg(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, ("GCPhys=%RGp type=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage)));
3048 RTGCPHYS off = GCPhys - pRam->GCPhys;
3049 *ppv = (uint8_t *)pRam->pvR3 + off;
3050 }
3051 else
3052 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
3053 }
3054#endif /* !VBOX_WITH_NEW_PHYS_CODE */
3055 }
3056 else
3057 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
3058
3059 pgmUnlock(pVM);
3060 return rc;
3061}
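/**
 * Usage sketch (illustrative): a caller such as the recompiler's physical TLB
 * loader uses the status code to decide how the page may be accessed:
 *
 * @code
 *     void *pv;
 *     rc = PGMR3PhysTlbGCPhys2Ptr(pVM, GCPhys, true, &pv);        // fWritable = true
 *     if (rc == VINF_SUCCESS)
 *         ;   // read and write directly through pv
 *     else if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
 *         ;   // reads may go through pv, writes must take the handler path
 *     else
 *         ;   // VERR_PGM_PHYS_TLB_CATCH_ALL / VERR_PGM_PHYS_TLB_UNASSIGNED: no direct access
 * @endcode
 */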
3062
3063
3064