VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp@17509

Last change on this file since 17509 was 17509, checked in by vboxsync, 16 years ago

PGM: Moved the page pool PT flushing code in the access handler bits to where it belongs and called it pgmPoolTrackFlushGCPhys. Fixed a status code corruption bug in PGMR3PhysTlbGCPhys2Ptr (new phys). Made the lazy zero page replacement code work in the new code; it is disabled by default because it frequently requires flushing the shadow page pool, since the tracking code assumes the HCPhys of a PGMPAGE is unique and never shared.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 97.6 KB
1/* $Id: PGMPhys.cpp 17509 2009-03-07 01:30:23Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/pgm.h>
28#include <VBox/cpum.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/rem.h>
34#include <VBox/csam.h>
35#include "PGMInternal.h"
36#include <VBox/vm.h>
37#include <VBox/dbg.h>
38#include <VBox/param.h>
39#include <VBox/err.h>
40#include <iprt/assert.h>
41#include <iprt/alloc.h>
42#include <iprt/asm.h>
43#include <VBox/log.h>
44#include <iprt/thread.h>
45#include <iprt/string.h>
46
47
48/*******************************************************************************
49* Defined Constants And Macros *
50*******************************************************************************/
51/** The number of pages to free in one batch. */
52#define PGMPHYS_FREE_PAGE_BATCH_SIZE 128
53
54
55/*******************************************************************************
56* Internal Functions *
57*******************************************************************************/
58static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
59static int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys);
60
61
62/*
63 * PGMR3PhysReadU8-64
64 * PGMR3PhysWriteU8-64
65 */
66#define PGMPHYSFN_READNAME PGMR3PhysReadU8
67#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU8
68#define PGMPHYS_DATASIZE 1
69#define PGMPHYS_DATATYPE uint8_t
70#include "PGMPhysRWTmpl.h"
71
72#define PGMPHYSFN_READNAME PGMR3PhysReadU16
73#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU16
74#define PGMPHYS_DATASIZE 2
75#define PGMPHYS_DATATYPE uint16_t
76#include "PGMPhysRWTmpl.h"
77
78#define PGMPHYSFN_READNAME PGMR3PhysReadU32
79#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU32
80#define PGMPHYS_DATASIZE 4
81#define PGMPHYS_DATATYPE uint32_t
82#include "PGMPhysRWTmpl.h"
83
84#define PGMPHYSFN_READNAME PGMR3PhysReadU64
85#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU64
86#define PGMPHYS_DATASIZE 8
87#define PGMPHYS_DATATYPE uint64_t
88#include "PGMPhysRWTmpl.h"
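/*
 * A minimal usage sketch for the helpers generated by the template includes
 * above. It assumes the template expands them to the forms
 * PGMR3PhysReadU32(pVM, GCPhys) and PGMR3PhysWriteU32(pVM, GCPhys, u32);
 * see PGMPhysRWTmpl.h for the exact signatures. Guarded out of the build,
 * illustration only.
 */
#if 0
static void pgmR3PhysExampleBumpU32(PVM pVM, RTGCPHYS GCPhys)
{
    /* Read a 32-bit value from guest physical memory, modify it and write it back. */
    uint32_t u32 = PGMR3PhysReadU32(pVM, GCPhys);
    PGMR3PhysWriteU32(pVM, GCPhys, u32 + 1);
}
#endif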
89
90
91
92/**
93 * Links a new RAM range into the list.
94 *
95 * @param pVM Pointer to the shared VM structure.
96 * @param pNew Pointer to the new list entry.
97 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
98 */
99static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
100{
101 pgmLock(pVM);
102
103 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
104 pNew->pNextR3 = pRam;
105 pNew->pNextR0 = pRam ? MMHyperCCToR0(pVM, pRam) : NIL_RTR0PTR;
106 pNew->pNextRC = pRam ? MMHyperCCToRC(pVM, pRam) : NIL_RTRCPTR;
107
108 if (pPrev)
109 {
110 pPrev->pNextR3 = pNew;
111 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
112 pPrev->pNextRC = MMHyperCCToRC(pVM, pNew);
113 }
114 else
115 {
116 pVM->pgm.s.pRamRangesR3 = pNew;
117 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
118 pVM->pgm.s.pRamRangesRC = MMHyperCCToRC(pVM, pNew);
119 }
120
121 pgmUnlock(pVM);
122}
123
124
125/**
126 * Unlink an existing RAM range from the list.
127 *
128 * @param pVM Pointer to the shared VM structure.
129 * @param pRam Pointer to the RAM range to unlink.
130 * @param pPrev Pointer to the previous list entry. NULL if pRam is the head of the list.
131 */
132static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
133{
134 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
135
136 pgmLock(pVM);
137
138 PPGMRAMRANGE pNext = pRam->pNextR3;
139 if (pPrev)
140 {
141 pPrev->pNextR3 = pNext;
142 pPrev->pNextR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
143 pPrev->pNextRC = pNext ? MMHyperCCToRC(pVM, pNext) : NIL_RTRCPTR;
144 }
145 else
146 {
147 Assert(pVM->pgm.s.pRamRangesR3 == pRam);
148 pVM->pgm.s.pRamRangesR3 = pNext;
149 pVM->pgm.s.pRamRangesR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
150 pVM->pgm.s.pRamRangesRC = pNext ? MMHyperCCToRC(pVM, pNext) : NIL_RTRCPTR;
151 }
152
153 pgmUnlock(pVM);
154}
155
156
157/**
158 * Unlink an existing RAM range from the list.
159 *
160 * @param pVM Pointer to the shared VM structure.
161 * @param pRam Pointer to the RAM range to unlink.
162 */
163static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
164{
165 /* find prev. */
166 PPGMRAMRANGE pPrev = NULL;
167 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
168 while (pCur != pRam)
169 {
170 pPrev = pCur;
171 pCur = pCur->pNextR3;
172 }
173 AssertFatal(pCur);
174
175 pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
176}
177
178
179/**
180 * Sets up a RAM range.
181 *
182 * This will check for conflicting registrations, make a resource
183 * reservation for the memory (with GMM), and setup the per-page
184 * tracking structures (PGMPAGE).
185 *
186 * @returns VBox status code.
187 * @param pVM Pointer to the shared VM structure.
188 * @param GCPhys The physical address of the RAM.
189 * @param cb The size of the RAM.
190 * @param pszDesc The description - not copied, so, don't free or change it.
191 */
192VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
193{
194 /*
195 * Validate input.
196 */
197 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
198 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
199 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
200 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
201 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
202 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
203 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
204 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
205
206 /*
207 * Find range location and check for conflicts.
208 * (We don't lock here because the locking by EMT is only required on update.)
209 */
210 PPGMRAMRANGE pPrev = NULL;
211 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
212 while (pRam && GCPhysLast >= pRam->GCPhys)
213 {
214 if ( GCPhysLast >= pRam->GCPhys
215 && GCPhys <= pRam->GCPhysLast)
216 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
217 GCPhys, GCPhysLast, pszDesc,
218 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
219 VERR_PGM_RAM_CONFLICT);
220
221 /* next */
222 pPrev = pRam;
223 pRam = pRam->pNextR3;
224 }
225
226 /*
227 * Register it with GMM (the API bitches).
228 */
229 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
230 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
231 if (RT_FAILURE(rc))
232 return rc;
233
234 /*
235 * Allocate RAM range.
236 */
237 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
238 PPGMRAMRANGE pNew;
239 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
240 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
241
242 /*
243 * Initialize the range.
244 */
245 pNew->GCPhys = GCPhys;
246 pNew->GCPhysLast = GCPhysLast;
247 pNew->pszDesc = pszDesc;
248 pNew->cb = cb;
249 pNew->fFlags = 0;
250
251 pNew->pvR3 = NULL;
252#ifndef VBOX_WITH_NEW_PHYS_CODE
253 pNew->paChunkR3Ptrs = NULL;
254
255 /* Allocate memory for chunk to HC ptr lookup array. */
256 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
257 AssertRCReturn(rc, rc);
258 pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
259
260#endif
261 RTGCPHYS iPage = cPages;
262 while (iPage-- > 0)
263 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
264
265 /*
266 * Insert the new RAM range.
267 */
268 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
269
270 /*
271 * Notify REM.
272 */
273#ifdef VBOX_WITH_NEW_PHYS_CODE
274 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, 0);
275#else
276 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, MM_RAM_FLAGS_DYNAMIC_ALLOC);
277#endif
278
279 return VINF_SUCCESS;
280}
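/*
 * A minimal sketch of how a caller (on the EMT, during VM construction) might
 * use PGMR3PhysRegisterRam. The base address, size and description below are
 * illustrative only; GCPhys and cb must be page aligned, and the description
 * string must stay valid for the lifetime of the VM since it is not copied.
 * Guarded out of the build, illustration only.
 */
#if 0
static int pgmR3PhysExampleRegisterBaseRam(PVM pVM)
{
    return PGMR3PhysRegisterRam(pVM, 0 /* GCPhys */, (RTGCPHYS)128 * 1024 * 1024, "Example Base RAM");
}
#endif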
281
282
283/**
284 * Resets (zeros) the RAM.
285 *
286 * ASSUMES that the caller owns the PGM lock.
287 *
288 * @returns VBox status code.
289 * @param pVM Pointer to the shared VM structure.
290 */
291int pgmR3PhysRamReset(PVM pVM)
292{
293#ifdef VBOX_WITH_NEW_PHYS_CODE
294 /*
295 * We batch up pages before freeing them.
296 */
297 uint32_t cPendingPages = 0;
298 PGMMFREEPAGESREQ pReq;
299 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
300 AssertLogRelRCReturn(rc, rc);
301#endif
302
303 /*
304 * Walk the ram ranges.
305 */
306 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
307 {
308 uint32_t iPage = pRam->cb >> PAGE_SHIFT; Assert((RTGCPHYS)iPage << PAGE_SHIFT == pRam->cb);
309#ifdef VBOX_WITH_NEW_PHYS_CODE
310 if (!pVM->pgm.s.fRamPreAlloc)
311 {
312 /* Replace all RAM pages by ZERO pages. */
313 while (iPage-- > 0)
314 {
315 PPGMPAGE pPage = &pRam->aPages[iPage];
316 switch (PGM_PAGE_GET_TYPE(pPage))
317 {
318 case PGMPAGETYPE_RAM:
319 if (!PGM_PAGE_IS_ZERO(pPage))
320 {
321 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
322 AssertLogRelRCReturn(rc, rc);
323 }
324 break;
325
326 case PGMPAGETYPE_MMIO2:
327 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
328 case PGMPAGETYPE_ROM:
329 case PGMPAGETYPE_MMIO:
330 break;
331 default:
332 AssertFailed();
333 }
334 } /* for each page */
335 }
336 else
337#endif
338 {
339 /* Zero the memory. */
340 while (iPage-- > 0)
341 {
342 PPGMPAGE pPage = &pRam->aPages[iPage];
343 switch (PGM_PAGE_GET_TYPE(pPage))
344 {
345#ifndef VBOX_WITH_NEW_PHYS_CODE
346 case PGMPAGETYPE_INVALID:
347 case PGMPAGETYPE_RAM:
348 if (pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)) /** @todo PAGE FLAGS */
349 {
350 /* shadow ram is reloaded elsewhere. */
351 Log4(("PGMR3Reset: not clearing phys page %RGp due to flags %RHp\n", pRam->GCPhys + (iPage << PAGE_SHIFT), pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO))); /** @todo PAGE FLAGS */
352 continue;
353 }
354 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
355 {
356 unsigned iChunk = iPage >> (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
357 if (pRam->paChunkR3Ptrs[iChunk])
358 ASMMemZero32((char *)pRam->paChunkR3Ptrs[iChunk] + ((iPage << PAGE_SHIFT) & PGM_DYNAMIC_CHUNK_OFFSET_MASK), PAGE_SIZE);
359 }
360 else
361 ASMMemZero32((char *)pRam->pvR3 + (iPage << PAGE_SHIFT), PAGE_SIZE);
362 break;
363#else /* VBOX_WITH_NEW_PHYS_CODE */
364 case PGMPAGETYPE_RAM:
365 switch (PGM_PAGE_GET_STATE(pPage))
366 {
367 case PGM_PAGE_STATE_ZERO:
368 break;
369 case PGM_PAGE_STATE_SHARED:
370 case PGM_PAGE_STATE_WRITE_MONITORED:
371 rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
372 AssertLogRelRCReturn(rc, rc);
373 case PGM_PAGE_STATE_ALLOCATED:
374 {
375 void *pvPage;
376 PPGMPAGEMAP pMapIgnored;
377 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pMapIgnored, &pvPage);
378 AssertLogRelRCReturn(rc, rc);
379 ASMMemZeroPage(pvPage);
380 break;
381 }
382 }
383 break;
384#endif /* VBOX_WITH_NEW_PHYS_CODE */
385
386 case PGMPAGETYPE_MMIO2:
387 case PGMPAGETYPE_ROM_SHADOW:
388 case PGMPAGETYPE_ROM:
389 case PGMPAGETYPE_MMIO:
390 break;
391 default:
392 AssertFailed();
393
394 }
395 } /* for each page */
396 }
397
398 }
399
400#ifdef VBOX_WITH_NEW_PHYS_CODE
401 /*
402 * Finish off any pages pending freeing.
403 */
404 if (cPendingPages)
405 {
406 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
407 AssertLogRelRCReturn(rc, rc);
408 }
409 GMMR3FreePagesCleanup(pReq);
410#endif
411
412
413 return VINF_SUCCESS;
414}
415
416
417/**
418 * This is the interface IOM is using to register an MMIO region.
419 *
420 * It will check for conflicts and ensure that a RAM range structure
421 * is present before calling the PGMR3HandlerPhysicalRegister API to
422 * register the callbacks.
423 *
424 * @returns VBox status code.
425 *
426 * @param pVM Pointer to the shared VM structure.
427 * @param GCPhys The start of the MMIO region.
428 * @param cb The size of the MMIO region.
429 * @param pfnHandlerR3 The address of the ring-3 handler. (IOMR3MMIOHandler)
430 * @param pvUserR3 The user argument for R3.
431 * @param pfnHandlerR0 The address of the ring-0 handler. (IOMMMIOHandler)
432 * @param pvUserR0 The user argument for R0.
433 * @param pfnHandlerRC The address of the RC handler. (IOMMMIOHandler)
434 * @param pvUserRC The user argument for RC.
435 * @param pszDesc The description of the MMIO region.
436 */
437VMMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb,
438 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
439 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
440 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
441 R3PTRTYPE(const char *) pszDesc)
442{
443 /*
444 * Assert on some assumption.
445 */
446 VM_ASSERT_EMT(pVM);
447 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
448 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
449 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
450 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
451
452 /*
453 * Make sure there's a RAM range structure for the region.
454 */
455 int rc;
456 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
457 bool fRamExists = false;
458 PPGMRAMRANGE pRamPrev = NULL;
459 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
460 while (pRam && GCPhysLast >= pRam->GCPhys)
461 {
462 if ( GCPhysLast >= pRam->GCPhys
463 && GCPhys <= pRam->GCPhysLast)
464 {
465 /* Simplification: all within the same range. */
466 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
467 && GCPhysLast <= pRam->GCPhysLast,
468 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
469 GCPhys, GCPhysLast, pszDesc,
470 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
471 VERR_PGM_RAM_CONFLICT);
472
473 /* Check that it's all RAM or MMIO pages. */
474 PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
475 uint32_t cLeft = cb >> PAGE_SHIFT;
476 while (cLeft-- > 0)
477 {
478 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
479 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
480 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
481 GCPhys, GCPhysLast, pszDesc, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
482 VERR_PGM_RAM_CONFLICT);
483 pPage++;
484 }
485
486 /* Looks good. */
487 fRamExists = true;
488 break;
489 }
490
491 /* next */
492 pRamPrev = pRam;
493 pRam = pRam->pNextR3;
494 }
495 PPGMRAMRANGE pNew;
496 if (fRamExists)
497 pNew = NULL;
498 else
499 {
500 /*
501 * No RAM range, insert an ad-hoc one.
502 *
503 * Note that we don't have to tell REM about this range because
504 * PGMHandlerPhysicalRegisterEx will do that for us.
505 */
506 Log(("PGMR3PhysMMIORegister: Adding ad-hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));
507
508 const uint32_t cPages = cb >> PAGE_SHIFT;
509 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
510 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), 16, MM_TAG_PGM_PHYS, (void **)&pNew);
511 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
512
513 /* Initialize the range. */
514 pNew->GCPhys = GCPhys;
515 pNew->GCPhysLast = GCPhysLast;
516 pNew->pszDesc = pszDesc;
517 pNew->cb = cb;
518 pNew->fFlags = 0; /* Some MMIO flag here? */
519
520 pNew->pvR3 = NULL;
521#ifndef VBOX_WITH_NEW_PHYS_CODE
522 pNew->paChunkR3Ptrs = NULL;
523#endif
524
525 uint32_t iPage = cPages;
526 while (iPage-- > 0)
527 PGM_PAGE_INIT_ZERO_REAL(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
528 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);
529
530 /* link it */
531 pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
532 }
533
534 /*
535 * Register the access handler.
536 */
537 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_MMIO, GCPhys, GCPhysLast,
538 pfnHandlerR3, pvUserR3,
539 pfnHandlerR0, pvUserR0,
540 pfnHandlerRC, pvUserRC, pszDesc);
541 if ( RT_FAILURE(rc)
542 && !fRamExists)
543 {
544 /* remove the ad-hoc range. */
545 pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
546 pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
547 MMHyperFree(pVM, pNew);
548 }
549
550 return rc;
551}
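/*
 * A hedged sketch of how IOM (or similar code) might register a single MMIO
 * page with PGMR3PhysMMIORegister. The handler name devExampleMmioHandlerR3,
 * the pvUser value and the guest physical address are invented for
 * illustration, and the ring-0 and raw-mode handlers are simply omitted,
 * which a real caller would normally not do. Guarded out of the build.
 */
#if 0
static DECLCALLBACK(int) devExampleMmioHandlerR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf,
                                                 size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);

static int pgmR3PhysExampleRegisterMmio(PVM pVM, RTR3PTR pvUser)
{
    return PGMR3PhysMMIORegister(pVM, 0xe0000000 /* GCPhys */, PAGE_SIZE,
                                 devExampleMmioHandlerR3, pvUser,
                                 NIL_RTR0PTR, NIL_RTR0PTR,
                                 NIL_RTRCPTR, NIL_RTRCPTR,
                                 "Example MMIO page");
}
#endif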
552
553
554/**
555 * This is the interface IOM is using to deregister an MMIO region.
556 *
557 * It will take care of calling PGMHandlerPhysicalDeregister and clean up
558 * any ad-hoc PGMRAMRANGE left behind.
559 *
560 * @returns VBox status code.
561 * @param pVM Pointer to the shared VM structure.
562 * @param GCPhys The start of the MMIO region.
563 * @param cb The size of the MMIO region.
564 */
565VMMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
566{
567 VM_ASSERT_EMT(pVM);
568
569 /*
570 * First deregister the handler, then check if we should remove the ram range.
571 */
572 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
573 if (RT_SUCCESS(rc))
574 {
575 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
576 PPGMRAMRANGE pRamPrev = NULL;
577 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
578 while (pRam && GCPhysLast >= pRam->GCPhys)
579 {
580 /*if ( GCPhysLast >= pRam->GCPhys
581 && GCPhys <= pRam->GCPhysLast) - later */
582 if ( GCPhysLast == pRam->GCPhysLast
583 && GCPhys == pRam->GCPhys)
584 {
585 Assert(pRam->cb == cb);
586
587 /*
588 * See if all the pages are dead MMIO pages.
589 */
590 bool fAllMMIO = true;
591 PPGMPAGE pPage = &pRam->aPages[0];
592 uint32_t cLeft = cb >> PAGE_SHIFT;
593 while (cLeft-- > 0)
594 {
595 if ( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO
596 /*|| not-out-of-action later */)
597 {
598 fAllMMIO = false;
599 break;
600 }
601 pPage++;
602 }
603
604 /*
605 * Unlink it and free if it's all MMIO.
606 */
607 if (fAllMMIO)
608 {
609 Log(("PGMR3PhysMMIODeregister: Freeing ad-hoc MMIO range for %RGp-%RGp %s\n",
610 GCPhys, GCPhysLast, pRam->pszDesc));
611
612 pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
613 pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
614 MMHyperFree(pVM, pRam);
615 }
616 break;
617 }
618
619 /* next */
620 pRamPrev = pRam;
621 pRam = pRam->pNextR3;
622 }
623 }
624
625 return rc;
626}
627
628
629/**
630 * Locate a MMIO2 range.
631 *
632 * @returns Pointer to the MMIO2 range.
633 * @param pVM Pointer to the shared VM structure.
634 * @param pDevIns The device instance owning the region.
635 * @param iRegion The region.
636 */
637DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
638{
639 /*
640 * Search the list.
641 */
642 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
643 if ( pCur->pDevInsR3 == pDevIns
644 && pCur->iRegion == iRegion)
645 return pCur;
646 return NULL;
647}
648
649
650/**
651 * Allocate and register an MMIO2 region.
652 *
653 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's
654 * RAM associated with a device. It is also non-shared memory with a
655 * permanent ring-3 mapping and page backing (presently).
656 *
657 * A MMIO2 range may overlap with base memory if a lot of RAM
658 * is configured for the VM, in which case we'll drop the base
659 * memory pages. Presently we will make no attempt to preserve
660 * anything that happens to be present in the base memory that
661 * is replaced; this is of course incorrect, but it's too much
662 * effort.
663 *
664 * @returns VBox status code.
665 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the memory.
666 * @retval VERR_ALREADY_EXISTS if the region already exists.
667 *
668 * @param pVM Pointer to the shared VM structure.
669 * @param pDevIns The device instance owning the region.
670 * @param iRegion The region number. If the MMIO2 memory is a PCI I/O region
671 * this number has to be the number of that region. Otherwise
672 * it can be any number up to UINT8_MAX.
673 * @param cb The size of the region. Must be page aligned.
674 * @param fFlags Reserved for future use, must be zero.
675 * @param ppv Where to store the pointer to the ring-3 mapping of the memory.
676 * @param pszDesc The description.
677 */
678VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc)
679{
680 /*
681 * Validate input.
682 */
683 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
684 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
685 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
686 AssertPtrReturn(ppv, VERR_INVALID_POINTER);
687 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
688 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
689 AssertReturn(pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
690 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
691 AssertReturn(cb, VERR_INVALID_PARAMETER);
692 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
693
694 const uint32_t cPages = cb >> PAGE_SHIFT;
695 AssertLogRelReturn((RTGCPHYS)cPages << PAGE_SHIFT == cb, VERR_INVALID_PARAMETER);
696 AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY);
697
698 /*
699 * Try reserve and allocate the backing memory first as this is what is
700 * most likely to fail.
701 */
702 int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc);
703 if (RT_FAILURE(rc))
704 return rc;
705
706 void *pvPages;
707 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE));
708 if (RT_SUCCESS(rc))
709 rc = SUPR3PageAllocEx(cPages, 0 /*fFlags*/, &pvPages, NULL /*pR0Ptr*/, paPages);
710 if (RT_SUCCESS(rc))
711 {
712 memset(pvPages, 0, cPages * PAGE_SIZE);
713
714 /*
715 * Create the MMIO2 range record for it.
716 */
717 const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]);
718 PPGMMMIO2RANGE pNew;
719 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
720 AssertLogRelMsgRC(rc, ("cbRamRange=%zu\n", cbRange));
721 if (RT_SUCCESS(rc))
722 {
723 pNew->pDevInsR3 = pDevIns;
724 pNew->pvR3 = pvPages;
725 //pNew->pNext = NULL;
726 //pNew->fMapped = false;
727 //pNew->fOverlapping = false;
728 pNew->iRegion = iRegion;
729 pNew->RamRange.GCPhys = NIL_RTGCPHYS;
730 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
731 pNew->RamRange.pszDesc = pszDesc;
732 pNew->RamRange.cb = cb;
733 //pNew->RamRange.fFlags = 0;
734
735 pNew->RamRange.pvR3 = pvPages; ///@todo remove this [new phys code]
736#ifndef VBOX_WITH_NEW_PHYS_CODE
737 pNew->RamRange.paChunkR3Ptrs = NULL; ///@todo remove this [new phys code]
738#endif
739
740 uint32_t iPage = cPages;
741 while (iPage-- > 0)
742 {
743 PGM_PAGE_INIT(&pNew->RamRange.aPages[iPage],
744 paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
745 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
746 }
747
748 /*
749 * Link it into the list.
750 * Since there is no particular order, just push it.
751 */
752 pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3;
753 pVM->pgm.s.pMmio2RangesR3 = pNew;
754
755 *ppv = pvPages;
756 RTMemTmpFree(paPages);
757 return VINF_SUCCESS;
758 }
759
760 SUPR3PageFreeEx(pvPages, cPages);
761 }
762 RTMemTmpFree(paPages);
763 MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pszDesc);
764 return rc;
765}
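/*
 * A minimal sketch of a device allocating an MMIO2 region (for instance a
 * framebuffer) with PGMR3PhysMMIO2Register. The 4 MB size, region number 0
 * and description are illustrative; for a PCI device the region number must
 * match the PCI region (BAR) the memory will back. Guarded out of the build.
 */
#if 0
static int pgmR3PhysExampleAllocMmio2(PVM pVM, PPDMDEVINS pDevIns, void **ppvR3)
{
    return PGMR3PhysMMIO2Register(pVM, pDevIns, 0 /* iRegion */, (RTGCPHYS)4 * 1024 * 1024,
                                  0 /* fFlags */, ppvR3, "Example MMIO2");
}
#endif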
766
767
768/**
769 * Deregisters and frees an MMIO2 region.
770 *
771 * Any physical (and virtual) access handlers registered for the region must
772 * be deregistered before calling this function.
773 *
774 * @returns VBox status code.
775 * @param pVM Pointer to the shared VM structure.
776 * @param pDevIns The device instance owning the region.
777 * @param iRegion The region. If it's UINT32_MAX it'll be a wildcard match.
778 */
779VMMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
780{
781 /*
782 * Validate input.
783 */
784 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
785 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
786 AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
787
788 int rc = VINF_SUCCESS;
789 unsigned cFound = 0;
790 PPGMMMIO2RANGE pPrev = NULL;
791 PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3;
792 while (pCur)
793 {
794 if ( pCur->pDevInsR3 == pDevIns
795 && ( iRegion == UINT32_MAX
796 || pCur->iRegion == iRegion))
797 {
798 cFound++;
799
800 /*
801 * Unmap it if it's mapped.
802 */
803 if (pCur->fMapped)
804 {
805 int rc2 = PGMR3PhysMMIO2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);
806 AssertRC(rc2);
807 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
808 rc = rc2;
809 }
810
811 /*
812 * Unlink it
813 */
814 PPGMMMIO2RANGE pNext = pCur->pNextR3;
815 if (pPrev)
816 pPrev->pNextR3 = pNext;
817 else
818 pVM->pgm.s.pMmio2RangesR3 = pNext;
819 pCur->pNextR3 = NULL;
820
821 /*
822 * Free the memory.
823 */
824 int rc2 = SUPR3PageFreeEx(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT);
825 AssertRC(rc2);
826 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
827 rc = rc2;
828
829 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)(pCur->RamRange.cb >> PAGE_SHIFT), pCur->RamRange.pszDesc);
830 AssertRC(rc2);
831 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
832 rc = rc2;
833
834 /* we're leaking hyper memory here if done at runtime. */
835 Assert( VMR3GetState(pVM) == VMSTATE_OFF
836 || VMR3GetState(pVM) == VMSTATE_DESTROYING
837 || VMR3GetState(pVM) == VMSTATE_TERMINATED
838 || VMR3GetState(pVM) == VMSTATE_CREATING);
839 /*rc = MMHyperFree(pVM, pCur);
840 AssertRCReturn(rc, rc); - not safe, see the alloc call. */
841
842 /* next */
843 pCur = pNext;
844 }
845 else
846 {
847 pPrev = pCur;
848 pCur = pCur->pNextR3;
849 }
850 }
851
852 return !cFound && iRegion != UINT32_MAX ? VERR_NOT_FOUND : rc;
853}
854
855
856/**
857 * Maps a MMIO2 region.
858 *
859 * This is done when a guest / the bios / state loading changes the
860 * PCI config. The replacing of base memory has the same restrictions
861 * as during registration, of course.
862 *
863 * @returns VBox status code.
864 *
865 * @param pVM Pointer to the shared VM structure.
866 * @param pDevIns The device instance owning the MMIO2 region.
 * @param iRegion The region number.
 * @param GCPhys The guest physical base address to map the region at. Must be page aligned.
867 */
868VMMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
869{
870 /*
871 * Validate input
872 */
873 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
874 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
875 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
876 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
877 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
878 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
879
880 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
881 AssertReturn(pCur, VERR_NOT_FOUND);
882 AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER);
883 Assert(pCur->RamRange.GCPhys == NIL_RTGCPHYS);
884 Assert(pCur->RamRange.GCPhysLast == NIL_RTGCPHYS);
885
886 const RTGCPHYS GCPhysLast = GCPhys + pCur->RamRange.cb - 1;
887 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
888
889 /*
890 * Find our location in the ram range list, checking for
891 * a restriction we don't bother implementing yet (partially overlapping ranges).
892 */
893 bool fRamExists = false;
894 PPGMRAMRANGE pRamPrev = NULL;
895 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
896 while (pRam && GCPhysLast >= pRam->GCPhys)
897 {
898 if ( GCPhys <= pRam->GCPhysLast
899 && GCPhysLast >= pRam->GCPhys)
900 {
901 /* completely within? */
902 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
903 && GCPhysLast <= pRam->GCPhysLast,
904 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
905 GCPhys, GCPhysLast, pCur->RamRange.pszDesc,
906 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
907 VERR_PGM_RAM_CONFLICT);
908 fRamExists = true;
909 break;
910 }
911
912 /* next */
913 pRamPrev = pRam;
914 pRam = pRam->pNextR3;
915 }
916 if (fRamExists)
917 {
918 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
919 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
920 while (cPagesLeft-- > 0)
921 {
922 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
923 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
924 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc),
925 VERR_PGM_RAM_CONFLICT);
926 pPage++;
927 }
928 }
929 Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n",
930 GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc));
931
932 /*
933 * Make the changes.
934 */
935 pgmLock(pVM);
936
937 pCur->RamRange.GCPhys = GCPhys;
938 pCur->RamRange.GCPhysLast = GCPhysLast;
939 pCur->fMapped = true;
940 pCur->fOverlapping = fRamExists;
941
942 if (fRamExists)
943 {
944 uint32_t cPendingPages = 0;
945 PGMMFREEPAGESREQ pReq;
946 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
947 AssertLogRelRCReturn(rc, rc);
948
949 /* replace the pages, freeing all present RAM pages. */
950 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
951 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
952 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
953 while (cPagesLeft-- > 0)
954 {
955 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
956 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
957
958 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
959 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhys);
960 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_MMIO2);
961 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ALLOCATED);
962
963 GCPhys += PAGE_SIZE;
964 pPageSrc++;
965 pPageDst++;
966 }
967
968 if (cPendingPages)
969 {
970 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
971 AssertLogRelRCReturn(rc, rc);
972 }
973 GMMR3FreePagesCleanup(pReq);
974 }
975 else
976 {
977 /* link in the ram range */
978 pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev);
979 REMR3NotifyPhysRamRegister(pVM, GCPhys, pCur->RamRange.cb, 0);
980 }
981
982 pgmUnlock(pVM);
983
984 return VINF_SUCCESS;
985}
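/*
 * A hedged sketch of the map/unmap pairing: when the guest (or the BIOS)
 * moves the PCI BAR backed by an MMIO2 region, the old mapping is removed
 * with PGMR3PhysMMIO2Unmap before the new one is established with
 * PGMR3PhysMMIO2Map. GCPhysOld/GCPhysNew and region 0 are illustrative
 * names and values only. Guarded out of the build.
 */
#if 0
static int pgmR3PhysExampleMoveMmio2(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew)
{
    int rc = PGMR3PhysMMIO2Unmap(pVM, pDevIns, 0 /* iRegion */, GCPhysOld);
    if (RT_SUCCESS(rc))
        rc = PGMR3PhysMMIO2Map(pVM, pDevIns, 0 /* iRegion */, GCPhysNew);
    return rc;
}
#endif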
986
987
988/**
989 * Unmaps a MMIO2 region.
990 *
991 * This is done when a guest / the bios / state loading changes the
992 * PCI config. The replacing of base memory has the same restrictions
993 * as during registration, of course.
994 */
995VMMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
996{
997 /*
998 * Validate input
999 */
1000 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1001 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1002 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1003 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
1004 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
1005 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1006
1007 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1008 AssertReturn(pCur, VERR_NOT_FOUND);
1009 AssertReturn(pCur->fMapped, VERR_WRONG_ORDER);
1010 AssertReturn(pCur->RamRange.GCPhys == GCPhys, VERR_INVALID_PARAMETER);
1011 Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS);
1012
1013 Log(("PGMR3PhysMMIO2Unmap: %RGp-%RGp %s\n",
1014 pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc));
1015
1016 /*
1017 * Unmap it.
1018 */
1019 pgmLock(pVM);
1020
1021 if (pCur->fOverlapping)
1022 {
1023 /* Restore the RAM pages we've replaced. */
1024 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1025 while (pRam->GCPhys > pCur->RamRange.GCPhysLast)
1026 pRam = pRam->pNextR3;
1027
1028 RTHCPHYS const HCPhysZeroPg = pVM->pgm.s.HCPhysZeroPg;
1029 Assert(HCPhysZeroPg != 0 && HCPhysZeroPg != NIL_RTHCPHYS);
1030 PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1031 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
1032 while (cPagesLeft-- > 0)
1033 {
1034 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhysZeroPg);
1035 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_RAM);
1036 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ZERO);
1037 PGM_PAGE_SET_PAGEID(pPageDst, NIL_GMM_PAGEID);
1038
1039 pPageDst++;
1040 }
1041 }
1042 else
1043 {
1044 REMR3NotifyPhysRamDeregister(pVM, pCur->RamRange.GCPhys, pCur->RamRange.cb);
1045 pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange);
1046 }
1047
1048 pCur->RamRange.GCPhys = NIL_RTGCPHYS;
1049 pCur->RamRange.GCPhysLast = NIL_RTGCPHYS;
1050 pCur->fOverlapping = false;
1051 pCur->fMapped = false;
1052
1053 pgmUnlock(pVM);
1054
1055 return VINF_SUCCESS;
1056}
1057
1058
1059/**
1060 * Checks if the given address is an MMIO2 base address or not.
1061 *
1062 * @returns true/false accordingly.
1063 * @param pVM Pointer to the shared VM structure.
1064 * @param pDevIns The owner of the memory, optional.
1065 * @param GCPhys The address to check.
1066 */
1067VMMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
1068{
1069 /*
1070 * Validate input
1071 */
1072 VM_ASSERT_EMT_RETURN(pVM, false);
1073 AssertPtrReturn(pDevIns, false);
1074 AssertReturn(GCPhys != NIL_RTGCPHYS, false);
1075 AssertReturn(GCPhys != 0, false);
1076 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);
1077
1078 /*
1079 * Search the list.
1080 */
1081 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
1082 if (pCur->RamRange.GCPhys == GCPhys)
1083 {
1084 Assert(pCur->fMapped);
1085 return true;
1086 }
1087 return false;
1088}
1089
1090
1091/**
1092 * Gets the HC physical address of a page in the MMIO2 region.
1093 *
1094 * This API is intended for MMHyper and shouldn't be called
1095 * by anyone else...
1096 *
1097 * @returns VBox status code.
1098 * @param pVM Pointer to the shared VM structure.
1099 * @param pDevIns The owner of the memory, optional.
1100 * @param iRegion The region.
1101 * @param off The page expressed as an offset into the MMIO2 region.
1102 * @param pHCPhys Where to store the result.
1103 */
1104VMMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
1105{
1106 /*
1107 * Validate input
1108 */
1109 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1110 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1111 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1112
1113 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1114 AssertReturn(pCur, VERR_NOT_FOUND);
1115 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
1116
1117 PCPGMPAGE pPage = &pCur->RamRange.aPages[off >> PAGE_SHIFT];
1118 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1119 return VINF_SUCCESS;
1120}
1121
1122
1123/**
1124 * Maps a portion of an MMIO2 region into kernel space (host).
1125 *
1126 * The kernel mapping will become invalid when the MMIO2 memory is deregistered
1127 * or the VM is terminated.
1128 *
1129 * @return VBox status code.
1130 *
1131 * @param pVM Pointer to the shared VM structure.
1132 * @param pDevIns The device owning the MMIO2 memory.
1133 * @param iRegion The region.
1134 * @param off The offset into the region. Must be page aligned.
1135 * @param cb The number of bytes to map. Must be page aligned.
1136 * @param pszDesc Mapping description.
1137 * @param pR0Ptr Where to store the R0 address.
1138 */
1139VMMR3DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
1140 const char *pszDesc, PRTR0PTR pR0Ptr)
1141{
1142 /*
1143 * Validate input.
1144 */
1145 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1146 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1147 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1148
1149 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1150 AssertReturn(pCur, VERR_NOT_FOUND);
1151 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
1152 AssertReturn(cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
1153 AssertReturn(off + cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
1154
1155 /*
1156 * Pass the request on to the support library/driver.
1157 */
1158 int rc = SUPR3PageMapKernel(pCur->pvR3, off, cb, 0, pR0Ptr);
1159
1160 return rc;
1161}
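/*
 * A minimal sketch of mapping the first 64 KB of an MMIO2 region into ring-0
 * so a ring-0 part of the device can access it directly. The offset, size
 * and description are illustrative; offset and size must be page aligned and
 * lie within the region. Guarded out of the build.
 */
#if 0
static int pgmR3PhysExampleMapMmio2Kernel(PVM pVM, PPDMDEVINS pDevIns, PRTR0PTR pR0Ptr)
{
    return PGMR3PhysMMIO2MapKernel(pVM, pDevIns, 0 /* iRegion */, 0 /* off */,
                                   64 * 1024 /* cb */, "Example R0 view", pR0Ptr);
}
#endif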
1162
1163
1164/**
1165 * Registers a ROM image.
1166 *
1167 * Shadowed ROM images require double the amount of backing memory, so
1168 * don't use that unless you have to. Shadowing of ROM images is a process
1169 * where we can select where the reads go and where the writes go. On real
1170 * hardware the chipset provides means to configure this. We provide
1171 * PGMR3PhysProtectROM() for this purpose.
1172 *
1173 * A read-only copy of the ROM image will always be kept around while we
1174 * will allocate RAM pages for the changes on demand (unless all memory
1175 * is configured to be preallocated).
1176 *
1177 * @returns VBox status.
1178 * @param pVM VM Handle.
1179 * @param pDevIns The device instance owning the ROM.
1180 * @param GCPhys First physical address in the range.
1181 * Must be page aligned!
1182 * @param cb The size of the range (in bytes).
1183 * Must be page aligned!
1184 * @param pvBinary Pointer to the binary data backing the ROM image.
1185 * This must be exactly \a cb in size.
1186 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAG_SHADOWED
1187 * and/or PGMPHYS_ROM_FLAG_PERMANENT_BINARY.
1188 * @param pszDesc Pointer to description string. This must not be freed.
1189 *
1190 * @remark There is no way to remove the ROM yet, either automatically on device cleanup or
1191 * manually from the device. This isn't difficult in any way, it's
1192 * just not something we expect to be necessary for a while.
1193 */
1194VMMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
1195 const void *pvBinary, uint32_t fFlags, const char *pszDesc)
1196{
1197 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p fFlags=%#x pszDesc=%s\n",
1198 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, fFlags, pszDesc));
1199
1200 /*
1201 * Validate input.
1202 */
1203 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1204 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
1205 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
1206 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1207 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1208 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
1209 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1210 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAG_SHADOWED | PGMPHYS_ROM_FLAG_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
1211 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
1212
1213 const uint32_t cPages = cb >> PAGE_SHIFT;
1214
1215 /*
1216 * Find the ROM location in the ROM list first.
1217 */
1218 PPGMROMRANGE pRomPrev = NULL;
1219 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
1220 while (pRom && GCPhysLast >= pRom->GCPhys)
1221 {
1222 if ( GCPhys <= pRom->GCPhysLast
1223 && GCPhysLast >= pRom->GCPhys)
1224 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
1225 GCPhys, GCPhysLast, pszDesc,
1226 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
1227 VERR_PGM_RAM_CONFLICT);
1228 /* next */
1229 pRomPrev = pRom;
1230 pRom = pRom->pNextR3;
1231 }
1232
1233 /*
1234 * Find the RAM location and check for conflicts.
1235 *
1236 * Conflict detection is a bit different than for RAM
1237 * registration since a ROM can be located within a RAM
1238 * range. So, what we have to check for is other memory
1239 * types (other than RAM that is) and that we don't span
1240 * more than one RAM range (lazy).
1241 */
1242 bool fRamExists = false;
1243 PPGMRAMRANGE pRamPrev = NULL;
1244 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1245 while (pRam && GCPhysLast >= pRam->GCPhys)
1246 {
1247 if ( GCPhys <= pRam->GCPhysLast
1248 && GCPhysLast >= pRam->GCPhys)
1249 {
1250 /* completely within? */
1251 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
1252 && GCPhysLast <= pRam->GCPhysLast,
1253 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
1254 GCPhys, GCPhysLast, pszDesc,
1255 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1256 VERR_PGM_RAM_CONFLICT);
1257 fRamExists = true;
1258 break;
1259 }
1260
1261 /* next */
1262 pRamPrev = pRam;
1263 pRam = pRam->pNextR3;
1264 }
1265 if (fRamExists)
1266 {
1267 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1268 uint32_t cPagesLeft = cPages;
1269 while (cPagesLeft-- > 0)
1270 {
1271 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
1272 ("%RGp isn't a RAM page (%d) - registering %RGp-%RGp (%s).\n",
1273 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pszDesc),
1274 VERR_PGM_RAM_CONFLICT);
1275 Assert(PGM_PAGE_IS_ZERO(pPage));
1276 pPage++;
1277 }
1278 }
1279
1280 /*
1281 * Update the base memory reservation if necessary.
1282 */
1283 uint32_t cExtraBaseCost = fRamExists ? cPages : 0;
1284 if (fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
1285 cExtraBaseCost += cPages;
1286 if (cExtraBaseCost)
1287 {
1288 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
1289 if (RT_FAILURE(rc))
1290 return rc;
1291 }
1292
1293 /*
1294 * Allocate memory for the virgin copy of the RAM.
1295 */
1296 PGMMALLOCATEPAGESREQ pReq;
1297 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
1298 AssertRCReturn(rc, rc);
1299
1300 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1301 {
1302 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
1303 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
1304 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
1305 }
1306
1307 pgmLock(pVM);
1308 rc = GMMR3AllocatePagesPerform(pVM, pReq);
1309 pgmUnlock(pVM);
1310 if (RT_FAILURE(rc))
1311 {
1312 GMMR3AllocatePagesCleanup(pReq);
1313 return rc;
1314 }
1315
1316 /*
1317 * Allocate the new ROM range and RAM range (if necessary).
1318 */
1319 PPGMROMRANGE pRomNew;
1320 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
1321 if (RT_SUCCESS(rc))
1322 {
1323 PPGMRAMRANGE pRamNew = NULL;
1324 if (!fRamExists)
1325 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
1326 if (RT_SUCCESS(rc))
1327 {
1328 pgmLock(pVM);
1329
1330 /*
1331 * Initialize and insert the RAM range (if required).
1332 */
1333 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
1334 if (!fRamExists)
1335 {
1336 pRamNew->GCPhys = GCPhys;
1337 pRamNew->GCPhysLast = GCPhysLast;
1338 pRamNew->pszDesc = pszDesc;
1339 pRamNew->cb = cb;
1340 pRamNew->fFlags = 0;
1341 pRamNew->pvR3 = NULL;
1342
1343 PPGMPAGE pPage = &pRamNew->aPages[0];
1344 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
1345 {
1346 PGM_PAGE_INIT(pPage,
1347 pReq->aPages[iPage].HCPhysGCPhys,
1348 pReq->aPages[iPage].idPage,
1349 PGMPAGETYPE_ROM,
1350 PGM_PAGE_STATE_ALLOCATED);
1351
1352 pRomPage->Virgin = *pPage;
1353 }
1354
1355 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
1356 }
1357 else
1358 {
1359 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1360 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
1361 {
1362 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_ROM);
1363 PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
1364 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1365 PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);
1366
1367 pRomPage->Virgin = *pPage;
1368 }
1369
1370 pRamNew = pRam;
1371 }
1372 pgmUnlock(pVM);
1373
1374
1375 /*
1376 * !HACK ALERT! REM + (Shadowed) ROM ==> mess.
1377 *
1378 * If it's shadowed we'll register the handler after the ROM notification
1379 * so we get the access handler callbacks that we should. If it isn't
1380 * shadowed we'll do it the other way around to make REM use the built-in
1381 * ROM behavior and not the handler behavior (which is to route all access
1382 * to PGM atm).
1383 */
1384 if (fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
1385 {
1386 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, true /* fShadowed */);
1387 rc = PGMR3HandlerPhysicalRegister(pVM,
1388 fFlags & PGMPHYS_ROM_FLAG_SHADOWED
1389 ? PGMPHYSHANDLERTYPE_PHYSICAL_ALL
1390 : PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
1391 GCPhys, GCPhysLast,
1392 pgmR3PhysRomWriteHandler, pRomNew,
1393 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
1394 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
1395 }
1396 else
1397 {
1398 rc = PGMR3HandlerPhysicalRegister(pVM,
1399 fFlags & PGMPHYS_ROM_FLAG_SHADOWED
1400 ? PGMPHYSHANDLERTYPE_PHYSICAL_ALL
1401 : PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
1402 GCPhys, GCPhysLast,
1403 pgmR3PhysRomWriteHandler, pRomNew,
1404 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
1405 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
1406 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false /* fShadowed */);
1407 }
1408 if (RT_SUCCESS(rc))
1409 {
1410 pgmLock(pVM);
1411
1412 /*
1413 * Copy the image over to the virgin pages.
1414 * This must be done after linking in the RAM range.
1415 */
1416 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
1417 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
1418 {
1419 void *pvDstPage;
1420 PPGMPAGEMAP pMapIgnored;
1421 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pMapIgnored, &pvDstPage);
1422 if (RT_FAILURE(rc))
1423 {
1424 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
1425 break;
1426 }
1427 memcpy(pvDstPage, (const uint8_t *)pvBinary + (iPage << PAGE_SHIFT), PAGE_SIZE);
1428 }
1429 if (RT_SUCCESS(rc))
1430 {
1431 /*
1432 * Initialize the ROM range.
1433 * Note that the Virgin member of the pages has already been initialized above.
1434 */
1435 pRomNew->GCPhys = GCPhys;
1436 pRomNew->GCPhysLast = GCPhysLast;
1437 pRomNew->cb = cb;
1438 pRomNew->fFlags = fFlags;
1439 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAG_PERMANENT_BINARY ? pvBinary : NULL;
1440 pRomNew->pszDesc = pszDesc;
1441
1442 for (unsigned iPage = 0; iPage < cPages; iPage++)
1443 {
1444 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
1445 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
1446 PGM_PAGE_INIT_ZERO_REAL(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
1447 }
1448
1449 /*
1450 * Insert the ROM range, tell REM and return successfully.
1451 */
1452 pRomNew->pNextR3 = pRom;
1453 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
1454 pRomNew->pNextRC = pRom ? MMHyperCCToRC(pVM, pRom) : NIL_RTRCPTR;
1455
1456 if (pRomPrev)
1457 {
1458 pRomPrev->pNextR3 = pRomNew;
1459 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
1460 pRomPrev->pNextRC = MMHyperCCToRC(pVM, pRomNew);
1461 }
1462 else
1463 {
1464 pVM->pgm.s.pRomRangesR3 = pRomNew;
1465 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
1466 pVM->pgm.s.pRomRangesRC = MMHyperCCToRC(pVM, pRomNew);
1467 }
1468
1469 GMMR3AllocatePagesCleanup(pReq);
1470 pgmUnlock(pVM);
1471 return VINF_SUCCESS;
1472 }
1473
1474 /* bail out */
1475
1476 pgmUnlock(pVM);
1477 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
1478 AssertRC(rc2);
1479 pgmLock(pVM);
1480 }
1481
1482 if (!fRamExists)
1483 {
1484 pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
1485 MMHyperFree(pVM, pRamNew);
1486 }
1487 }
1488 MMHyperFree(pVM, pRomNew);
1489 }
1490
1491 /** @todo Purge the mapping cache or something... */
1492 GMMR3FreeAllocatedPages(pVM, pReq);
1493 GMMR3AllocatePagesCleanup(pReq);
1494 pgmUnlock(pVM);
1495 return rc;
1496}
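/*
 * A hedged sketch of registering a shadowed ROM image with
 * PGMR3PhysRomRegister, roughly the shape of a BIOS registration: 64 KB at
 * the top of the low 1 MB, shadowed so the chipset emulation can later
 * switch reads to the RAM copy, and with a permanent binary so resets can
 * verify the virgin pages. The address, size and description are
 * illustrative only, and pvBios must be exactly cb bytes. Guarded out of
 * the build.
 */
#if 0
static int pgmR3PhysExampleRegisterRom(PVM pVM, PPDMDEVINS pDevIns, const void *pvBios)
{
    return PGMR3PhysRomRegister(pVM, pDevIns, 0xf0000 /* GCPhys */, 0x10000 /* cb */, pvBios,
                                PGMPHYS_ROM_FLAG_SHADOWED | PGMPHYS_ROM_FLAG_PERMANENT_BINARY,
                                "Example BIOS ROM");
}
#endif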
1497
1498
1499/**
1500 * \#PF Handler callback for ROM write accesses.
1501 *
1502 * @returns VINF_SUCCESS if the handler has carried out the operation.
1503 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1504 * @param pVM VM Handle.
1505 * @param GCPhys The physical address the guest is writing to.
1506 * @param pvPhys The HC mapping of that address.
1507 * @param pvBuf What the guest is reading/writing.
1508 * @param cbBuf How much it's reading/writing.
1509 * @param enmAccessType The access type.
1510 * @param pvUser User argument.
1511 */
1512static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1513{
1514 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
1515 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
1516 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
1517 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
1518 switch (pRomPage->enmProt)
1519 {
1520 /*
1521 * Ignore.
1522 */
1523 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
1524 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
1525 return VINF_SUCCESS;
1526
1527 /*
1528 * Write to the ram page.
1529 */
1530 case PGMROMPROT_READ_ROM_WRITE_RAM:
1531 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
1532 {
1533 /* This should be impossible now, pvPhys doesn't work cross page any longer. */
1534 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
1535
1536 /*
1537 * Take the lock, do lazy allocation, map the page and copy the data.
1538 *
1539 * Note that we have to bypass the mapping TLB since it works on
1540 * guest physical addresses and entering the shadow page would
1541 * kind of screw things up...
1542 */
1543 int rc = pgmLock(pVM);
1544 AssertRC(rc);
1545
1546 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pRomPage->Shadow) != PGM_PAGE_STATE_ALLOCATED))
1547 {
1548 rc = pgmPhysPageMakeWritable(pVM, &pRomPage->Shadow, GCPhys);
1549 if (RT_FAILURE(rc))
1550 {
1551 pgmUnlock(pVM);
1552 return rc;
1553 }
1554 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1555 }
1556
1557 void *pvDstPage;
1558 PPGMPAGEMAP pMapIgnored;
1559 int rc2 = pgmPhysPageMap(pVM, &pRomPage->Shadow, GCPhys & X86_PTE_PG_MASK, &pMapIgnored, &pvDstPage);
1560 if (RT_SUCCESS(rc2))
1561 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
1562 else
1563 rc = rc2;
1564
1565 pgmUnlock(pVM);
1566 return rc;
1567 }
1568
1569 default:
1570 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
1571 pRom->aPages[iPage].enmProt, iPage, GCPhys),
1572 VERR_INTERNAL_ERROR);
1573 }
1574}
1575
1576
1577/**
1578 * Called by PGMR3Reset to reset the shadow, switch to the virgin,
1579 * and verify that the virgin part is untouched.
1580 *
1581 * This is done after the normal memory has been cleared.
1582 *
1583 * ASSUMES that the caller owns the PGM lock.
1584 *
1585 * @param pVM The VM handle.
1586 */
1587int pgmR3PhysRomReset(PVM pVM)
1588{
1589 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
1590 {
1591 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
1592
1593 if (pRom->fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
1594 {
1595 /*
1596 * Reset the physical handler.
1597 */
1598 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
1599 AssertRCReturn(rc, rc);
1600
1601 /*
1602 * What we do with the shadow pages depends on the memory
1603 * preallocation option. If not enabled, we'll just throw
1604 * out all the dirty pages and replace them by the zero page.
1605 */
1606 if (1)///@todo !pVM->pgm.f.fRamPreAlloc)
1607 {
1608 /* Count dirty shadow pages. */
1609 uint32_t cDirty = 0;
1610 uint32_t iPage = cPages;
1611 while (iPage-- > 0)
1612 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1613 cDirty++;
1614 if (cDirty)
1615 {
1616 /* Free the dirty pages. */
1617 PGMMFREEPAGESREQ pReq;
1618 rc = GMMR3FreePagesPrepare(pVM, &pReq, cDirty, GMMACCOUNT_BASE);
1619 AssertRCReturn(rc, rc);
1620
1621 uint32_t iReqPage = 0;
1622 for (iPage = 0; iPage < cPages; iPage++)
1623 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1624 {
1625 pReq->aPages[iReqPage].idPage = PGM_PAGE_GET_PAGEID(&pRom->aPages[iPage].Shadow);
1626 iReqPage++;
1627 }
1628
1629 rc = GMMR3FreePagesPerform(pVM, pReq, cDirty);
1630 GMMR3FreePagesCleanup(pReq);
1631 AssertRCReturn(rc, rc);
1632
1633 /* setup the zero page. */
1634 for (iPage = 0; iPage < cPages; iPage++)
1635 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1636 PGM_PAGE_INIT_ZERO_REAL(&pRom->aPages[iPage].Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
1637 }
1638 }
1639 else
1640 {
1641 /* clear all the pages. */
1642 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1643 {
1644 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
1645 rc = pgmPhysPageMakeWritable(pVM, &pRom->aPages[iPage].Shadow, GCPhys);
1646 if (RT_FAILURE(rc))
1647 break;
1648
1649 void *pvDstPage;
1650 PPGMPAGEMAP pMapIgnored;
1651 rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pMapIgnored, &pvDstPage);
1652 if (RT_FAILURE(rc))
1653 break;
1654 ASMMemZeroPage(pvDstPage);
1655 }
1656 AssertRCReturn(rc, rc);
1657 }
1658 }
1659
1660#ifdef VBOX_STRICT
1661 /*
1662 * Verify that the virgin page is unchanged if possible.
1663 */
1664 if (pRom->pvOriginal)
1665 {
1666 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
1667 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbSrcPage += PAGE_SIZE)
1668 {
1669 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
1670 PPGMPAGEMAP pMapIgnored;
1671 void *pvDstPage;
1672 int rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pMapIgnored, &pvDstPage);
1673 if (RT_FAILURE(rc))
1674 break;
1675 if (memcmp(pvDstPage, pbSrcPage, PAGE_SIZE))
1676 LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
1677 GCPhys, pRom->pszDesc));
1678 }
1679 }
1680#endif
1681 }
1682
1683 return VINF_SUCCESS;
1684}
1685
1686
1687/**
1688 * Change the shadowing of a range of ROM pages.
1689 *
1690 * This is intended for implementing chipset specific memory registers
1691 * and will not be very strict about the input. It will silently ignore
1692 * any pages that are not part of a shadowed ROM.
1693 *
1694 * @returns VBox status code.
1695 * @param pVM Pointer to the shared VM structure.
1696 * @param GCPhys Where to start. Page aligned.
1697 * @param cb How much to change. Page aligned.
1698 * @param enmProt The new ROM protection.
1699 */
1700VMMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
1701{
1702 /*
1703 * Check input
1704 */
1705 if (!cb)
1706 return VINF_SUCCESS;
1707 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1708 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1709 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1710 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1711 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
1712
1713 /*
1714 * Process the request.
1715 */
1716 bool fFlushedPool = false;
1717 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
1718 if ( GCPhys <= pRom->GCPhysLast
1719 && GCPhysLast >= pRom->GCPhys)
1720 {
1721 /*
1722 * Iterate the relevant pages and make the necessary changes.
1723 */
1724 bool fChanges = false;
1725 uint32_t const cPages = pRom->GCPhysLast > GCPhysLast
1726 ? pRom->cb >> PAGE_SHIFT
1727 : (GCPhysLast - pRom->GCPhys) >> PAGE_SHIFT;
1728 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
1729 iPage < cPages;
1730 iPage++)
1731 {
1732 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
1733 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
1734 {
1735 fChanges = true;
1736
1737 /* flush the page pool first so we don't leave any usage references dangling. */
1738 if (!fFlushedPool)
1739 {
1740 pgmPoolFlushAll(pVM);
1741 fFlushedPool = true;
1742 }
1743
1744 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
1745 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
1746 PPGMPAGE pRamPage = pgmPhysGetPage(&pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));
1747
1748 *pOld = *pRamPage;
1749 *pRamPage = *pNew;
1750 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
1751 }
1752 }
1753
1754 /*
1755 * Reset the access handler if we made changes, no need
1756 * to optimize this.
1757 */
1758 if (fChanges)
1759 {
1760 int rc = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
1761 AssertRCReturn(rc, rc);
1762 }
1763
1764 /* Advance - cb isn't updated. */
1765 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
1766 }
1767
1768 return VINF_SUCCESS;
1769}
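
/*
 * Illustrative sketch (hypothetical caller): how a chipset emulation might
 * drive PGMR3PhysRomProtect() when the guest programs a PAM-style shadowing
 * register.  The helper name, the 0xF0000 range and the register semantics
 * are assumptions made for the example; only the PGMR3PhysRomProtect() call
 * and the PGMROMPROT_* values come from the API above.
 */
static int exampleChipsetPamWrite(PVM pVM, bool fShadowWritable)
{
    /* Flip the legacy BIOS area between plain read-only ROM and a writable
       shadow copy; pages in the range that are not part of a shadowed ROM
       are silently ignored, as documented above. */
    PGMROMPROT enmProt = fShadowWritable
                       ? PGMROMPROT_READ_ROM_WRITE_RAM       /* reads hit the ROM, writes land in the shadow copy */
                       : PGMROMPROT_READ_ROM_WRITE_IGNORE;   /* normal ROM behaviour */
    return PGMR3PhysRomProtect(pVM, UINT32_C(0xf0000), UINT32_C(0x10000), enmProt);
}
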
1770
1771#ifndef VBOX_WITH_NEW_PHYS_CODE
1772
1773/**
1774 * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
1775 * registration APIs call to inform PGM about memory registrations.
1776 *
1777 * It registers the physical memory range with PGM. MM is responsible
1778 * for the top-level things - allocation and locking - while PGM is taking
1779 * care of all the details and implements the physical address space virtualization.
1780 *
1781 * @returns VBox status.
1782 * @param pVM The VM handle.
1783 * @param pvRam HC virtual address of the RAM range. (page aligned)
1784 * @param GCPhys GC physical address of the RAM range. (page aligned)
1785 * @param cb Size of the RAM range. (page aligned)
1786 * @param fFlags Flags, MM_RAM_*.
1787 * @param paPages Pointer to an array of physical page descriptors.
1788 * @param pszDesc Description string.
1789 */
1790VMMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
1791{
1792 /*
1793 * Validate input.
1794 * (Not so important because callers are only MMR3PhysRegister()
1795 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
1796 */
1797 Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
1798
1799 Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
1800 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
1801 Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
1802 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
1803 Assert(!(fFlags & ~0xfff));
1804 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1805 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
1806 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
1807 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1808 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1809 if (GCPhysLast < GCPhys)
1810 {
1811 AssertMsgFailed(("The range wraps! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1812 return VERR_INVALID_PARAMETER;
1813 }
1814
1815 /*
1816 * Find range location and check for conflicts.
1817 */
1818 PPGMRAMRANGE pPrev = NULL;
1819 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
1820 while (pCur)
1821 {
1822 if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
1823 {
1824 AssertMsgFailed(("Conflict! This cannot happen!\n"));
1825 return VERR_PGM_RAM_CONFLICT;
1826 }
1827 if (GCPhysLast < pCur->GCPhys)
1828 break;
1829
1830 /* next */
1831 pPrev = pCur;
1832 pCur = pCur->pNextR3;
1833 }
1834
1835 /*
1836 * Allocate RAM range.
1837 * Small ranges are allocated from the heap, big ones have separate mappings.
1838 */
1839 size_t cbRam = RT_OFFSETOF(PGMRAMRANGE, aPages[cb >> PAGE_SHIFT]);
1840 PPGMRAMRANGE pNew;
1841 int rc = VERR_NO_MEMORY;
1842 if (cbRam > PAGE_SIZE / 2)
1843 { /* large */
1844 cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
1845 rc = MMR3HyperAllocOnceNoRel(pVM, cbRam, PAGE_SIZE, MM_TAG_PGM_PHYS, (void **)&pNew);
1846 AssertMsgRC(rc, ("MMR3HyperAllocOnceNoRel(,%#x,,) -> %Rrc\n", cbRam, rc));
1847 }
1848 else
1849 { /* small */
1850 rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
1851 AssertMsgRC(rc, ("MMHyperAlloc(,%#x,,,) -> %Rrc\n", cbRam, rc));
1852 }
1853 if (RT_SUCCESS(rc))
1854 {
1855 /*
1856 * Initialize the range.
1857 */
1858 pNew->pvR3 = pvRam;
1859 pNew->GCPhys = GCPhys;
1860 pNew->GCPhysLast = GCPhysLast;
1861 pNew->cb = cb;
1862 pNew->fFlags = fFlags;
1863 pNew->paChunkR3Ptrs = NULL;
1864
1865 unsigned iPage = (unsigned)(cb >> PAGE_SHIFT);
1866 if (paPages)
1867 {
1868 while (iPage-- > 0)
1869 {
1870 PGM_PAGE_INIT(&pNew->aPages[iPage], paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
1871 fFlags & MM_RAM_FLAGS_MMIO2 ? PGMPAGETYPE_MMIO2 : PGMPAGETYPE_RAM,
1872 PGM_PAGE_STATE_ALLOCATED);
1873 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
1874 }
1875 }
1876 else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1877 {
1878 /* Allocate memory for chunk to HC ptr lookup array. */
1879 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
1880            AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Rrc\n", cbRam, rc), rc);
1881
1882 /* Physical memory will be allocated on demand. */
1883 while (iPage-- > 0)
1884 {
1885 PGM_PAGE_INIT(&pNew->aPages[iPage], 0, NIL_GMM_PAGEID, PGMPAGETYPE_RAM, PGM_PAGE_STATE_ZERO);
1886 pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
1887 }
1888 }
1889 else
1890 {
1891 Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
1892 RTHCPHYS HCPhysDummyPage = MMR3PageDummyHCPhys(pVM);
1893 while (iPage-- > 0)
1894 {
1895 PGM_PAGE_INIT(&pNew->aPages[iPage], HCPhysDummyPage, NIL_GMM_PAGEID, PGMPAGETYPE_MMIO, PGM_PAGE_STATE_ZERO);
1896 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
1897 }
1898 }
1899
1900 /*
1901 * Insert the new RAM range.
1902 */
1903 pgmLock(pVM);
1904 pNew->pNextR3 = pCur;
1905 pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : NIL_RTR0PTR;
1906 pNew->pNextRC = pCur ? MMHyperCCToRC(pVM, pCur) : NIL_RTRCPTR;
1907 if (pPrev)
1908 {
1909 pPrev->pNextR3 = pNew;
1910 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
1911 pPrev->pNextRC = MMHyperCCToRC(pVM, pNew);
1912 }
1913 else
1914 {
1915 pVM->pgm.s.pRamRangesR3 = pNew;
1916 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
1917 pVM->pgm.s.pRamRangesRC = MMHyperCCToRC(pVM, pNew);
1918 }
1919 pgmUnlock(pVM);
1920 }
1921 return rc;
1922}
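
/*
 * Illustrative sketch (hypothetical caller of the old-phys-code API above):
 * registering a pure MMIO hole the way the asserts expect it - no backing
 * pages (paPages == NULL, pvRam == NULL) and exactly the RESERVED + MMIO
 * flag combination.  The 0xA0000 range, the 128KB size and the description
 * string are assumptions made for the example.
 */
static int exampleRegisterMmioHole(PVM pVM)
{
    return PGMR3PhysRegister(pVM, NULL /*pvRam*/, UINT32_C(0x000a0000) /*GCPhys*/, 0x20000 /*cb, 128KB*/,
                             MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO, NULL /*paPages*/,
                             "Example MMIO hole");
}
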
1923
1924
1925/**
1926 * Register a chunk of the physical memory range with PGM. MM is responsible
1927 * for the top-level things - allocation and locking - while PGM is taking
1928 * care of all the details and implements the physical address space virtualization.
1929 *
1930 *
1931 * @returns VBox status.
1932 * @param pVM The VM handle.
1933 * @param pvRam HC virtual address of the RAM range. (page aligned)
1934 * @param GCPhys GC physical address of the RAM range. (page aligned)
1935 * @param cb Size of the RAM range. (page aligned)
1936 * @param fFlags Flags, MM_RAM_*.
1937 * @param paPages Pointer to an array of physical page descriptors.
1938 * @param pszDesc Description string.
1939 */
1940VMMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
1941{
1942 NOREF(pszDesc);
1943
1944 /*
1945 * Validate input.
1946 * (Not so important because callers are only MMR3PhysRegister()
1947 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
1948 */
1949 Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
1950
1951 Assert(paPages);
1952 Assert(pvRam);
1953 Assert(!(fFlags & ~0xfff));
1954 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1955 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
1956 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
1957 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1958 Assert(VM_IS_EMT(pVM));
1959 Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
1960 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
1961
1962 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1963 if (GCPhysLast < GCPhys)
1964 {
1965 AssertMsgFailed(("The range wraps! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1966 return VERR_INVALID_PARAMETER;
1967 }
1968
1969 /*
1970 * Find existing range location.
1971 */
1972 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1973 while (pRam)
1974 {
1975 RTGCPHYS off = GCPhys - pRam->GCPhys;
1976 if ( off < pRam->cb
1977 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1978 break;
1979
1980 pRam = pRam->CTX_SUFF(pNext);
1981 }
1982 AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
1983
1984 unsigned off = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1985 unsigned iPage = (unsigned)(cb >> PAGE_SHIFT);
1986 if (paPages)
1987 {
1988 while (iPage-- > 0)
1989 pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
1990 }
1991 off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
1992 pRam->paChunkR3Ptrs[off] = (uintptr_t)pvRam;
1993
1994 /* Notify the recompiler. */
1995 REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);
1996
1997 return VINF_SUCCESS;
1998}
1999
2000
2001/**
2002 * Allocate missing physical pages for an existing guest RAM range.
2003 *
2004 * @returns VBox status.
2005 * @param pVM The VM handle.
2006 * @param pGCPhys     Pointer to the GC physical address of the RAM range. (page aligned)
2007 */
2008VMMR3DECL(int) PGM3PhysGrowRange(PVM pVM, PCRTGCPHYS pGCPhys)
2009{
2010 RTGCPHYS GCPhys = *pGCPhys;
2011
2012 /*
2013 * Walk range list.
2014 */
2015 pgmLock(pVM);
2016
2017 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2018 while (pRam)
2019 {
2020 RTGCPHYS off = GCPhys - pRam->GCPhys;
2021 if ( off < pRam->cb
2022 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
2023 {
2024 bool fRangeExists = false;
2025 unsigned off = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
2026
2027 /* Note: A request made from another thread may end up in EMT after somebody else has already allocated the range. */
2028 if (pRam->paChunkR3Ptrs[off])
2029 fRangeExists = true;
2030
2031 pgmUnlock(pVM);
2032 if (fRangeExists)
2033 return VINF_SUCCESS;
2034 return pgmr3PhysGrowRange(pVM, GCPhys);
2035 }
2036
2037 pRam = pRam->CTX_SUFF(pNext);
2038 }
2039 pgmUnlock(pVM);
2040 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2041}
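
/*
 * Usage sketch (hypothetical caller): code that hits a not-yet-allocated page
 * in a dynamically allocated range asks PGM to back the containing chunk
 * first.  The address is passed by pointer so the call can also be marshalled
 * through VMR3ReqCall when it does not originate on the EMT, as
 * pgmr3PhysGrowRange() below does.
 */
static int exampleGrowRangeAt(PVM pVM, RTGCPHYS GCPhysHit)
{
    RTGCPHYS GCPhys = GCPhysHit;
    int rc = PGM3PhysGrowRange(pVM, &GCPhys);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
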
2042
2043
2044/**
2045 * Allocate missing physical pages for an existing guest RAM range.
2046 *
2047 * @returns VBox status.
2048 * @param pVM The VM handle.
2049 *
2050 * @param GCPhys GC physical address of the RAM range. (page aligned)
2051 */
2052int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
2053{
2054 void *pvRam;
2055 int rc;
2056
2057 /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
2058 if (!VM_IS_EMT(pVM))
2059 {
2060 PVMREQ pReq;
2061 const RTGCPHYS GCPhysParam = GCPhys;
2062
2063 AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));
2064
2065 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, &GCPhysParam);
2066 if (RT_SUCCESS(rc))
2067 {
2068 rc = pReq->iStatus;
2069 VMR3ReqFree(pReq);
2070 }
2071 return rc;
2072 }
2073
2074 /* Round down to chunk boundary */
2075 GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;
2076
2077 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DynRamGrow);
2078 STAM_COUNTER_ADD(&pVM->pgm.s.StatR3DynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));
2079
2080 Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %RGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));
2081
2082 unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;
2083
2084 for (;;)
2085 {
2086 rc = SUPPageAlloc(cPages, &pvRam);
2087 if (RT_SUCCESS(rc))
2088 {
2089 rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
2090 if (RT_SUCCESS(rc))
2091 return rc;
2092
2093 SUPPageFree(pvRam, cPages);
2094 }
2095
2096 VMSTATE enmVMState = VMR3GetState(pVM);
2097 if (enmVMState != VMSTATE_RUNNING)
2098 {
2099 AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %RGp!\n", GCPhys));
2100 LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %RGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
2101 return rc;
2102 }
2103
2104 LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));
2105
2106 /* Pause first, then inform Main. */
2107 rc = VMR3SuspendNoSave(pVM);
2108 AssertRC(rc);
2109
2110 VMSetRuntimeError(pVM, false, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM");
2111
2112 /* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
2113 rc = VMR3WaitForResume(pVM);
2114
2115 /* Retry */
2116 LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
2117 }
2118}
2119
2120
2121/**
2122 * Interface MMR3RomRegister() and MMR3PhysReserve calls to update the
2123 * flags of existing RAM ranges.
2124 *
2125 * @returns VBox status.
2126 * @param pVM The VM handle.
2127 * @param GCPhys GC physical address of the RAM range. (page aligned)
2128 * @param cb Size of the RAM range. (page aligned)
2129 * @param fFlags      The OR flags, MM_RAM_* \#defines.
2130 * @param fMask       The AND mask for the flags.
2131 */
2132VMMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
2133{
2134 Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));
2135
2136 /*
2137 * Validate input.
2138 * (Not so important because caller is always MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
2139 */
2140 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
2141 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
2142 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2143 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2144 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2145
2146 /*
2147 * Lookup the range.
2148 */
2149 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2150 while (pRam && GCPhys > pRam->GCPhysLast)
2151 pRam = pRam->CTX_SUFF(pNext);
2152 if ( !pRam
2153 || GCPhys > pRam->GCPhysLast
2154 || GCPhysLast < pRam->GCPhys)
2155 {
2156 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
2157 return VERR_INVALID_PARAMETER;
2158 }
2159
2160 /*
2161 * Update the requested flags.
2162 */
2163 RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
2164 | fMask;
2165 unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
2166 unsigned iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2167 for ( ; iPage < iPageEnd; iPage++)
2168 pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */
2169
2170 return VINF_SUCCESS;
2171}
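
/*
 * Illustrative sketch (hypothetical caller): the fMask parameter selects which
 * of the RESERVED/ROM/MMIO/MMIO2 bits survive the update - passing 0 wipes all
 * four before the new fFlags are OR-ed in, as the fFullMask construction above
 * shows.  The helper name is an assumption made for the example.
 */
static int exampleMarkRangeReserved(PVM pVM, RTGCPHYS GCPhys, size_t cb)
{
    /* Clear any previous special flags on the pages and mark them reserved. */
    return PGMR3PhysSetFlags(pVM, GCPhys, cb, MM_RAM_FLAGS_RESERVED, 0);
}
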
2172
2173#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2174
2175/**
2176 * Sets the Address Gate 20 state.
2177 *
2178 * @param pVM VM handle.
2179 * @param fEnable True if the gate should be enabled.
2180 * False if the gate should be disabled.
2181 */
2182VMMDECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable)
2183{
2184 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVM->pgm.s.fA20Enabled));
2185 if (pVM->pgm.s.fA20Enabled != (RTUINT)fEnable)
2186 {
2187 pVM->pgm.s.fA20Enabled = fEnable;
2188 pVM->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
2189 REMR3A20Set(pVM, fEnable);
2190 /** @todo we're not handling this correctly for VT-x / AMD-V. See #2911 */
2191 }
2192}
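
/*
 * Worked example of the mask computed above (the values follow directly from
 * the expression; the helper exists only for illustration): with the gate
 * disabled, bit 20 of every guest physical address is forced to zero, so an
 * access at 1MB wraps to 0 just like behind a real A20 gate.
 */
static void examplePgmA20MaskValues(void)
{
    RTGCPHYS const fMaskEnabled  = ~(RTGCPHYS)((uint32_t)!true  << 20); /* == ~(RTGCPHYS)0        - every bit passes */
    RTGCPHYS const fMaskDisabled = ~(RTGCPHYS)((uint32_t)!false << 20); /* == ~(RTGCPHYS)0x100000 - bit 20 cleared   */
    Assert((UINT32_C(0x00100000) & fMaskDisabled) == 0);
    Assert((UINT32_C(0x00100000) & fMaskEnabled)  == UINT32_C(0x00100000));
    NOREF(fMaskEnabled); NOREF(fMaskDisabled);
}
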
2193
2194
2195/**
2196 * Tree enumeration callback for dealing with age rollover.
2197 * It will perform a simple compression of the current age.
2198 */
2199static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
2200{
2201 /* Age compression - ASSUMES iNow == 4. */
2202 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
2203 if (pChunk->iAge >= UINT32_C(0xffffff00))
2204 pChunk->iAge = 3;
2205 else if (pChunk->iAge >= UINT32_C(0xfffff000))
2206 pChunk->iAge = 2;
2207 else if (pChunk->iAge)
2208 pChunk->iAge = 1;
2209 else /* iAge = 0 */
2210 pChunk->iAge = 4;
2211
2212 /* reinsert */
2213 PVM pVM = (PVM)pvUser;
2214 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
2215 pChunk->AgeCore.Key = pChunk->iAge;
2216 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2217 return 0;
2218}
2219
2220
2221/**
2222 * Tree enumeration callback that updates the chunks that have
2223 * been used since the last ageing.
2224 */
2225static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
2226{
2227 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
2228 if (!pChunk->iAge)
2229 {
2230 PVM pVM = (PVM)pvUser;
2231 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
2232 pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
2233 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2234 }
2235
2236 return 0;
2237}
2238
2239
2240/**
2241 * Performs ageing of the ring-3 chunk mappings.
2242 *
2243 * @param pVM The VM handle.
2244 */
2245VMMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
2246{
2247 pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
2248 pVM->pgm.s.ChunkR3Map.iNow++;
2249 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
2250 {
2251 pVM->pgm.s.ChunkR3Map.iNow = 4;
2252 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
2253 }
2254 else
2255 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
2256}
2257
2258
2259/**
2260 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
2261 */
2262typedef struct PGMR3PHYSCHUNKUNMAPCB
2263{
2264 PVM pVM; /**< The VM handle. */
2265 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
2266} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
2267
2268
2269/**
2270 * Callback used to find the mapping that's been unused for
2271 * the longest time.
2272 */
2273static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
2274{
2275 do
2276 {
2277 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
2278 if ( pChunk->iAge
2279 && !pChunk->cRefs)
2280 {
2281 /*
2282 * Check that it's not in any of the TLBs.
2283 */
2284 PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
2285 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
2286 if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
2287 {
2288 pChunk = NULL;
2289 break;
2290 }
2291 if (pChunk)
2292 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
2293 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
2294 {
2295 pChunk = NULL;
2296 break;
2297 }
2298 if (pChunk)
2299 {
2300 ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
2301 return 1; /* done */
2302 }
2303 }
2304
2305 /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
2306 pNode = pNode->pList;
2307 } while (pNode);
2308 return 0;
2309}
2310
2311
2312/**
2313 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
2314 *
2315 * The candidate will not be part of any TLBs, so no need to flush
2316 * anything afterwards.
2317 *
2318 * @returns Chunk id.
2319 * @param pVM The VM handle.
2320 */
2321static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
2322{
2323 /*
2324 * Do tree ageing first?
2325 */
2326 if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
2327 PGMR3PhysChunkAgeing(pVM);
2328
2329 /*
2330 * Enumerate the age tree starting with the left most node.
2331 */
2332 PGMR3PHYSCHUNKUNMAPCB Args;
2333 Args.pVM = pVM;
2334 Args.pChunk = NULL;
2335    if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
2336 return Args.pChunk->Core.Key;
2337 return INT32_MAX;
2338}
2339
2340
2341/**
2342 * Maps the given chunk into the ring-3 mapping cache.
2343 *
2344 * This will call ring-0.
2345 *
2346 * @returns VBox status code.
2347 * @param pVM The VM handle.
2348 * @param idChunk The chunk in question.
2349 * @param ppChunk Where to store the chunk tracking structure.
2350 *
2351 * @remarks Called from within the PGM critical section.
2352 */
2353int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
2354{
2355 int rc;
2356 /*
2357 * Allocate a new tracking structure first.
2358 */
2359#if 0 /* for later when we've got a separate mapping method for ring-0. */
2360 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
2361 AssertReturn(pChunk, VERR_NO_MEMORY);
2362#else
2363 PPGMCHUNKR3MAP pChunk;
2364 rc = MMHyperAlloc(pVM, sizeof(*pChunk), 0, MM_TAG_PGM_CHUNK_MAPPING, (void **)&pChunk);
2365 AssertRCReturn(rc, rc);
2366#endif
2367 pChunk->Core.Key = idChunk;
2368 pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
2369 pChunk->iAge = 0;
2370 pChunk->cRefs = 0;
2371 pChunk->cPermRefs = 0;
2372 pChunk->pv = NULL;
2373
2374 /*
2375 * Request the ring-0 part to map the chunk in question and if
2376 * necessary unmap another one to make space in the mapping cache.
2377 */
2378 GMMMAPUNMAPCHUNKREQ Req;
2379 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
2380 Req.Hdr.cbReq = sizeof(Req);
2381 Req.pvR3 = NULL;
2382 Req.idChunkMap = idChunk;
2383 Req.idChunkUnmap = NIL_GMM_CHUNKID;
2384 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
2385 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
2386 rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
2387 if (RT_SUCCESS(rc))
2388 {
2389 /*
2390 * Update the tree.
2391 */
2392 /* insert the new one. */
2393 AssertPtr(Req.pvR3);
2394 pChunk->pv = Req.pvR3;
2395 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
2396 AssertRelease(fRc);
2397 pVM->pgm.s.ChunkR3Map.c++;
2398
2399 fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2400 AssertRelease(fRc);
2401
2402 /* remove the unmapped one. */
2403 if (Req.idChunkUnmap != NIL_GMM_CHUNKID)
2404 {
2405 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
2406 AssertRelease(pUnmappedChunk);
2407 pUnmappedChunk->pv = NULL;
2408 pUnmappedChunk->Core.Key = UINT32_MAX;
2409#if 0 /* for later when we've got a separate mapping method for ring-0. */
2410 MMR3HeapFree(pUnmappedChunk);
2411#else
2412 MMHyperFree(pVM, pUnmappedChunk);
2413#endif
2414 pVM->pgm.s.ChunkR3Map.c--;
2415 }
2416 }
2417 else
2418 {
2419 AssertRC(rc);
2420#if 0 /* for later when we've got a separate mapping method for ring-0. */
2421 MMR3HeapFree(pChunk);
2422#else
2423 MMHyperFree(pVM, pChunk);
2424#endif
2425 pChunk = NULL;
2426 }
2427
2428 *ppChunk = pChunk;
2429 return rc;
2430}
2431
2432
2433/**
2434 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
2435 *
2436 * @returns see pgmR3PhysChunkMap.
2437 * @param pVM The VM handle.
2438 * @param idChunk The chunk to map.
2439 */
2440VMMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
2441{
2442 PPGMCHUNKR3MAP pChunk;
2443 return pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
2444}
2445
2446
2447/**
2448 * Invalidates the TLB for the ring-3 mapping cache.
2449 *
2450 * @param pVM The VM handle.
2451 */
2452VMMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
2453{
2454 pgmLock(pVM);
2455 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
2456 {
2457 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
2458 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
2459 }
2460 pgmUnlock(pVM);
2461}
2462
2463
2464/**
2465 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
2466 *
2467 * @returns The following VBox status codes.
2468 * @retval VINF_SUCCESS on success. FF cleared.
2469 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
2470 *
2471 * @param pVM The VM handle.
2472 */
2473VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
2474{
2475 pgmLock(pVM);
2476
2477 /*
2478 * Allocate more pages, noting down the index of the first new page.
2479 */
2480 uint32_t iClear = pVM->pgm.s.cHandyPages;
2481 AssertMsgReturn(iClear <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d", iClear), VERR_INTERNAL_ERROR);
2482 Log(("PGMR3PhysAllocateHandyPages: %d -> %d\n", iClear, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
2483 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
2484 if (rc == VERR_GMM_SEED_ME)
2485 {
2486 void *pvChunk;
2487 rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
2488 if (RT_SUCCESS(rc))
2489 rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
2490 if (RT_SUCCESS(rc))
2491 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
2492 }
2493
2494 /*
2495 * Clear the pages.
2496 */
2497 if (RT_SUCCESS(rc))
2498 {
2499 while (iClear < pVM->pgm.s.cHandyPages)
2500 {
2501 PGMMPAGEDESC pPage = &pVM->pgm.s.aHandyPages[iClear];
2502 void *pv;
2503 rc = pgmPhysPageMapByPageID(pVM, pPage->idPage, pPage->HCPhysGCPhys, &pv);
2504 AssertLogRelMsgBreak(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc", pPage->idPage, pPage->HCPhysGCPhys, rc));
2505 ASMMemZeroPage(pv);
2506 iClear++;
2507 }
2508
2509 VM_FF_CLEAR(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
2510 }
2511 else
2512 {
2513 LogRel(("PGM: Failed to procure handy pages, rc=%Rrc cHandyPages=%u\n",
2514 rc, pVM->pgm.s.cHandyPages));
2515 rc = VERR_EM_NO_MEMORY;
2516 //rc = VINF_EM_NO_MEMORY;
2517 //VM_FF_SET(pVM, VM_FF_PGM_WE_ARE_SCREWED?);
2518 }
2519
2520/** @todo Do proper VERR_EM_NO_MEMORY reporting. */
2521 AssertMsg( pVM->pgm.s.cHandyPages == RT_ELEMENTS(pVM->pgm.s.aHandyPages)
2522 || rc != VINF_SUCCESS, ("%d rc=%Rrc\n", pVM->pgm.s.cHandyPages, rc));
2523 pgmUnlock(pVM);
2524 Assert(rc == VINF_SUCCESS || rc == VINF_EM_NO_MEMORY || rc == VERR_EM_NO_MEMORY);
2525 return rc;
2526}
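
/*
 * Usage sketch (hypothetical caller; the VM_FF_ISSET() check is assumed to be
 * the standard force-action test macro): the execution loop tops up the handy
 * page array whenever the force-action flag is pending, before re-entering
 * guest code.
 */
static int exampleServiceHandyPagesFF(PVM pVM)
{
    if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM);
        if (RT_FAILURE(rc))
            return rc;      /* typically VERR_EM_NO_MEMORY; the flag stays set and the VM has to be paused. */
    }
    return VINF_SUCCESS;
}
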
2527
2528
2529/**
2530 * Frees the specified RAM page and replaces it with the ZERO page.
2531 *
2532 * This is used by ballooning, remapping MMIO2 and RAM reset.
2533 *
2534 * @param pVM Pointer to the shared VM structure.
2535 * @param pReq Pointer to the request.
2536 * @param pPage Pointer to the page structure.
2537 * @param GCPhys The guest physical address of the page, if applicable.
2538 *
2539 * @remarks The caller must own the PGM lock.
2540 */
2541static int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys)
2542{
2543 /*
2544 * Assert sanity.
2545 */
2546 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
2547 if (RT_UNLIKELY(PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM))
2548 {
2549 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
2550 return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
2551 }
2552
2553 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO)
2554 return VINF_SUCCESS;
2555
2556 const uint32_t idPage = PGM_PAGE_GET_PAGEID(pPage);
2557 if (RT_UNLIKELY( idPage == NIL_GMM_PAGEID
2558 || idPage > GMM_PAGEID_LAST
2559 || PGM_PAGE_GET_CHUNKID(pPage) == NIL_GMM_CHUNKID))
2560 {
2561 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
2562        return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, idPage);
2563 }
2564
2565 /*
2566 * pPage = ZERO page.
2567 */
2568 PGM_PAGE_SET_HCPHYS(pPage, pVM->pgm.s.HCPhysZeroPg);
2569 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
2570 PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
2571
2572 /*
2573 * Make sure it's not in the handy page array.
2574 */
2575 uint32_t i = pVM->pgm.s.cHandyPages;
2576 while (i < RT_ELEMENTS(pVM->pgm.s.aHandyPages))
2577 {
2578 if (pVM->pgm.s.aHandyPages[i].idPage == idPage)
2579 {
2580 pVM->pgm.s.aHandyPages[i].idPage = NIL_GMM_PAGEID;
2581 break;
2582 }
2583 if (pVM->pgm.s.aHandyPages[i].idSharedPage == idPage)
2584 {
2585 pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
2586 break;
2587 }
2588 i++;
2589 }
2590
2591 /*
2592 * Push it onto the page array.
2593 */
2594 uint32_t iPage = *pcPendingPages;
2595 Assert(iPage < PGMPHYS_FREE_PAGE_BATCH_SIZE);
2596 *pcPendingPages += 1;
2597
2598 pReq->aPages[iPage].idPage = idPage;
2599
2600 if (iPage + 1 < PGMPHYS_FREE_PAGE_BATCH_SIZE)
2601 return VINF_SUCCESS;
2602
2603 /*
2604 * Flush the pages.
2605 */
2606 int rc = GMMR3FreePagesPerform(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE);
2607 if (RT_SUCCESS(rc))
2608 {
2609 GMMR3FreePagesRePrep(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2610 *pcPendingPages = 0;
2611 }
2612 return rc;
2613}
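
/*
 * Usage sketch of the batching contract above (the GMMR3FreePagesPrepare()
 * signature and the single-range walk are assumptions made for the example;
 * the Perform/Cleanup calls mirror the ones already used in this file): the
 * caller prepares one request, feeds pages through pgmPhysFreePage() which
 * flushes full batches itself, then flushes the final partial batch before
 * cleaning up.  The PGM lock must be held throughout, per the assertion above.
 */
static int exampleFreeWholeRange(PVM pVM, PPGMRAMRANGE pRam)
{
    PGMMFREEPAGESREQ pReq;
    uint32_t         cPendingPages = 0;
    int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    AssertLogRelRCReturn(rc, rc);

    uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
    for (uint32_t iPage = 0; iPage < cPages && RT_SUCCESS(rc); iPage++)
        rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRam->aPages[iPage],
                             pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));

    if (RT_SUCCESS(rc) && cPendingPages)
        rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);   /* flush the final partial batch */
    GMMR3FreePagesCleanup(pReq);
    return rc;
}
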
2614
2615
2616/**
2617 * Converts a GC physical address to a HC ring-3 pointer, with some
2618 * additional checks.
2619 *
2620 * @returns VBox status code.
2621 * @retval VINF_SUCCESS on success.
2622 * @retval VINF_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
2623 * access handler of some kind.
2624 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
2625 * accesses or is odd in any way.
2626 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
2627 *
2628 * @param pVM The VM handle.
2629 * @param GCPhys The GC physical address to convert.
2630 * @param fWritable Whether write access is required.
2631 * @param ppv Where to store the pointer corresponding to GCPhys on
2632 * success.
2633 */
2634VMMR3DECL(int) PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
2635{
2636 pgmLock(pVM);
2637
2638 PPGMRAMRANGE pRam;
2639 PPGMPAGE pPage;
2640 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
2641 if (RT_SUCCESS(rc))
2642 {
2643#ifdef VBOX_WITH_NEW_PHYS_CODE
2644 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
2645 rc = VINF_SUCCESS;
2646 else
2647 {
2648 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
2649 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
2650 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2651 {
2652 /** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
2653 * in -norawr0 mode. */
2654 if (fWritable)
2655 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
2656 }
2657 else
2658 {
2659                /* Temporarily disabled physical handler(s); since the recompiler
2660                   doesn't get notified when it's reset, we'll have to pretend it's
2661                   operating normally. */
2662 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
2663 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
2664 else
2665 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
2666 }
2667 }
2668 if (RT_SUCCESS(rc))
2669 {
2670 int rc2;
2671
2672 /* Make sure what we return is writable. */
2673 if (fWritable && rc != VINF_PGM_PHYS_TLB_CATCH_WRITE)
2674 switch (PGM_PAGE_GET_STATE(pPage))
2675 {
2676 case PGM_PAGE_STATE_ALLOCATED:
2677 break;
2678 case PGM_PAGE_STATE_ZERO:
2679 case PGM_PAGE_STATE_SHARED:
2680 case PGM_PAGE_STATE_WRITE_MONITORED:
2681 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
2682 AssertLogRelRCReturn(rc2, rc2);
2683 break;
2684 }
2685
2686 /* Get a ring-3 mapping of the address. */
2687 PPGMPAGER3MAPTLBE pTlbe;
2688 rc2 = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
2689 AssertLogRelRCReturn(rc2, rc2);
2690 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
2691 /** @todo mapping/locking hell; this isn't horribly efficient since
2692             * pgmPhysPageLoadIntoTlb will repeat the lookup we've done here. */
2693
2694 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
2695 }
2696 else
2697 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
2698
2699 /* else: handler catching all access, no pointer returned. */
2700
2701#else
2702 if (0)
2703 /* nothing */;
2704 else if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
2705 {
2706 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
2707 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
2708 else if (fWritable && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2709 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
2710 else
2711 {
2712            /* Temporarily disabled physical handler(s); since the recompiler
2713               doesn't get notified when it's reset, we'll have to pretend it's
2714               operating normally. */
2715 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
2716 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
2717 else
2718 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
2719 }
2720 }
2721 else
2722 rc = VINF_SUCCESS;
2723 if (RT_SUCCESS(rc))
2724 {
2725 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
2726 {
2727 AssertMsg(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM, ("GCPhys=%RGp type=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage)));
2728 RTGCPHYS off = GCPhys - pRam->GCPhys;
2729 unsigned iChunk = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
2730 *ppv = (void *)(pRam->paChunkR3Ptrs[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
2731 }
2732 else if (RT_LIKELY(pRam->pvR3))
2733 {
2734 AssertMsg(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, ("GCPhys=%RGp type=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage)));
2735 RTGCPHYS off = GCPhys - pRam->GCPhys;
2736 *ppv = (uint8_t *)pRam->pvR3 + off;
2737 }
2738 else
2739 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
2740 }
2741#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2742 }
2743 else
2744 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
2745
2746 pgmUnlock(pVM);
2747 return rc;
2748}
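
/*
 * Usage sketch (hypothetical caller such as a TLB loader; the pfWriteThrough
 * out-parameter is an assumption made for the example): only VINF_SUCCESS
 * permits direct writes through the returned pointer,
 * VINF_PGM_PHYS_TLB_CATCH_WRITE still yields a pointer but writes have to go
 * through PGMPhysWrite() so the access handlers fire, and the VERR_* codes
 * mean no pointer was returned.
 */
static int exampleQueryDirectPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, bool *pfWriteThrough)
{
    int rc = PGMR3PhysTlbGCPhys2Ptr(pVM, GCPhys, true /*fWritable*/, ppv);
    if (rc == VINF_SUCCESS)
        *pfWriteThrough = false;    /* read and write directly */
    else if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        *pfWriteThrough = true;     /* read directly, route writes through PGMPhysWrite() */
    else
        return rc;                  /* VERR_PGM_PHYS_TLB_CATCH_ALL or VERR_PGM_PHYS_TLB_UNASSIGNED */
    return VINF_SUCCESS;
}
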
2749
2750
2751