VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp@ 7802

Last change on this file since 7802 was 7755, checked in by vboxsync, 17 years ago

Added a flag to the MMIO2 registration and did some other MMIO2 related cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 84.1 KB
1/* $Id: PGMPhys.cpp 7755 2008-04-04 22:21:04Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/pgm.h>
24#include <VBox/cpum.h>
25#include <VBox/iom.h>
26#include <VBox/sup.h>
27#include <VBox/mm.h>
28#include <VBox/stam.h>
29#include <VBox/rem.h>
30#include <VBox/csam.h>
31#include "PGMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/dbg.h>
34#include <VBox/param.h>
35#include <VBox/err.h>
36#include <iprt/assert.h>
37#include <iprt/alloc.h>
38#include <iprt/asm.h>
39#include <VBox/log.h>
40#include <iprt/thread.h>
41#include <iprt/string.h>
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47/*static - shut up warning */
48DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
49
50
51
52/*
53 * PGMR3PhysReadByte/Word/Dword
54 * PGMR3PhysWriteByte/Word/Dword
55 */
56/** @todo rename and add U64. */
57
58#define PGMPHYSFN_READNAME PGMR3PhysReadByte
59#define PGMPHYSFN_WRITENAME PGMR3PhysWriteByte
60#define PGMPHYS_DATASIZE 1
61#define PGMPHYS_DATATYPE uint8_t
62#include "PGMPhys.h"
63
64#define PGMPHYSFN_READNAME PGMR3PhysReadWord
65#define PGMPHYSFN_WRITENAME PGMR3PhysWriteWord
66#define PGMPHYS_DATASIZE 2
67#define PGMPHYS_DATATYPE uint16_t
68#include "PGMPhys.h"
69
70#define PGMPHYSFN_READNAME PGMR3PhysReadDword
71#define PGMPHYSFN_WRITENAME PGMR3PhysWriteDword
72#define PGMPHYS_DATASIZE 4
73#define PGMPHYS_DATATYPE uint32_t
74#include "PGMPhys.h"
75
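/*
 * Not part of the original file: a sketch of what the template pattern above
 * produces. PGMPhys.h itself is not shown in this listing, so the bodies below
 * are an illustrative assumption of what each instantiation roughly expands to,
 * not the actual VirtualBox implementation.
 *
 * @code
 *      // One expansion, e.g. PGMPHYS_DATATYPE=uint8_t, PGMPHYS_DATASIZE=1:
 *      PGMR3DECL(uint8_t) PGMR3PhysReadByte(PVM pVM, RTGCPHYS GCPhys)
 *      {
 *          uint8_t u8;
 *          PGMPhysRead(pVM, GCPhys, &u8, sizeof(u8));   // hypothetical body
 *          return u8;
 *      }
 *
 *      PGMR3DECL(void) PGMR3PhysWriteByte(PVM pVM, RTGCPHYS GCPhys, uint8_t u8)
 *      {
 *          PGMPhysWrite(pVM, GCPhys, &u8, sizeof(u8));  // hypothetical body
 *      }
 *      // The template header is expected to #undef the four macros so the
 *      // next instantiation can redefine them.
 * @endcode
 */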
76
77
78/**
79 * Links a new RAM range into the list.
80 *
81 * @param pVM Pointer to the shared VM structure.
82 * @param pNew Pointer to the new list entry.
83 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
84 */
85static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
86{
87 pgmLock(pVM);
88
89 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
90 pNew->pNextR3 = pRam;
91 pNew->pNextR0 = pRam ? MMHyperCCToR0(pVM, pRam) : NIL_RTR0PTR;
92 pNew->pNextGC = pRam ? MMHyperCCToGC(pVM, pRam) : NIL_RTGCPTR;
93
94 if (pPrev)
95 {
96 pPrev->pNextR3 = pNew;
97 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
98 pPrev->pNextGC = MMHyperCCToGC(pVM, pNew);
99 }
100 else
101 {
102 pVM->pgm.s.pRamRangesR3 = pNew;
103 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
104 pVM->pgm.s.pRamRangesGC = MMHyperCCToGC(pVM, pNew);
105 }
106
107 pgmUnlock(pVM);
108}
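/*
 * Not part of the original file: illustration of why each node carries three
 * next pointers. The same PGMRAMRANGE list is traversed from ring-3, ring-0
 * and the guest/hypervisor context, so every link is stored once per context
 * (pNextR3/pNextR0/pNextGC) and the MMHyperCCTo* helpers translate the
 * current-context pointer whenever a node is linked or unlinked. A minimal
 * ring-3 walk, purely for illustration:
 *
 * @code
 *      for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
 *          LogFlow(("ram range %RGp-%RGp %s\n", pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc));
 * @endcode
 */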
109
110
111/**
112 * Unlink an existing RAM range from the list.
113 *
114 * @param pVM Pointer to the shared VM structure.
115 * @param pRam Pointer to the list entry to unlink.
116 * @param pPrev Pointer to the previous list entry. If NULL, pRam is the list head.
117 */
118static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
119{
120 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
121
122 pgmLock(pVM);
123
124 PPGMRAMRANGE pNext = pRam->pNextR3;
125 if (pPrev)
126 {
127 pPrev->pNextR3 = pNext;
128 pPrev->pNextR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
129 pPrev->pNextGC = pNext ? MMHyperCCToGC(pVM, pNext) : NIL_RTGCPTR;
130 }
131 else
132 {
133 Assert(pVM->pgm.s.pRamRangesR3 == pRam);
134 pVM->pgm.s.pRamRangesR3 = pNext;
135 pVM->pgm.s.pRamRangesR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
136 pVM->pgm.s.pRamRangesGC = pNext ? MMHyperCCToGC(pVM, pNext) : NIL_RTGCPTR;
137 }
138
139 pgmUnlock(pVM);
140}
141
142
143/**
144 * Unlink an existing RAM range from the list.
145 *
146 * @param pVM Pointer to the shared VM structure.
147 * @param pRam Pointer to the list entry to unlink.
148 */
149static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
150{
151 /* find prev. */
152 PPGMRAMRANGE pPrev = NULL;
153 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
154 while (pCur != pRam)
155 {
156 pPrev = pCur;
157 pCur = pCur->pNextR3;
158 }
159 AssertFatal(pCur);
160
161 pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
162}
163
164
165
166/**
167 * Sets up a RAM range.
168 *
169 * This will check for conflicting registrations, make a resource
170 * reservation for the memory (with GMM), and setup the per-page
171 * tracking structures (PGMPAGE).
172 *
173 * @returns VBox status code.
174 * @param pVM Pointer to the shared VM structure.
175 * @param GCPhys The physical address of the RAM.
176 * @param cb The size of the RAM.
177 * @param pszDesc The description - not copied, so, don't free or change it.
178 */
179PGMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
180{
181 /*
182 * Validate input.
183 */
184 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
185 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
186 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
187 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
188 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
189 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
190 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
191 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
192
193 /*
194 * Find range location and check for conflicts.
195 * (We don't lock here because the locking by EMT is only required on update.)
196 */
197 PPGMRAMRANGE pPrev = NULL;
198 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
199 while (pRam && GCPhysLast >= pRam->GCPhys)
200 {
201 if ( GCPhysLast >= pRam->GCPhys
202 && GCPhys <= pRam->GCPhysLast)
203 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
204 GCPhys, GCPhysLast, pszDesc,
205 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
206 VERR_PGM_RAM_CONFLICT);
207
208 /* next */
209 pPrev = pRam;
210 pRam = pRam->pNextR3;
211 }
212
213 /*
214 * Register it with GMM (the API bitches).
215 */
216 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
217 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
218 if (RT_FAILURE(rc))
219 return rc;
220
221 /*
222 * Allocate RAM range.
223 */
224 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
225 PPGMRAMRANGE pNew;
226 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
227 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
228
229 /*
230 * Initialize the range.
231 */
232 pNew->GCPhys = GCPhys;
233 pNew->GCPhysLast = GCPhysLast;
234 pNew->pszDesc = pszDesc;
235 pNew->cb = cb;
236 pNew->fFlags = 0;
237
238 pNew->pvHC = NULL;
239 pNew->pavHCChunkHC = NULL;
240 pNew->pavHCChunkGC = 0;
241
242#ifndef VBOX_WITH_NEW_PHYS_CODE
243 /* Allocate memory for chunk to HC ptr lookup array. */
244 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
245 AssertRCReturn(rc, rc);
246 pNew->pavHCChunkGC = MMHyperCCToGC(pVM, pNew->pavHCChunkHC);
247 pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
248
249#endif
250 RTGCPHYS iPage = cPages;
251 while (iPage-- > 0)
252 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
253
254 /*
255 * Insert the new RAM range.
256 */
257 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
258
259 /*
260 * Notify REM.
261 */
262#ifdef VBOX_WITH_NEW_PHYS_CODE
263 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, 0);
264#else
265 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, MM_RAM_FLAGS_DYNAMIC_ALLOC);
266#endif
267
268 return VINF_SUCCESS;
269}
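/*
 * Not part of the original file: a hedged usage sketch for PGMR3PhysRegisterRam.
 * The caller (typically MM/VMM init code running on the EMT) registers each
 * base-RAM chunk with a page-aligned address and size; the size and description
 * string below are illustrative assumptions, not values from this source.
 *
 * @code
 *      // Register 128 MB of base RAM starting at guest physical address 0.
 *      int rc = PGMR3PhysRegisterRam(pVM, 0, 128 * _1M, "Base RAM");
 *      AssertLogRelRCReturn(rc, rc);
 * @endcode
 */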
270
271
272/**
273 * Resets (zeros) the RAM.
274 *
275 * ASSUMES that the caller owns the PGM lock.
276 *
277 * @returns VBox status code.
278 * @param pVM Pointer to the shared VM structure.
279 */
280int pgmR3PhysRamReset(PVM pVM)
281{
282 /*
283 * Walk the ram ranges.
284 */
285 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
286 {
287 uint32_t iPage = pRam->cb >> PAGE_SHIFT; Assert((RTGCPHYS)iPage << PAGE_SHIFT == pRam->cb);
288#ifdef VBOX_WITH_NEW_PHYS_CODE
289 if (!pVM->pgm.s.fRamPreAlloc)
290 {
291 /* Replace all RAM pages by ZERO pages. */
292 while (iPage-- > 0)
293 {
294 PPGMPAGE pPage = &pRam->aPages[iPage];
295 switch (PGM_PAGE_GET_TYPE(pPage))
296 {
297 case PGMPAGETYPE_RAM:
298 if (!PGM_PAGE_IS_ZERO(pPage))
299 pgmPhysFreePage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
300 break;
301
302 case PGMPAGETYPE_MMIO2:
303 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
304 case PGMPAGETYPE_ROM:
305 case PGMPAGETYPE_MMIO:
306 break;
307 default:
308 AssertFailed();
309 }
310 } /* for each page */
311 }
312 else
313#endif
314 {
315 /* Zero the memory. */
316 while (iPage-- > 0)
317 {
318 PPGMPAGE pPage = &pRam->aPages[iPage];
319 switch (PGM_PAGE_GET_TYPE(pPage))
320 {
321#ifndef VBOX_WITH_NEW_PHYS_CODE
322 case PGMPAGETYPE_INVALID:
323 case PGMPAGETYPE_RAM:
324 if (pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)) /** @todo PAGE FLAGS */
325 {
326 /* shadow ram is reloaded elsewhere. */
327 Log4(("PGMR3Reset: not clearing phys page %RGp due to flags %RHp\n", pRam->GCPhys + (iPage << PAGE_SHIFT), pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO))); /** @todo PAGE FLAGS */
328 continue;
329 }
330 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
331 {
332 unsigned iChunk = iPage >> (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
333 if (pRam->pavHCChunkHC[iChunk])
334 ASMMemZero32((char *)pRam->pavHCChunkHC[iChunk] + ((iPage << PAGE_SHIFT) & PGM_DYNAMIC_CHUNK_OFFSET_MASK), PAGE_SIZE);
335 }
336 else
337 ASMMemZero32((char *)pRam->pvHC + (iPage << PAGE_SHIFT), PAGE_SIZE);
338 break;
339#else /* VBOX_WITH_NEW_PHYS_CODE */
340 case PGMPAGETYPE_RAM:
341 switch (PGM_PAGE_GET_STATE(pPage))
342 {
343 case PGM_PAGE_STATE_ZERO:
344 break;
345 case PGM_PAGE_STATE_SHARED:
346 case PGM_PAGE_STATE_WRITE_MONITORED:
347 rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
348 AssertLogRelRCReturn(rc, rc);
349 case PGM_PAGE_STATE_ALLOCATED:
350 {
351 void *pvPage;
352 PPGMPAGEMAP pMapIgnored;
353 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pMapIgnored, &pvPage);
354 AssertLogRelRCReturn(rc, rc);
355 ASMMemZeroPage(pvPage);
356 break;
357 }
358 }
359 break;
360#endif /* VBOX_WITH_NEW_PHYS_CODE */
361
362 case PGMPAGETYPE_MMIO2:
363 case PGMPAGETYPE_ROM_SHADOW:
364 case PGMPAGETYPE_ROM:
365 case PGMPAGETYPE_MMIO:
366 break;
367 default:
368 AssertFailed();
369
370 }
371 } /* for each page */
372 }
373
374 }
375
376 return VINF_SUCCESS;
377}
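/*
 * Not part of the original file: sketch of the expected call site for
 * pgmR3PhysRamReset (and pgmR3PhysRomReset further down). Both ASSUME the
 * caller owns the PGM lock, so a reset path would look roughly like this;
 * the exact call site in PGMR3Reset is an assumption, not a quote.
 *
 * @code
 *      pgmLock(pVM);
 *      int rc = pgmR3PhysRamReset(pVM);
 *      if (RT_SUCCESS(rc))
 *          rc = pgmR3PhysRomReset(pVM);
 *      pgmUnlock(pVM);
 * @endcode
 */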
378
379
380/**
381 * This is the interface IOM is using to register an MMIO region.
382 *
383 * It will check for conflicts and ensure that a RAM range structure
384 * is present before calling the PGMR3HandlerPhysicalRegister API to
385 * register the callbacks.
386 *
387 * @returns VBox status code.
388 *
389 * @param pVM Pointer to the shared VM structure.
390 * @param GCPhys The start of the MMIO region.
391 * @param cb The size of the MMIO region.
392 * @param pfnHandlerR3 The address of the ring-3 handler. (IOMR3MMIOHandler)
393 * @param pvUserR3 The user argument for R3.
394 * @param pfnHandlerR0 The address of the ring-0 handler. (IOMMMIOHandler)
395 * @param pvUserR0 The user argument for R0.
396 * @param pfnHandlerGC The address of the GC handler. (IOMMMIOHandler)
397 * @param pvUserGC The user argument for GC.
398 * @param pszDesc The description of the MMIO region.
399 */
400PDMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb,
401 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
402 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
403 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
404 R3PTRTYPE(const char *) pszDesc)
405{
406 /*
407 * Assert on some assumptions.
408 */
409 VM_ASSERT_EMT(pVM);
410 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
411 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
412 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
413 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
414
415 /*
416 * Make sure there's a RAM range structure for the region.
417 */
418 int rc;
419 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
420 bool fRamExists = false;
421 PPGMRAMRANGE pRamPrev = NULL;
422 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
423 while (pRam && GCPhysLast >= pRam->GCPhys)
424 {
425 if ( GCPhysLast >= pRam->GCPhys
426 && GCPhys <= pRam->GCPhysLast)
427 {
428 /* Simplification: all within the same range. */
429 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
430 && GCPhysLast <= pRam->GCPhysLast,
431 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
432 GCPhys, GCPhysLast, pszDesc,
433 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
434 VERR_PGM_RAM_CONFLICT);
435
436 /* Check that it's all RAM or MMIO pages. */
437 PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
438 uint32_t cLeft = cb >> PAGE_SHIFT;
439 while (cLeft-- > 0)
440 {
441 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
442 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
443 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
444 GCPhys, GCPhysLast, pszDesc, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
445 VERR_PGM_RAM_CONFLICT);
446 pPage++;
447 }
448
449 /* Looks good. */
450 fRamExists = true;
451 break;
452 }
453
454 /* next */
455 pRamPrev = pRam;
456 pRam = pRam->pNextR3;
457 }
458 PPGMRAMRANGE pNew;
459 if (fRamExists)
460 pNew = NULL;
461 else
462 {
463 /*
464 * No RAM range, insert an ad-hoc one.
465 *
466 * Note that we don't have to tell REM about this range because
467 * PGMHandlerPhysicalRegisterEx will do that for us.
468 */
469 Log(("PGMR3PhysMMIORegister: Adding ad-hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));
470
471 const uint32_t cPages = cb >> PAGE_SHIFT;
472 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
473 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), 16, MM_TAG_PGM_PHYS, (void **)&pNew);
474 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
475
476 /* Initialize the range. */
477 pNew->GCPhys = GCPhys;
478 pNew->GCPhysLast = GCPhysLast;
479 pNew->pszDesc = pszDesc;
480 pNew->cb = cb;
481 pNew->fFlags = 0; /* Some MMIO flag here? */
482
483 pNew->pvHC = NULL;
484 pNew->pavHCChunkHC = NULL;
485 pNew->pavHCChunkGC = 0;
486
487 uint32_t iPage = cPages;
488 while (iPage-- > 0)
489 PGM_PAGE_INIT_ZERO_REAL(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
490 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);
491
492 /* link it */
493 pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
494 }
495
496 /*
497 * Register the access handler.
498 */
499 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_MMIO, GCPhys, GCPhysLast,
500 pfnHandlerR3, pvUserR3,
501 pfnHandlerR0, pvUserR0,
502 pfnHandlerGC, pvUserGC, pszDesc);
503 if ( RT_FAILURE(rc)
504 && !fRamExists)
505 {
506 /* remove the ad-hoc range. */
507 pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
508 pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
509 MMHyperFree(pVM, pNew);
510 }
511
512 return rc;
513}
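/*
 * Not part of the original file: a hedged sketch of how IOM might call
 * PGMR3PhysMMIORegister when a device registers an MMIO range. The handler
 * names follow the hints in the doc comment above; GCPhysStart, cbRange and
 * the pvUser* arguments are assumed locals supplied by IOM, so the exact
 * wiring is an illustrative assumption.
 *
 * @code
 *      int rc = PGMR3PhysMMIORegister(pVM, GCPhysStart, cbRange,
 *                                     IOMR3MMIOHandler, pvUserR3,
 *                                     IOMMMIOHandler,   pvUserR0,
 *                                     IOMMMIOHandler,   pvUserGC,
 *                                     "Some device MMIO");
 *      AssertRCReturn(rc, rc);
 * @endcode
 */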
514
515
516/**
517 * This is the interface IOM is using to deregister an MMIO region.
518 *
519 * It will take care of calling PGMHandlerPhysicalDeregister and clean up
520 * any ad-hoc PGMRAMRANGE left behind.
521 *
522 * @returns VBox status code.
523 * @param pVM Pointer to the shared VM structure.
524 * @param GCPhys The start of the MMIO region.
525 * @param cb The size of the MMIO region.
526 */
527PDMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
528{
529 VM_ASSERT_EMT(pVM);
530
531 /*
532 * First deregister the handler, then check if we should remove the ram range.
533 */
534 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
535 if (RT_SUCCESS(rc))
536 {
537 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
538 PPGMRAMRANGE pRamPrev = NULL;
539 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
540 while (pRam && GCPhysLast >= pRam->GCPhys)
541 {
542 /*if ( GCPhysLast >= pRam->GCPhys
543 && GCPhys <= pRam->GCPhysLast) - later */
544 if ( GCPhysLast == pRam->GCPhysLast
545 && GCPhys == pRam->GCPhys)
546 {
547 Assert(pRam->cb == cb);
548
549 /*
550 * See if all the pages are dead MMIO pages.
551 */
552 bool fAllMMIO = true;
553 PPGMPAGE pPage = &pRam->aPages[0];
554 uint32_t cLeft = cb >> PAGE_SHIFT;
555 while (cLeft-- > 0)
556 {
557 if ( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO
558 /*|| not-out-of-action later */)
559 {
560 fAllMMIO = false;
561 break;
562 }
563 pPage++;
564 }
565
566 /*
567 * Unlink it and free if it's all MMIO.
568 */
569 if (fAllMMIO)
570 {
571 Log(("PGMR3PhysMMIODeregister: Freeing ad-hoc MMIO range for %RGp-%RGp %s\n",
572 GCPhys, GCPhysLast, pRam->pszDesc));
573
574 pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
575 pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
576 MMHyperFree(pVM, pRam);
577 }
578 break;
579 }
580
581 /* next */
582 pRamPrev = pRam;
583 pRam = pRam->pNextR3;
584 }
585 }
586
587 return rc;
588}
589
590
591/**
592 * Locate a MMIO2 range.
593 *
594 * @returns Pointer to the MMIO2 range.
595 * @param pVM Pointer to the shared VM structure.
596 * @param pDevIns The device instance owning the region.
597 * @param iRegion The region.
598 */
599DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
600{
601 /*
602 * Search the list.
603 */
604 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
605 if (pCur->pDevInsR3 == pDevIns && pCur->iRegion == iRegion)
606 return pCur;
607 return NULL;
608}
609
610
611/**
612 * Allocate and register a MMIO2 region.
613 *
614 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's
615 * RAM associated with a device. It is also non-shared memory with a
616 * permanent ring-3 mapping and page backing (presently).
617 *
618 * A MMIO2 range may overlap with base memory if a lot of RAM
619 * is configured for the VM, in which case we'll drop the base
620 * memory pages. Presently we will make no attempt to preserve
621 * anything that happens to be present in the base memory that
622 * is replaced; this is of course incorrect, but it's too much
623 * effort.
624 *
625 * @returns VBox status code.
626 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the memory.
627 * @retval VERR_ALREADY_EXISTS if the region already exists.
628 *
629 * @param pVM Pointer to the shared VM structure.
630 * @param pDevIns The device instance owning the region.
631 * @param iRegion The region number. If the MMIO2 memory is a PCI I/O region
632 * this number has to be the number of that region. Otherwise
632 * it can be any number not exceeding UINT8_MAX.
634 * @param cb The size of the region. Must be page aligned.
635 * @param fFlags Reserved for future use, must be zero.
636 * @param ppv Where to store the pointer to the ring-3 mapping of the memory.
637 * @param pszDesc The description.
638 */
639PDMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc)
640{
641 /*
642 * Validate input.
643 */
644 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
645 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
646 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
647 AssertPtrReturn(ppv, VERR_INVALID_POINTER);
648 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
649 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
650 AssertReturn(pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
651 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
652 AssertReturn(cb, VERR_INVALID_PARAMETER);
653 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
654
655 const uint32_t cPages = cb >> PAGE_SHIFT;
656 AssertLogRelReturn((RTGCPHYS)cPages << PAGE_SHIFT == cb, VERR_INVALID_PARAMETER);
657 AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY);
658
659 /*
660 * Try reserve and allocate the backing memory first as this is what is
661 * most likely to fail.
662 */
663 int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc);
664 if (RT_FAILURE(rc))
665 return rc;
666
667 void *pvPages;
668 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE));
669 if (RT_SUCCESS(rc))
670 rc = SUPPageAllocLockedEx(cPages, &pvPages, paPages);
671 if (RT_SUCCESS(rc))
672 {
673 /*
674 * Create the MMIO2 range record for it.
675 */
676 const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]);
677 PPGMMMIO2RANGE pNew;
678 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
679 AssertLogRelMsgRC(rc, ("cbRamRange=%zu\n", cbRange));
680 if (RT_SUCCESS(rc))
681 {
682 pNew->pDevInsR3 = pDevIns;
683 pNew->pvR3 = pvPages;
684 //pNew->pNext = NULL;
685 //pNew->fMapped = false;
686 //pNew->fOverlapping = false;
687 pNew->iRegion = iRegion;
688 pNew->RamRange.GCPhys = NIL_RTGCPHYS;
689 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
690 pNew->RamRange.pszDesc = pszDesc;
691 pNew->RamRange.cb = cb;
692 //pNew->RamRange.fFlags = 0;
693
694 pNew->RamRange.pvHC = pvPages; ///@todo remove this
695 pNew->RamRange.pavHCChunkHC = NULL; ///@todo remove this
696 pNew->RamRange.pavHCChunkGC = 0; ///@todo remove this
697
698 uint32_t iPage = cPages;
699 while (iPage-- > 0)
700 {
701 PGM_PAGE_INIT(&pNew->RamRange.aPages[iPage],
702 paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
703 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
704 }
705
706 /*
707 * Link it into the list.
708 * Since there is no particular order, just push it.
709 */
710 pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3;
711 pVM->pgm.s.pMmio2RangesR3 = pNew;
712
713 *ppv = pvPages;
714 RTMemTmpFree(paPages);
715 return VINF_SUCCESS;
716 }
717
718 SUPPageFreeLocked(pvPages, cPages);
719 }
720 RTMemTmpFree(paPages);
721 MMR3AdjustFixedReservation(pVM, -cPages, pszDesc);
722 return rc;
723}
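/*
 * Not part of the original file: a hedged usage sketch for the MMIO2 API.
 * A device (say a graphics adapter) would register its framebuffer-style
 * memory during construction and later hand the region to the PCI code;
 * the region number, size and description below are illustrative assumptions.
 *
 * @code
 *      void *pvVRam;
 *      int rc = PGMR3PhysMMIO2Register(pVM, pDevIns, 0, 8 * _1M, 0, &pvVRam, "VRam");
 *      AssertLogRelRCReturn(rc, rc);
 *      // iRegion=0, fFlags=0; pvVRam is now a permanent ring-3 mapping the
 *      // device can read and write directly.
 * @endcode
 */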
724
725
726/**
727 * Deregisters and frees a MMIO2 region.
728 *
729 * Any physical (and virtual) access handlers registered for the region must
730 * be deregistered before calling this function.
731 *
732 * @returns VBox status code.
733 * @param pVM Pointer to the shared VM structure.
734 * @param pDevIns The device instance owning the region.
735 * @param iRegion The region. If it's UINT32_MAX it'll be a wildcard match.
736 */
737PDMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
738{
739 /*
740 * Validate input.
741 */
742 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
743 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
744 AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
745
746 int rc = VINF_SUCCESS;
747 unsigned cFound = 0;
748 PPGMMMIO2RANGE pPrev = NULL;
749 PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3;
750 while (pCur)
751 {
752 if ( pCur->pDevInsR3 == pDevIns
753 && ( iRegion == UINT32_MAX
754 || pCur->iRegion == iRegion))
755 {
756 cFound++;
757
758 /*
759 * Unmap it if it's mapped.
760 */
761 if (pCur->fMapped)
762 {
763 int rc2 = PGMR3PhysMMIO2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);
764 AssertRC(rc2);
765 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
766 rc = rc2;
767 }
768
769 /*
770 * Unlink it
771 */
772 PPGMMMIO2RANGE pNext = pCur->pNextR3;
773 if (pPrev)
774 pPrev->pNextR3 = pNext;
775 else
776 pVM->pgm.s.pMmio2RangesR3 = pNext;
777 pCur->pNextR3 = NULL;
778
779 /*
780 * Free the memory.
781 */
782 int rc2 = SUPPageFreeLocked(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT);
783 AssertRC(rc2);
784 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
785 rc = rc2;
786
787 rc2 = MMR3AdjustFixedReservation(pVM, -(pCur->RamRange.cb >> PAGE_SHIFT), pCur->RamRange.pszDesc);
788 AssertRC(rc2);
789 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
790 rc = rc2;
791
792 /* we're leaking hyper memory here if done at runtime. */
793 Assert( VMR3GetState(pVM) == VMSTATE_OFF
794 || VMR3GetState(pVM) == VMSTATE_DESTROYING
795 || VMR3GetState(pVM) == VMSTATE_TERMINATED);
796 /*rc = MMHyperFree(pVM, pCur);
797 AssertRCReturn(rc, rc); - not safe, see the alloc call. */
798
799 /* next */
800 pCur = pNext;
801 }
802 else
803 {
804 pPrev = pCur;
805 pCur = pCur->pNextR3;
806 }
807 }
808
809 return !cFound && iRegion != UINT32_MAX ? VERR_NOT_FOUND : rc;
810}
811
812
813/**
814 * Maps a MMIO2 region.
815 *
816 * This is done when a guest / the bios / state loading changes the
817 * PCI config. The replacing of base memory has the same restrictions
818 * as during registration, of course.
819 *
820 * @returns VBox status code.
821 *
822 * @param pVM Pointer to the shared VM structure.
823 * @param pDevIns The device instance owning the region.
824 * @param iRegion The region number used when the region was registered.
825 * @param GCPhys The guest physical address to map the region at.
824 */
825PDMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
826{
827 /*
828 * Validate input
829 */
830 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
831 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
832 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
833 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
834 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
835 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
836
837 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
838 AssertReturn(pCur, VERR_NOT_FOUND);
839 AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER);
840 Assert(pCur->RamRange.GCPhys == NIL_RTGCPHYS);
841 Assert(pCur->RamRange.GCPhysLast == NIL_RTGCPHYS);
842
843 const RTGCPHYS GCPhysLast = GCPhys + pCur->RamRange.cb - 1;
844 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
845
846 /*
847 * Find our location in the ram range list, checking for
848 * restriction we don't bother implementing yet (partially overlapping).
849 */
850 bool fRamExists = false;
851 PPGMRAMRANGE pRamPrev = NULL;
852 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
853 while (pRam && GCPhysLast >= pRam->GCPhys)
854 {
855 if ( GCPhys <= pRam->GCPhysLast
856 && GCPhysLast >= pRam->GCPhys)
857 {
858 /* completely within? */
859 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
860 && GCPhysLast <= pRam->GCPhysLast,
861 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
862 GCPhys, GCPhysLast, pCur->RamRange.pszDesc,
863 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
864 VERR_PGM_RAM_CONFLICT);
865 fRamExists = true;
866 break;
867 }
868
869 /* next */
870 pRamPrev = pRam;
871 pRam = pRam->pNextR3;
872 }
873 if (fRamExists)
874 {
875 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
876 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
877 while (cPagesLeft-- > 0)
878 {
879 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
880 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
881 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc),
882 VERR_PGM_RAM_CONFLICT);
883 pPage++;
884 }
885 }
886 Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n",
887 GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc));
888
889 /*
890 * Make the changes.
891 */
892 pgmLock(pVM);
893
894 pCur->RamRange.GCPhys = GCPhys;
895 pCur->RamRange.GCPhysLast = GCPhysLast;
896 pCur->fMapped = true;
897 pCur->fOverlapping = fRamExists;
898
899 if (fRamExists)
900 {
901 /* replace the pages, freeing all present RAM pages. */
902 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
903 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
904 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
905 while (cPagesLeft-- > 0)
906 {
907 pgmPhysFreePage(pVM, pPageDst, GCPhys);
908
909 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
910 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhys);
911 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_MMIO2);
912 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ALLOCATED);
913
914 GCPhys += PAGE_SIZE;
915 pPageSrc++;
916 pPageDst++;
917 }
918 }
919 else
920 {
921 /* link in the ram range */
922 pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev);
923 REMR3NotifyPhysRamRegister(pVM, GCPhys, pCur->RamRange.cb, 0);
924 }
925
926 pgmUnlock(pVM);
927
928 return VINF_SUCCESS;
929}
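/*
 * Not part of the original file: a hedged sketch of the map/unmap pairing.
 * A PCI region-map callback would typically unmap the old address (if any)
 * and map the new one. GCPhysOld and GCPhysNew are assumed to come from the
 * PCI code, with NIL_RTGCPHYS assumed to mean "not mapped"; iRegion 0 is an
 * illustrative choice.
 *
 * @code
 *      int rc;
 *      if (GCPhysOld != NIL_RTGCPHYS)
 *      {
 *          rc = PGMR3PhysMMIO2Unmap(pVM, pDevIns, 0, GCPhysOld);
 *          AssertRCReturn(rc, rc);
 *      }
 *      if (GCPhysNew != NIL_RTGCPHYS)
 *      {
 *          rc = PGMR3PhysMMIO2Map(pVM, pDevIns, 0, GCPhysNew);
 *          AssertRCReturn(rc, rc);
 *      }
 * @endcode
 */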
930
931
932/**
933 * Unmaps a MMIO2 region.
934 *
935 * This is done when a guest / the bios / state loading changes the
936 * PCI config. The replacing of base memory has the same restrictions
937 * as during registration, of course.
938 */
939PDMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
940{
941 /*
942 * Validate input
943 */
944 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
945 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
946 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
947 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
948 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
949 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
950
951 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
952 AssertReturn(pCur, VERR_NOT_FOUND);
953 AssertReturn(pCur->fMapped, VERR_WRONG_ORDER);
954 AssertReturn(pCur->RamRange.GCPhys == GCPhys, VERR_INVALID_PARAMETER);
955 Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS);
956
957 Log(("PGMR3PhysMMIO2Unmap: %RGp-%RGp %s\n",
958 pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc));
959
960 /*
961 * Unmap it.
962 */
963 pgmLock(pVM);
964
965 if (pCur->fOverlapping)
966 {
967 /* Restore the RAM pages we've replaced. */
968 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
969 while (pRam->GCPhys > pCur->RamRange.GCPhysLast)
970 pRam = pRam->pNextR3;
971
972 RTHCPHYS const HCPhysZeroPg = pVM->pgm.s.HCPhysZeroPg;
973 Assert(HCPhysZeroPg != 0 && HCPhysZeroPg != NIL_RTHCPHYS);
974 PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
975 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
976 while (cPagesLeft-- > 0)
977 {
978 PGM_PAGE_SET_HCPHYS(pPageDst, pVM->pgm.s.HCPhysZeroPg);
979 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_RAM);
980 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ZERO);
981
982 pPageDst++;
983 }
984 }
985 else
986 {
987 REMR3NotifyPhysReserve(pVM, pCur->RamRange.GCPhys, pCur->RamRange.cb);
988 pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange);
989 }
990
991 pCur->RamRange.GCPhys = NIL_RTGCPHYS;
992 pCur->RamRange.GCPhysLast = NIL_RTGCPHYS;
993 pCur->fOverlapping = false;
994 pCur->fMapped = false;
995
996 pgmUnlock(pVM);
997
998 return VINF_SUCCESS;
999}
1000
1001
1002/**
1003 * Checks if the given address is an MMIO2 base address or not.
1004 *
1005 * @returns true/false accordingly.
1006 * @param pVM Pointer to the shared VM structure.
1007 * @param pDevIns The owner of the memory, optional.
1008 * @param GCPhys The address to check.
1009 */
1010PDMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
1011{
1012 /*
1013 * Validate input
1014 */
1015 VM_ASSERT_EMT_RETURN(pVM, false);
1016 AssertPtrReturn(pDevIns, false);
1017 AssertReturn(GCPhys != NIL_RTGCPHYS, false);
1018 AssertReturn(GCPhys != 0, false);
1019 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);
1020
1021 /*
1022 * Search the list.
1023 */
1024 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
1025 if (pCur->RamRange.GCPhys == GCPhys)
1026 {
1027 Assert(pCur->fMapped);
1028 return true;
1029 }
1030 return false;
1031}
1032
1033
1034/**
1035 * Gets the HC physical address of a page in the MMIO2 region.
1036 *
1037 * This API is intended for MMHyper and shouldn't be called
1038 * by anyone else...
1039 *
1040 * @returns VBox status code.
1041 * @param pVM Pointer to the shared VM structure.
1042 * @param pDevIns The owner of the memory, optional.
1043 * @param iRegion The region.
1044 * @param off The page expressed as an offset into the MMIO2 region.
1045 * @param pHCPhys Where to store the result.
1046 */
1047PDMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
1048{
1049 /*
1050 * Validate input
1051 */
1052 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1053 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1054 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1055
1056 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1057 AssertReturn(pCur, VERR_NOT_FOUND);
1058 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
1059
1060 PCPGMPAGE pPage = &pCur->RamRange.aPages[off >> PAGE_SHIFT];
1061 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1062 return VINF_SUCCESS;
1063}
1064
1065
1066/**
1067 * Registers a ROM image.
1068 *
1069 * Shadowed ROM images require double the amount of backing memory, so
1070 * don't use that unless you have to. Shadowing of ROM images is a process
1071 * where we can select where the reads go and where the writes go. On real
1072 * hardware the chipset provides means to configure this. We provide
1073 * PGMR3PhysRomProtect() for this purpose.
1074 *
1075 * A read-only copy of the ROM image will always be kept around while we
1076 * will allocate RAM pages for the changes on demand (unless all memory
1077 * is configured to be preallocated).
1078 *
1079 * @returns VBox status.
1080 * @param pVM VM Handle.
1081 * @param pDevIns The device instance owning the ROM.
1082 * @param GCPhys First physical address in the range.
1083 * Must be page aligned!
1084 * @param cb The size of the range (in bytes).
1085 * Must be page aligned!
1086 * @param pvBinary Pointer to the binary data backing the ROM image.
1087 * This must be exactly \a cb in size.
1088 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAG_SHADOWED
1089 * and/or PGMPHYS_ROM_FLAG_PERMANENT_BINARY.
1090 * @param pszDesc Pointer to description string. This must not be freed.
1091 *
1092 * @remark There is no way to remove the ROM yet, either automatically on device
1093 * cleanup or manually from the device. This isn't difficult in any way, it's
1094 * just not something we expect to be necessary for a while.
1095 */
1096PGMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
1097 const void *pvBinary, uint32_t fFlags, const char *pszDesc)
1098{
1099 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p fFlags=%#x pszDesc=%s\n",
1100 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, fFlags, pszDesc));
1101
1102 /*
1103 * Validate input.
1104 */
1105 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1106 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
1107 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
1108 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1109 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1110 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
1111 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1112 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAG_SHADOWED | PGMPHYS_ROM_FLAG_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
1113 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
1114
1115 const uint32_t cPages = cb >> PAGE_SHIFT;
1116
1117 /*
1118 * Find the ROM location in the ROM list first.
1119 */
1120 PPGMROMRANGE pRomPrev = NULL;
1121 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
1122 while (pRom && GCPhysLast >= pRom->GCPhys)
1123 {
1124 if ( GCPhys <= pRom->GCPhysLast
1125 && GCPhysLast >= pRom->GCPhys)
1126 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
1127 GCPhys, GCPhysLast, pszDesc,
1128 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
1129 VERR_PGM_RAM_CONFLICT);
1130 /* next */
1131 pRomPrev = pRom;
1132 pRom = pRom->pNextR3;
1133 }
1134
1135 /*
1136 * Find the RAM location and check for conflicts.
1137 *
1138 * Conflict detection is a bit different than for RAM
1139 * registration since a ROM can be located within a RAM
1140 * range. So, what we have to check for is other memory
1141 * types (other than RAM that is) and that we don't span
1142 * more than one RAM range (lazy).
1143 */
1144 bool fRamExists = false;
1145 PPGMRAMRANGE pRamPrev = NULL;
1146 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1147 while (pRam && GCPhysLast >= pRam->GCPhys)
1148 {
1149 if ( GCPhys <= pRam->GCPhysLast
1150 && GCPhysLast >= pRam->GCPhys)
1151 {
1152 /* completely within? */
1153 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
1154 && GCPhysLast <= pRam->GCPhysLast,
1155 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
1156 GCPhys, GCPhysLast, pszDesc,
1157 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1158 VERR_PGM_RAM_CONFLICT);
1159 fRamExists = true;
1160 break;
1161 }
1162
1163 /* next */
1164 pRamPrev = pRam;
1165 pRam = pRam->pNextR3;
1166 }
1167 if (fRamExists)
1168 {
1169 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1170 uint32_t cPagesLeft = cPages;
1171 while (cPagesLeft-- > 0)
1172 {
1173 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
1174 ("%RGp isn't a RAM page (%d) - registering %RGp-%RGp (%s).\n",
1175 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pszDesc),
1176 VERR_PGM_RAM_CONFLICT);
1177 Assert(PGM_PAGE_IS_ZERO(pPage));
1178 pPage++;
1179 }
1180 }
1181
1182 /*
1183 * Update the base memory reservation if necessary.
1184 */
1185 uint32_t cExtraBaseCost = fRamExists ? cPages : 0;
1186 if (fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
1187 cExtraBaseCost += cPages;
1188 if (cExtraBaseCost)
1189 {
1190 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
1191 if (RT_FAILURE(rc))
1192 return rc;
1193 }
1194
1195 /*
1196 * Allocate memory for the virgin copy of the RAM.
1197 */
1198 PGMMALLOCATEPAGESREQ pReq;
1199 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
1200 AssertRCReturn(rc, rc);
1201
1202 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1203 {
1204 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
1205 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
1206 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
1207 }
1208
1209 pgmLock(pVM);
1210 rc = GMMR3AllocatePagesPerform(pVM, pReq);
1211 pgmUnlock(pVM);
1212 if (RT_FAILURE(rc))
1213 {
1214 GMMR3AllocatePagesCleanup(pReq);
1215 return rc;
1216 }
1217
1218 /*
1219 * Allocate the new ROM range and RAM range (if necessary).
1220 */
1221 PPGMROMRANGE pRomNew;
1222 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
1223 if (RT_SUCCESS(rc))
1224 {
1225 PPGMRAMRANGE pRamNew = NULL;
1226 if (!fRamExists)
1227 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
1228 if (RT_SUCCESS(rc))
1229 {
1230 pgmLock(pVM);
1231
1232 /*
1233 * Initialize and insert the RAM range (if required).
1234 */
1235 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
1236 if (!fRamExists)
1237 {
1238 pRamNew->GCPhys = GCPhys;
1239 pRamNew->GCPhysLast = GCPhysLast;
1240 pRamNew->pszDesc = pszDesc;
1241 pRamNew->cb = cb;
1242 pRamNew->fFlags = 0;
1243 pRamNew->pvHC = NULL;
1244
1245 PPGMPAGE pPage = &pRamNew->aPages[0];
1246 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
1247 {
1248 PGM_PAGE_INIT(pPage,
1249 pReq->aPages[iPage].HCPhysGCPhys,
1250 pReq->aPages[iPage].idPage,
1251 PGMPAGETYPE_ROM,
1252 PGM_PAGE_STATE_ALLOCATED);
1253
1254 pRomPage->Virgin = *pPage;
1255 }
1256
1257 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
1258 }
1259 else
1260 {
1261 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1262 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
1263 {
1264 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_ROM);
1265 PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
1266 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1267 PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);
1268
1269 pRomPage->Virgin = *pPage;
1270 }
1271
1272 pRamNew = pRam;
1273 }
1274 pgmUnlock(pVM);
1275
1276
1277 /*
1278 * Register the write access handler for the range (PGMROMPROT_READ_ROM_WRITE_IGNORE).
1279 */
1280 rc = PGMR3HandlerPhysicalRegister(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhys, GCPhysLast,
1281#if 0 /** @todo we actually need a ring-3 write handler here for shadowed ROMs, so hack REM! */
1282 pgmR3PhysRomWriteHandler, pRomNew,
1283#else
1284 NULL, NULL,
1285#endif
1286 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
1287 NULL, "pgmPhysRomWriteHandler", MMHyperCCToGC(pVM, pRomNew), pszDesc);
1288 if (RT_SUCCESS(rc))
1289 {
1290 pgmLock(pVM);
1291
1292 /*
1293 * Copy the image over to the virgin pages.
1294 * This must be done after linking in the RAM range.
1295 */
1296 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
1297 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
1298 {
1299 void *pvDstPage;
1300 PPGMPAGEMAP pMapIgnored;
1301 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pMapIgnored, &pvDstPage);
1302 if (RT_FAILURE(rc))
1303 {
1304 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
1305 break;
1306 }
1307 memcpy(pvDstPage, (const uint8_t *)pvBinary + (iPage << PAGE_SHIFT), PAGE_SIZE);
1308 }
1309 if (RT_SUCCESS(rc))
1310 {
1311 /*
1312 * Initialize the ROM range.
1313 * Note that the Virgin member of the pages has already been initialized above.
1314 */
1315 pRomNew->GCPhys = GCPhys;
1316 pRomNew->GCPhysLast = GCPhysLast;
1317 pRomNew->cb = cb;
1318 pRomNew->fFlags = fFlags;
1319 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAG_PERMANENT_BINARY ? pvBinary : NULL;
1320 pRomNew->pszDesc = pszDesc;
1321
1322 for (unsigned iPage = 0; iPage < cPages; iPage++)
1323 {
1324 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
1325 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
1326 PGM_PAGE_INIT_ZERO_REAL(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
1327 }
1328
1329 /*
1330 * Insert the ROM range, tell REM and return successfully.
1331 */
1332 pRomNew->pNextR3 = pRom;
1333 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
1334 pRomNew->pNextGC = pRom ? MMHyperCCToGC(pVM, pRom) : NIL_RTGCPTR;
1335
1336 if (pRomPrev)
1337 {
1338 pRomPrev->pNextR3 = pRomNew;
1339 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
1340 pRomPrev->pNextGC = MMHyperCCToGC(pVM, pRomNew);
1341 }
1342 else
1343 {
1344 pVM->pgm.s.pRomRangesR3 = pRomNew;
1345 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
1346 pVM->pgm.s.pRomRangesGC = MMHyperCCToGC(pVM, pRomNew);
1347 }
1348
1349 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false); /** @todo fix shadowing and REM. */
1350
1351 GMMR3AllocatePagesCleanup(pReq);
1352 pgmUnlock(pVM);
1353 return VINF_SUCCESS;
1354 }
1355
1356 /* bail out */
1357
1358 pgmUnlock(pVM);
1359 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
1360 AssertRC(rc2);
1361 pgmLock(pVM);
1362 }
1363
1364 if (!fRamExists && pRamNew)
1365 {
1366 pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
1367 MMHyperFree(pVM, pRamNew);
1368 }
1367 }
1368 MMHyperFree(pVM, pRomNew);
1369 }
1370
1371 /** @todo Purge the mapping cache or something... */
1372 GMMR3FreeAllocatedPages(pVM, pReq);
1373 GMMR3AllocatePagesCleanup(pReq);
1374 pgmUnlock(pVM);
1375 return rc;
1376}
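/*
 * Not part of the original file: a hedged usage sketch for ROM registration.
 * A firmware device would register its image during VM creation; the address,
 * size, flag choice and the pvBiosImage pointer below are illustrative
 * assumptions, not values taken from this source.
 *
 * @code
 *      // Map a 64 KB BIOS image at the top of the first megabyte, shadowed so
 *      // the chipset can later flip it to RAM via PGMR3PhysRomProtect().
 *      int rc = PGMR3PhysRomRegister(pVM, pDevIns, 0xf0000, 64 * _1K, pvBiosImage,
 *                                    PGMPHYS_ROM_FLAG_SHADOWED, "BIOS");
 *      AssertLogRelRCReturn(rc, rc);
 * @endcode
 */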
1377
1378
1379/**
1380 * \#PF Handler callback for ROM write accesses.
1381 *
1382 * @returns VINF_SUCCESS if the handler has carried out the operation.
1383 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1384 * @param pVM VM Handle.
1385 * @param GCPhys The physical address the guest is writing to.
1386 * @param pvPhys The HC mapping of that address.
1387 * @param pvBuf What the guest is reading/writing.
1388 * @param cbBuf How much it's reading/writing.
1389 * @param enmAccessType The access type.
1390 * @param pvUser User argument.
1391 */
1392/*static - shut up warning */
1393 DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1394{
1395 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
1396 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
1397 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
1398 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
1399 switch (pRomPage->enmProt)
1400 {
1401 /*
1402 * Ignore.
1403 */
1404 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
1405 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
1406 return VINF_SUCCESS;
1407
1408 /*
1409 * Write to the ram page.
1410 */
1411 case PGMROMPROT_READ_ROM_WRITE_RAM:
1412 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
1413 {
1414 /* This should be impossible now, pvPhys doesn't work cross page any longer. */
1415 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
1416
1417 /*
1418 * Take the lock, do lazy allocation, map the page and copy the data.
1419 *
1420 * Note that we have to bypass the mapping TLB since it works on
1421 * guest physical addresses and entering the shadow page would
1422 * kind of screw things up...
1423 */
1424 int rc = pgmLock(pVM);
1425 AssertRC(rc);
1426
1427 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pRomPage->Shadow) != PGM_PAGE_STATE_ALLOCATED))
1428 {
1429 rc = pgmPhysPageMakeWritable(pVM, &pRomPage->Shadow, GCPhys);
1430 if (RT_FAILURE(rc))
1431 {
1432 pgmUnlock(pVM);
1433 return rc;
1434 }
1435 }
1436
1437 void *pvDstPage;
1438 PPGMPAGEMAP pMapIgnored;
1439 rc = pgmPhysPageMap(pVM, &pRomPage->Shadow, GCPhys & X86_PTE_PG_MASK, &pMapIgnored, &pvDstPage);
1440 if (RT_SUCCESS(rc))
1441 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
1442
1443 pgmUnlock(pVM);
1444 return rc;
1445 }
1446
1447 default:
1448 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
1449 pRom->aPages[iPage].enmProt, iPage, GCPhys),
1450 VERR_INTERNAL_ERROR);
1451 }
1452}
1453
1454
1455
1456/**
1457 * Called by PGMR3Reset to reset the shadow pages, switch to the virgin pages,
1458 * and verify that the virgin part is untouched.
1459 *
1460 * This is done after the normal memory has been cleared.
1461 *
1462 * ASSUMES that the caller owns the PGM lock.
1463 *
1464 * @param pVM The VM handle.
1465 */
1466int pgmR3PhysRomReset(PVM pVM)
1467{
1468 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
1469 {
1470 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
1471
1472 if (pRom->fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
1473 {
1474 /*
1475 * Reset the physical handler.
1476 */
1477 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
1478 AssertRCReturn(rc, rc);
1479
1480 /*
1481 * What we do with the shadow pages depends on the memory
1482 * preallocation option. If not enabled, we'll just throw
1483 * out all the dirty pages and replace them by the zero page.
1484 */
1485 if (1) /// @todo !pVM->pgm.s.fRamPreAlloc
1486 {
1487 /* Count dirty shadow pages. */
1488 uint32_t cDirty = 0;
1489 uint32_t iPage = cPages;
1490 while (iPage-- > 0)
1491 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1492 cDirty++;
1493 if (cDirty)
1494 {
1495 /* Free the dirty pages. */
1496 PGMMFREEPAGESREQ pReq;
1497 rc = GMMR3FreePagesPrepare(pVM, &pReq, cDirty, GMMACCOUNT_BASE);
1498 AssertRCReturn(rc, rc);
1499
1500 uint32_t iReqPage = 0;
1501 for (iPage = 0; iPage < cPages; iPage++)
1502 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1503 {
1504 pReq->aPages[iReqPage].idPage = PGM_PAGE_GET_PAGEID(&pRom->aPages[iPage].Shadow);
1505 iReqPage++;
1506 }
1507
1508 rc = GMMR3FreePagesPerform(pVM, pReq);
1509 GMMR3FreePagesCleanup(pReq);
1510 AssertRCReturn(rc, rc);
1511
1512 /* setup the zero page. */
1513 for (iPage = 0; iPage < cPages; iPage++)
1514 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1515 PGM_PAGE_INIT_ZERO_REAL(&pRom->aPages[iPage].Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
1516 }
1517 }
1518 else
1519 {
1520 /* clear all the pages. */
1521 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1522 {
1523 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
1524 rc = pgmPhysPageMakeWritable(pVM, &pRom->aPages[iPage].Shadow, GCPhys);
1525 if (RT_FAILURE(rc))
1526 break;
1527
1528 void *pvDstPage;
1529 PPGMPAGEMAP pMapIgnored;
1530 rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pMapIgnored, &pvDstPage);
1531 if (RT_FAILURE(rc))
1532 break;
1533 ASMMemZeroPage(pvDstPage);
1534 }
1535 AssertRCReturn(rc, rc);
1536 }
1537 }
1538
1539#ifdef VBOX_STRICT
1540 /*
1541 * Verify that the virgin page is unchanged if possible.
1542 */
1543 if (pRom->pvOriginal)
1544 {
1545 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
1546 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbSrcPage += PAGE_SIZE)
1547 {
1548 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
1549 PPGMPAGEMAP pMapIgnored;
1550 void *pvDstPage;
1551 int rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pMapIgnored, &pvDstPage);
1552 if (RT_FAILURE(rc))
1553 break;
1554 if (memcmp(pvDstPage, pbSrcPage, PAGE_SIZE))
1555 LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
1556 GCPhys, pRom->pszDesc));
1557 }
1558 }
1559#endif
1560 }
1561
1562 return VINF_SUCCESS;
1563}
1564
1565
1566/**
1567 * Change the shadowing of a range of ROM pages.
1568 *
1569 * This is intended for implementing chipset specific memory registers
1570 * and will not be very strict about the input. It will silently ignore
1571 * any pages that are not part of a shadowed ROM.
1572 *
1573 * @returns VBox status code.
1574 * @param pVM Pointer to the shared VM structure.
1575 * @param GCPhys Where to start. Page aligned.
1576 * @param cb How much to change. Page aligned.
1577 * @param enmProt The new ROM protection.
1578 */
1579PGMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
1580{
1581 /*
1582 * Check input
1583 */
1584 if (!cb)
1585 return VINF_SUCCESS;
1586 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1587 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1588 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1589 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1590 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
1591
1592 /*
1593 * Process the request.
1594 */
1595 bool fFlushedPool = false;
1596 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
1597 if ( GCPhys <= pRom->GCPhysLast
1598 && GCPhysLast >= pRom->GCPhys)
1599 {
1600 /*
1601 * Iterate the relevant pages and make the necessary changes.
1602 */
1603 bool fChanges = false;
1604 uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
1605 ? pRom->cb >> PAGE_SHIFT
1606 : (GCPhysLast - pRom->GCPhys + 1) >> PAGE_SHIFT;
1607 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
1608 iPage < cPages;
1609 iPage++)
1610 {
1611 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
1612 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
1613 {
1614 fChanges = true;
1615
1616 /* flush the page pool first so we don't leave any usage references dangling. */
1617 if (!fFlushedPool)
1618 {
1619 pgmPoolFlushAll(pVM);
1620 fFlushedPool = true;
1621 }
1622
1623 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
1624 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
1625 PPGMPAGE pRamPage = pgmPhysGetPage(&pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));
1626
1627 *pOld = *pRamPage;
1628 *pRamPage = *pNew;
1629 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
1630 }
1631 }
1632
1633 /*
1634 * Reset the access handler if we made changes, no need
1635 * to optimize this.
1636 */
1637 if (fChanges)
1638 {
1639 int rc = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
1640 AssertRCReturn(rc, rc);
1641 }
1642
1643 /* Advance - cb isn't updated. */
1644 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
1645 }
1646
1647 return VINF_SUCCESS;
1648}
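/*
 * Not part of the original file: a hedged sketch of how a chipset emulation
 * might drive PGMR3PhysRomProtect from its PAM-style shadowing registers.
 * The address, size and the register decoding that leads here are illustrative
 * assumptions; only the call into PGM matches the API above.
 *
 * @code
 *      // Guest enabled writes to the shadowed BIOS area: keep reading from the
 *      // ROM, but send writes to the shadow RAM pages.
 *      int rc = PGMR3PhysRomProtect(pVM, 0xf0000, 64 * _1K, PGMROMPROT_READ_ROM_WRITE_RAM);
 *      AssertRCReturn(rc, rc);
 * @endcode
 */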
1649
1650
1651/**
1652 * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
1653 * registration APIs call to inform PGM about memory registrations.
1654 *
1655 * It registers the physical memory range with PGM. MM is responsible
1656 * for the toplevel things - allocation and locking - while PGM is taking
1657 * care of all the details and implements the physical address space virtualization.
1658 *
1659 * @returns VBox status.
1660 * @param pVM The VM handle.
1661 * @param pvRam HC virtual address of the RAM range. (page aligned)
1662 * @param GCPhys GC physical address of the RAM range. (page aligned)
1663 * @param cb Size of the RAM range. (page aligned)
1664 * @param fFlags Flags, MM_RAM_*.
1665 * @param paPages Pointer to an array of physical page descriptors.
1666 * @param pszDesc Description string.
1667 */
1668PGMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
1669{
1670 /*
1671 * Validate input.
1672 * (Not so important because callers are only MMR3PhysRegister()
1673 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
1674 */
1675 Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
1676
1677 Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
1678 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
1679 Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
1680 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
1681 Assert(!(fFlags & ~0xfff));
1682 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1683 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
1684 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
1685 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1686 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1687 if (GCPhysLast < GCPhys)
1688 {
1689 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
1690 return VERR_INVALID_PARAMETER;
1691 }
1692
1693 /*
1694 * Find range location and check for conflicts.
1695 */
1696 PPGMRAMRANGE pPrev = NULL;
1697 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
1698 while (pCur)
1699 {
1700 if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
1701 {
1702 AssertMsgFailed(("Conflict! This cannot happen!\n"));
1703 return VERR_PGM_RAM_CONFLICT;
1704 }
1705 if (GCPhysLast < pCur->GCPhys)
1706 break;
1707
1708 /* next */
1709 pPrev = pCur;
1710 pCur = pCur->pNextR3;
1711 }
1712
1713 /*
1714 * Allocate RAM range.
1715 * Small ranges are allocated from the heap, big ones have separate mappings.
1716 */
1717 size_t cbRam = RT_OFFSETOF(PGMRAMRANGE, aPages[cb >> PAGE_SHIFT]);
1718 PPGMRAMRANGE pNew;
1719 RTGCPTR GCPtrNew;
1720 int rc = VERR_NO_MEMORY;
1721 if (cbRam > PAGE_SIZE / 2)
1722 { /* large */
1723 cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
1724 rc = SUPPageAlloc(cbRam >> PAGE_SHIFT, (void **)&pNew);
1725 if (VBOX_SUCCESS(rc))
1726 {
1727 rc = MMR3HyperMapHCRam(pVM, pNew, cbRam, true, pszDesc, &GCPtrNew);
1728 if (VBOX_SUCCESS(rc))
1729 {
1730 Assert(MMHyperHC2GC(pVM, pNew) == GCPtrNew);
1731 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
1732 }
1733 else
1734 {
1735 AssertMsgFailed(("MMR3HyperMapHCRam(,,%#x,,,) -> %Vrc\n", cbRam, rc));
1736 SUPPageFree(pNew, cbRam >> PAGE_SHIFT);
1737 }
1738 }
1739 else
1740 AssertMsgFailed(("SUPPageAlloc(%#x,,) -> %Vrc\n", cbRam >> PAGE_SHIFT, rc));
1741
1742 }
1743/** @todo Make VGA and VMMDev register their memory at init time before the hma size is fixated. */
1744 if (RT_FAILURE(rc))
1745 { /* small + fallback (vga) */
1746 rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
1747 if (VBOX_SUCCESS(rc))
1748 GCPtrNew = MMHyperHC2GC(pVM, pNew);
1749 else
1750            AssertMsgFailed(("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc));
1751 }
1752 if (VBOX_SUCCESS(rc))
1753 {
1754 /*
1755 * Initialize the range.
1756 */
1757 pNew->pvHC = pvRam;
1758 pNew->GCPhys = GCPhys;
1759 pNew->GCPhysLast = GCPhysLast;
1760 pNew->cb = cb;
1761 pNew->fFlags = fFlags;
1762 pNew->pavHCChunkHC = NULL;
1763 pNew->pavHCChunkGC = 0;
1764
1765 unsigned iPage = cb >> PAGE_SHIFT;
1766 if (paPages)
1767 {
1768 while (iPage-- > 0)
1769 {
1770 PGM_PAGE_INIT(&pNew->aPages[iPage], paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
1771 fFlags & MM_RAM_FLAGS_MMIO2 ? PGMPAGETYPE_MMIO2 : PGMPAGETYPE_RAM,
1772 PGM_PAGE_STATE_ALLOCATED);
1773 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
1774 }
1775 }
1776 else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1777 {
1778 /* Allocate memory for chunk to HC ptr lookup array. */
1779 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
1780            AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc), rc);
1781
1782 pNew->pavHCChunkGC = MMHyperHC2GC(pVM, pNew->pavHCChunkHC);
1783 Assert(pNew->pavHCChunkGC);
1784
1785 /* Physical memory will be allocated on demand. */
1786 while (iPage-- > 0)
1787 {
1788 PGM_PAGE_INIT(&pNew->aPages[iPage], 0, NIL_GMM_PAGEID, PGMPAGETYPE_RAM, PGM_PAGE_STATE_ZERO);
1789 pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
1790 }
1791 }
1792 else
1793 {
1794 Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
1795 RTHCPHYS HCPhysDummyPage = MMR3PageDummyHCPhys(pVM);
1796 while (iPage-- > 0)
1797 {
1798 PGM_PAGE_INIT(&pNew->aPages[iPage], HCPhysDummyPage, NIL_GMM_PAGEID, PGMPAGETYPE_MMIO, PGM_PAGE_STATE_ZERO);
1799 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
1800 }
1801 }
1802
1803 /*
1804 * Insert the new RAM range.
1805 */
1806 pgmLock(pVM);
1807 pNew->pNextR3 = pCur;
1808 pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : NIL_RTR0PTR;
1809 pNew->pNextGC = pCur ? MMHyperCCToGC(pVM, pCur) : NIL_RTGCPTR;
1810 if (pPrev)
1811 {
1812 pPrev->pNextR3 = pNew;
1813 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
1814 pPrev->pNextGC = GCPtrNew;
1815 }
1816 else
1817 {
1818 pVM->pgm.s.pRamRangesR3 = pNew;
1819 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
1820 pVM->pgm.s.pRamRangesGC = GCPtrNew;
1821 }
1822 pgmUnlock(pVM);
1823 }
1824 return rc;
1825}
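
/*
 * Illustrative sketch only (not part of the original file): how one of the
 * callers named above, e.g. MMR3PhysRegister(), might use PGMR3PhysRegister()
 * to put a reserved MMIO hole into the guest physical address space.  Reserved
 * MMIO ranges carry neither a host mapping nor page descriptors, which is the
 * case the asserts and the dummy-page branch above handle.  The helper name
 * and the GCPhysHole/cbHole parameters are hypothetical.
 */
#if 0 /* example sketch, not compiled */
static int exampleRegisterMmioHole(PVM pVM, RTGCPHYS GCPhysHole, size_t cbHole)
{
    Assert(RT_ALIGN_Z(cbHole, PAGE_SIZE) == cbHole);
    return PGMR3PhysRegister(pVM, NULL /*pvRam*/, GCPhysHole, cbHole,
                             MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO,
                             NULL /*paPages*/, "Example MMIO hole");
}
#endif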
1826
1827#ifndef VBOX_WITH_NEW_PHYS_CODE
1828
1829/**
1830 * Register a chunk of a physical memory range with PGM. MM is responsible
1831 * for the top-level things - allocation and locking - while PGM takes
1832 * care of all the details and implements the physical address space virtualization.
1833 *
1834 *
1835 * @returns VBox status.
1836 * @param pVM The VM handle.
1837 * @param pvRam HC virtual address of the RAM range. (page aligned)
1838 * @param GCPhys GC physical address of the RAM range. (page aligned)
1839 * @param cb Size of the RAM range. (page aligned)
1840 * @param fFlags Flags, MM_RAM_*.
1841 * @param paPages Pointer to an array of physical page descriptors.
1842 * @param pszDesc Description string.
1843 */
1844PGMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
1845{
1846 NOREF(pszDesc);
1847
1848 /*
1849 * Validate input.
1850 * (Not so important because callers are only MMR3PhysRegister()
1851 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
1852 */
1853 Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
1854
1855 Assert(paPages);
1856 Assert(pvRam);
1857 Assert(!(fFlags & ~0xfff));
1858 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1859 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
1860 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
1861 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1862 Assert(VM_IS_EMT(pVM));
1863 Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
1864 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
1865
1866 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1867 if (GCPhysLast < GCPhys)
1868 {
1869 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
1870 return VERR_INVALID_PARAMETER;
1871 }
1872
1873 /*
1874 * Find existing range location.
1875 */
1876 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1877 while (pRam)
1878 {
1879 RTGCPHYS off = GCPhys - pRam->GCPhys;
1880 if ( off < pRam->cb
1881 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1882 break;
1883
1884 pRam = CTXALLSUFF(pRam->pNext);
1885 }
1886 AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
1887
1888 unsigned off = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1889 unsigned iPage = cb >> PAGE_SHIFT;
1890 if (paPages)
1891 {
1892 while (iPage-- > 0)
1893 pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
1894 }
1895 off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
1896 pRam->pavHCChunkHC[off] = pvRam;
1897
1898 /* Notify the recompiler. */
1899 REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);
1900
1901 return VINF_SUCCESS;
1902}
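
/*
 * Illustrative sketch only: what a caller of PGMR3PhysRegisterChunk() has to
 * provide.  The chunk must lie inside an already registered
 * MM_RAM_FLAGS_DYNAMIC_ALLOC range, be chunk aligned and exactly
 * PGM_DYNAMIC_CHUNK_SIZE bytes, matching the asserts above.  The helper name
 * and parameters are hypothetical; fChunkFlags stands for whatever flag set
 * MM passes for dynamically allocated chunks.
 */
#if 0 /* example sketch, not compiled */
static int exampleRegisterChunk(PVM pVM, void *pvChunk, RTGCPHYS GCPhysChunk,
                                const SUPPAGE *paChunkPages, unsigned fChunkFlags)
{
    Assert(!(GCPhysChunk & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
    return PGMR3PhysRegisterChunk(pVM, pvChunk, GCPhysChunk, PGM_DYNAMIC_CHUNK_SIZE,
                                  fChunkFlags, paChunkPages, "Example chunk");
}
#endif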
1903
1904
1905/**
1906 * Allocate missing physical pages for an existing guest RAM range.
1907 *
1908 * @returns VBox status.
1909 * @param pVM The VM handle.
1910 * @param pGCPhys Pointer to the GC physical address of the RAM range. (page aligned)
1911 */
1912PGMR3DECL(int) PGM3PhysGrowRange(PVM pVM, PCRTGCPHYS pGCPhys)
1913{
1914 RTGCPHYS GCPhys = *pGCPhys;
1915
1916 /*
1917 * Walk range list.
1918 */
1919 pgmLock(pVM);
1920
1921 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1922 while (pRam)
1923 {
1924 RTGCPHYS off = GCPhys - pRam->GCPhys;
1925 if ( off < pRam->cb
1926 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1927 {
1928 bool fRangeExists = false;
1929 unsigned off = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
1930
1931 /** @note A request made from another thread may end up in EMT after somebody else has already allocated the range. */
1932 if (pRam->pavHCChunkHC[off])
1933 fRangeExists = true;
1934
1935 pgmUnlock(pVM);
1936 if (fRangeExists)
1937 return VINF_SUCCESS;
1938 return pgmr3PhysGrowRange(pVM, GCPhys);
1939 }
1940
1941 pRam = CTXALLSUFF(pRam->pNext);
1942 }
1943 pgmUnlock(pVM);
1944 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1945}
1946
1947
1948/**
1949 * Allocate missing physical pages for an existing guest RAM range.
1950 *
1951 * @returns VBox status.
1952 * @param pVM The VM handle.
1954 * @param GCPhys GC physical address of the RAM range. (page aligned)
1955 */
1956int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
1957{
1958 void *pvRam;
1959 int rc;
1960
1961 /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
1962 if (!VM_IS_EMT(pVM))
1963 {
1964 PVMREQ pReq;
1965 const RTGCPHYS GCPhysParam = GCPhys;
1966
1967 AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));
1968
1969 rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, &GCPhysParam);
1970 if (VBOX_SUCCESS(rc))
1971 {
1972 rc = pReq->iStatus;
1973 VMR3ReqFree(pReq);
1974 }
1975 return rc;
1976 }
1977
1978 /* Round down to chunk boundary */
1979 GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;
1980
1981 STAM_COUNTER_INC(&pVM->pgm.s.StatDynRamGrow);
1982 STAM_COUNTER_ADD(&pVM->pgm.s.StatDynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));
1983
1984 Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %VGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));
1985
1986 unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;
1987
1988 for (;;)
1989 {
1990 rc = SUPPageAlloc(cPages, &pvRam);
1991 if (VBOX_SUCCESS(rc))
1992 {
1993
1994 rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
1995 if (VBOX_SUCCESS(rc))
1996 return rc;
1997
1998 SUPPageFree(pvRam, cPages);
1999 }
2000
2001 VMSTATE enmVMState = VMR3GetState(pVM);
2002 if (enmVMState != VMSTATE_RUNNING)
2003 {
2004 AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %VGp!\n", GCPhys));
2005 LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %VGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
2006 return rc;
2007 }
2008
2009 LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));
2010
2011 /* Pause first, then inform Main. */
2012 rc = VMR3SuspendNoSave(pVM);
2013 AssertRC(rc);
2014
2015 VMSetRuntimeError(pVM, false, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM.");
2016
2017 /* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
2018 rc = VMR3WaitForResume(pVM);
2019
2020 /* Retry */
2021 LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
2022 }
2023}
2024
2025#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2026
2027
2028/**
2029 * Interface that MMR3RomRegister() and MMR3PhysReserve() call to update the
2030 * flags of existing RAM ranges.
2031 *
2032 * @returns VBox status.
2033 * @param pVM The VM handle.
2034 * @param GCPhys GC physical address of the RAM range. (page aligned)
2035 * @param cb Size of the RAM range. (page aligned)
2036 * @param fFlags The OR flags, MM_RAM_* \#defines.
2037 * @param fMask The AND mask for the flags (flag bits not in the mask are cleared before fFlags is ORed in).
2038 */
2039PGMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
2040{
2041 Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));
2042
2043 /*
2044 * Validate input.
2045 * (Not so important because caller is always MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
2046 */
2047 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
2048 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
2049 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2050 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2051 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2052
2053 /*
2054 * Lookup the range.
2055 */
2056 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
2057 while (pRam && GCPhys > pRam->GCPhysLast)
2058 pRam = CTXALLSUFF(pRam->pNext);
2059 if ( !pRam
2060 || GCPhys > pRam->GCPhysLast
2061 || GCPhysLast < pRam->GCPhys)
2062 {
2063 AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
2064 return VERR_INVALID_PARAMETER;
2065 }
2066
2067 /*
2068 * Update the requested flags.
2069 */
2070 RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
2071 | fMask;
2072 unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
2073 unsigned iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2074 for ( ; iPage < iPageEnd; iPage++)
2075 pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */
2076
2077 return VINF_SUCCESS;
2078}
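
/*
 * Illustrative sketch only: the fFlags/fMask semantics of the update loop above.
 * Flag bits not set in fMask are cleared before fFlags is ORed in, so with
 * fMask = 0 the call below (hypothetical GCPhysRom/cbRom values) replaces any
 * previous RESERVED/MMIO/MMIO2 markings with a plain ROM marking, while
 * fMask = ~0U would merely add the ROM bit on top of the existing flags.
 */
#if 0 /* example sketch, not compiled */
    int rc = PGMR3PhysSetFlags(pVM, GCPhysRom, cbRom, MM_RAM_FLAGS_ROM, 0 /*fMask*/);
    AssertRC(rc);
#endif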
2079
2080
2081/**
2082 * Sets the Address Gate 20 state.
2083 *
2084 * @param pVM VM handle.
2085 * @param fEnable True if the gate should be enabled.
2086 * False if the gate should be disabled.
2087 */
2088PGMDECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable)
2089{
2090 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVM->pgm.s.fA20Enabled));
2091 if (pVM->pgm.s.fA20Enabled != (RTUINT)fEnable)
2092 {
2093 pVM->pgm.s.fA20Enabled = fEnable;
2094 pVM->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
2095 REMR3A20Set(pVM, fEnable);
2096 }
2097}
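
/*
 * Illustrative note only: what the GCPhysA20Mask assignment above works out to.
 *   fEnable = true   ->  !fEnable == 0  ->  mask = ~(RTGCPHYS)0           (no address bits stripped)
 *   fEnable = false  ->  !fEnable == 1  ->  mask = ~(RTGCPHYS)0x00100000  (bit 20 cleared, so physical
 *                                                                          accesses wrap at 1MB like a
 *                                                                          gated A20 line)
 */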
2098
2099
2100/**
2101 * Tree enumeration callback for dealing with age rollover.
2102 * It will perform a simple compression of the current age.
2103 */
2104static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
2105{
2106 /* Age compression - ASSUMES iNow == 4. */
2107 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
2108 if (pChunk->iAge >= UINT32_C(0xffffff00))
2109 pChunk->iAge = 3;
2110 else if (pChunk->iAge >= UINT32_C(0xfffff000))
2111 pChunk->iAge = 2;
2112 else if (pChunk->iAge)
2113 pChunk->iAge = 1;
2114 else /* iAge = 0 */
2115 pChunk->iAge = 4;
2116
2117 /* reinsert */
2118 PVM pVM = (PVM)pvUser;
2119 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
2120 pChunk->AgeCore.Key = pChunk->iAge;
2121 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2122 return 0;
2123}
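
/*
 * Illustrative note only: a worked example of the compression above, assuming
 * iNow has just wrapped back to 4.  Chunks stamped shortly before the wrap
 * (iAge >= 0xffffff00) become age 3, somewhat older ones (>= 0xfffff000)
 * become age 2, all remaining stamped chunks become age 1, and chunks that
 * were touched since the last pass (iAge == 0) get the new iNow value of 4.
 */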
2124
2125
2126/**
2127 * Tree enumeration callback that updates the chunks that have
2128 * been used since the last
2129 */
2130static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
2131{
2132 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
2133 if (!pChunk->iAge)
2134 {
2135 PVM pVM = (PVM)pvUser;
2136 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
2137 pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
2138 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2139 }
2140
2141 return 0;
2142}
2143
2144
2145/**
2146 * Performs ageing of the ring-3 chunk mappings.
2147 *
2148 * @param pVM The VM handle.
2149 */
2150PGMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
2151{
2152 pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
2153 pVM->pgm.s.ChunkR3Map.iNow++;
2154 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
2155 {
2156 pVM->pgm.s.ChunkR3Map.iNow = 4;
2157 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
2158 }
2159 else
2160 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
2161}
2162
2163
2164/**
2165 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
2166 */
2167typedef struct PGMR3PHYSCHUNKUNMAPCB
2168{
2169 PVM pVM; /**< The VM handle. */
2170 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
2171} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
2172
2173
2174/**
2175 * Callback used to find the mapping that's been unused for
2176 * the longest time.
2177 */
2178static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
2179{
2180 do
2181 {
2182 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
2183 if ( pChunk->iAge
2184 && !pChunk->cRefs)
2185 {
2186 /*
2187 * Check that it's not in any of the TLBs.
2188 */
2189 PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
2190 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
2191 if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
2192 {
2193 pChunk = NULL;
2194 break;
2195 }
2196 if (pChunk)
2197 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
2198 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
2199 {
2200 pChunk = NULL;
2201 break;
2202 }
2203 if (pChunk)
2204 {
2205 ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
2206 return 1; /* done */
2207 }
2208 }
2209
2210 /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
2211 pNode = pNode->pList;
2212 } while (pNode);
2213 return 0;
2214}
2215
2216
2217/**
2218 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
2219 *
2220 * The candidate will not be part of any TLBs, so no need to flush
2221 * anything afterwards.
2222 *
2223 * @returns Chunk id.
2224 * @param pVM The VM handle.
2225 */
2226static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
2227{
2228 /*
2229 * Do tree ageing first?
2230 */
2231 if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
2232 PGMR3PhysChunkAgeing(pVM);
2233
2234 /*
2235 * Enumerate the age tree starting with the left most node.
2236 */
2237 PGMR3PHYSCHUNKUNMAPCB Args;
2238 Args.pVM = pVM;
2239 Args.pChunk = NULL;
2240    if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
2241 return Args.pChunk->Core.Key;
2242 return INT32_MAX;
2243}
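
/*
 * Illustrative note only: INT32_MAX doubles as the "no candidate" value here;
 * pgmR3PhysChunkMap() below passes it on unchanged as Req.idChunkUnmap, so
 * ring-0 only unmaps a chunk when a real candidate id was found.
 */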
2244
2245
2246/**
2247 * Maps the given chunk into the ring-3 mapping cache.
2248 *
2249 * This will call ring-0.
2250 *
2251 * @returns VBox status code.
2252 * @param pVM The VM handle.
2253 * @param idChunk The chunk in question.
2254 * @param ppChunk Where to store the chunk tracking structure.
2255 *
2256 * @remarks Called from within the PGM critical section.
2257 */
2258int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
2259{
2260 int rc;
2261 /*
2262 * Allocate a new tracking structure first.
2263 */
2264#if 0 /* for later when we've got a separate mapping method for ring-0. */
2265 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
2266 AssertReturn(pChunk, VERR_NO_MEMORY);
2267#else
2268 PPGMCHUNKR3MAP pChunk;
2269 rc = MMHyperAlloc(pVM, sizeof(*pChunk), 0, MM_TAG_PGM_CHUNK_MAPPING, (void **)&pChunk);
2270 AssertRCReturn(rc, rc);
2271#endif
2272 pChunk->Core.Key = idChunk;
2273 pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
2274 pChunk->iAge = 0;
2275 pChunk->cRefs = 0;
2276 pChunk->cPermRefs = 0;
2277 pChunk->pv = NULL;
2278
2279 /*
2280 * Request the ring-0 part to map the chunk in question and if
2281 * necessary unmap another one to make space in the mapping cache.
2282 */
2283 GMMMAPUNMAPCHUNKREQ Req;
2284 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
2285 Req.Hdr.cbReq = sizeof(Req);
2286 Req.pvR3 = NULL;
2287 Req.idChunkMap = idChunk;
2288 Req.idChunkUnmap = INT32_MAX;
2289 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
2290 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
2291 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
2292 if (VBOX_SUCCESS(rc))
2293 {
2294 /*
2295 * Update the tree.
2296 */
2297 /* insert the new one. */
2298 AssertPtr(Req.pvR3);
2299 pChunk->pv = Req.pvR3;
2300 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
2301 AssertRelease(fRc);
2302 pVM->pgm.s.ChunkR3Map.c++;
2303
2304 fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2305 AssertRelease(fRc);
2306
2307 /* remove the unmapped one. */
2308 if (Req.idChunkUnmap != INT32_MAX)
2309 {
2310 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
2311 AssertRelease(pUnmappedChunk);
2312 pUnmappedChunk->pv = NULL;
2313 pUnmappedChunk->Core.Key = UINT32_MAX;
2314#if 0 /* for later when we've got a separate mapping method for ring-0. */
2315 MMR3HeapFree(pUnmappedChunk);
2316#else
2317 MMHyperFree(pVM, pUnmappedChunk);
2318#endif
2319 pVM->pgm.s.ChunkR3Map.c--;
2320 }
2321 }
2322 else
2323 {
2324 AssertRC(rc);
2325#if 0 /* for later when we've got a separate mapping method for ring-0. */
2326 MMR3HeapFree(pChunk);
2327#else
2328 MMHyperFree(pVM, pChunk);
2329#endif
2330 pChunk = NULL;
2331 }
2332
2333 *ppChunk = pChunk;
2334 return rc;
2335}
2336
2337
2338/**
2339 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
2340 *
2341 * @returns see pgmR3PhysChunkMap.
2342 * @param pVM The VM handle.
2343 * @param idChunk The chunk to map.
2344 */
2345PGMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
2346{
2347 PPGMCHUNKR3MAP pChunk;
2348 return pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
2349}
2350
2351
2352/**
2353 * Invalidates the TLB for the ring-3 mapping cache.
2354 *
2355 * @param pVM The VM handle.
2356 */
2357PGMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
2358{
2359 pgmLock(pVM);
2360 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
2361 {
2362 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
2363 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
2364 }
2365 pgmUnlock(pVM);
2366}
2367
2368
2369/**
2370 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
2371 *
2372 * @returns The following VBox status codes.
2373 * @retval VINF_SUCCESS on success. FF cleared.
2374 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
2375 *
2376 * @param pVM The VM handle.
2377 */
2378PGMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
2379{
2380 pgmLock(pVM);
2381 int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
2382 if (rc == VERR_GMM_SEED_ME)
2383 {
2384 void *pvChunk;
2385 rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
2386 if (VBOX_SUCCESS(rc))
2387 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
2388 if (VBOX_FAILURE(rc))
2389 {
2390 LogRel(("PGM: GMM Seeding failed, rc=%Vrc\n", rc));
2391 rc = VINF_EM_NO_MEMORY;
2392 }
2393 }
2394 pgmUnlock(pVM);
2395 Assert(rc == VINF_SUCCESS || rc == VINF_EM_NO_MEMORY);
2396 return rc;
2397}
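
/*
 * Illustrative sketch only: the kind of EMT call site this API is written for.
 * The force-action check (VM_FF_ISSET on VM_FF_PGM_NEED_HANDY_PAGES) and the
 * early return are assumed surrounding code, not taken from this file; only
 * the PGMR3PhysAllocateHandyPages() call and its two possible status codes
 * come from the function above.
 */
#if 0 /* example sketch, not compiled */
    if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM);
        if (rc == VINF_EM_NO_MEMORY)
            return rc; /* back off and let the outer loop handle the low-memory condition */
    }
#endif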
2398