VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp@11945

Last change on this file since 11945 was 11299, checked in by vboxsync, 16 years ago

mm: MMHyperXXToGC -> MMHyperXXToRC.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 84.5 KB
1/* $Id: PGMPhys.cpp 11299 2008-08-08 22:56:56Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/pgm.h>
28#include <VBox/cpum.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/rem.h>
34#include <VBox/csam.h>
35#include "PGMInternal.h"
36#include <VBox/vm.h>
37#include <VBox/dbg.h>
38#include <VBox/param.h>
39#include <VBox/err.h>
40#include <iprt/assert.h>
41#include <iprt/alloc.h>
42#include <iprt/asm.h>
43#include <VBox/log.h>
44#include <iprt/thread.h>
45#include <iprt/string.h>
46
47
48/*******************************************************************************
49* Internal Functions *
50*******************************************************************************/
51/*static - shut up warning */
52DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
53
54
55
56/*
57 * PGMR3PhysReadU8-64
58 * PGMR3PhysWriteU8-64
59 */
60#define PGMPHYSFN_READNAME PGMR3PhysReadU8
61#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU8
62#define PGMPHYS_DATASIZE 1
63#define PGMPHYS_DATATYPE uint8_t
64#include "PGMPhysRWTmpl.h"
65
66#define PGMPHYSFN_READNAME PGMR3PhysReadU16
67#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU16
68#define PGMPHYS_DATASIZE 2
69#define PGMPHYS_DATATYPE uint16_t
70#include "PGMPhysRWTmpl.h"
71
72#define PGMPHYSFN_READNAME PGMR3PhysReadU32
73#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU32
74#define PGMPHYS_DATASIZE 4
75#define PGMPHYS_DATATYPE uint32_t
76#include "PGMPhysRWTmpl.h"
77
78#define PGMPHYSFN_READNAME PGMR3PhysReadU64
79#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU64
80#define PGMPHYS_DATASIZE 8
81#define PGMPHYS_DATATYPE uint64_t
82#include "PGMPhysRWTmpl.h"
83
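Each PGMPhysRWTmpl.h include above consumes the PGMPHYSFN_*/PGMPHYS_* macros defined just before it (the template #undefs them again), stamping out one typed read/write pair per data size. As a hedged sketch, the uint32_t instantiation amounts to roughly the following (the literal body lives in PGMPhysRWTmpl.h and may differ):

    /* Illustrative expansion only -- not the literal template body. */
    PGMR3DECL(uint32_t) PGMR3PhysReadU32(PVM pVM, RTGCPHYS GCPhys)
    {
        uint32_t u32; /* read via the generic physical-memory reader (assumed) */
        PGMPhysRead(pVM, GCPhys, &u32, sizeof(u32));
        return u32;
    }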
84
85
86/**
87 * Links a new RAM range into the list.
88 *
89 * @param pVM Pointer to the shared VM structure.
90 * @param pNew Pointer to the new list entry.
91 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
92 */
93static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
94{
95 pgmLock(pVM);
96
97 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
98 pNew->pNextR3 = pRam;
99 pNew->pNextR0 = pRam ? MMHyperCCToR0(pVM, pRam) : NIL_RTR0PTR;
100 pNew->pNextGC = pRam ? MMHyperCCToRC(pVM, pRam) : NIL_RTGCPTR;
101
102 if (pPrev)
103 {
104 pPrev->pNextR3 = pNew;
105 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
106 pPrev->pNextGC = MMHyperCCToRC(pVM, pNew);
107 }
108 else
109 {
110 pVM->pgm.s.pRamRangesR3 = pNew;
111 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
112 pVM->pgm.s.pRamRangesGC = MMHyperCCToRC(pVM, pNew);
113 }
114
115 pgmUnlock(pVM);
116}
117
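Note that each link is kept in three address contexts at once: pNextR3 (ring-3), pNextR0 (ring-0) and pNextGC (raw-mode guest context), which is why the insertion above updates all three using the MMHyperCCToR0/MMHyperCCToRC conversions. A hedged ring-3 traversal sketch (the R0/GC walkers would follow their own members):

    /* Illustrative only: walk all RAM ranges from ring-3. */
    for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
        LogFlow(("range %RGp-%RGp %s\n", pCur->GCPhys, pCur->GCPhysLast, pCur->pszDesc));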
118
119/**
120 * Unlink an existing RAM range from the list.
121 *
122 * @param pVM Pointer to the shared VM structure.
123 * @param pRam Pointer to the list entry to unlink.
124 * @param pPrev Pointer to the previous list entry. If NULL, the head entry is unlinked.
125 */
126static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
127{
128 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
129
130 pgmLock(pVM);
131
132 PPGMRAMRANGE pNext = pRam->pNextR3;
133 if (pPrev)
134 {
135 pPrev->pNextR3 = pNext;
136 pPrev->pNextR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
137 pPrev->pNextGC = pNext ? MMHyperCCToRC(pVM, pNext) : NIL_RTGCPTR;
138 }
139 else
140 {
141 Assert(pVM->pgm.s.pRamRangesR3 == pRam);
142 pVM->pgm.s.pRamRangesR3 = pNext;
143 pVM->pgm.s.pRamRangesR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
144 pVM->pgm.s.pRamRangesGC = pNext ? MMHyperCCToRC(pVM, pNext) : NIL_RTGCPTR;
145 }
146
147 pgmUnlock(pVM);
148}
149
150
151/**
152 * Unlink an existing RAM range from the list.
153 *
154 * @param pVM Pointer to the shared VM structure.
155 * @param pRam Pointer to the list entry to unlink.
156 */
157static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
158{
159 /* find prev. */
160 PPGMRAMRANGE pPrev = NULL;
161 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
162 while (pCur != pRam)
163 {
164 pPrev = pCur;
165 pCur = pCur->pNextR3;
166 }
167 AssertFatal(pCur);
168
169 pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
170}
171
172
173
174/**
175 * Sets up a RAM range.
176 *
177 * This will check for conflicting registrations, make a resource
178 * reservation for the memory (with GMM), and setup the per-page
179 * tracking structures (PGMPAGE).
180 *
181 * @returns VBox status code.
182 * @param pVM Pointer to the shared VM structure.
183 * @param GCPhys The physical address of the RAM.
184 * @param cb The size of the RAM.
185 * @param pszDesc The description - not copied, so, don't free or change it.
186 */
187PGMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
188{
189 /*
190 * Validate input.
191 */
192 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
193 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
194 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
195 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
196 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
197 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
198 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
199 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
200
201 /*
202 * Find range location and check for conflicts.
203 * (We don't lock here because the locking by EMT is only required on update.)
204 */
205 PPGMRAMRANGE pPrev = NULL;
206 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
207 while (pRam && GCPhysLast >= pRam->GCPhys)
208 {
209 if ( GCPhysLast >= pRam->GCPhys
210 && GCPhys <= pRam->GCPhysLast)
211 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
212 GCPhys, GCPhysLast, pszDesc,
213 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
214 VERR_PGM_RAM_CONFLICT);
215
216 /* next */
217 pPrev = pRam;
218 pRam = pRam->pNextR3;
219 }
220
221 /*
222 * Register it with GMM (the API bitches).
223 */
224 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
225 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
226 if (RT_FAILURE(rc))
227 return rc;
228
229 /*
230 * Allocate RAM range.
231 */
232 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
233 PPGMRAMRANGE pNew;
234 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
235 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
236
237 /*
238 * Initialize the range.
239 */
240 pNew->GCPhys = GCPhys;
241 pNew->GCPhysLast = GCPhysLast;
242 pNew->pszDesc = pszDesc;
243 pNew->cb = cb;
244 pNew->fFlags = 0;
245
246 pNew->pvHC = NULL;
247 pNew->pavHCChunkHC = NULL;
248 pNew->pavHCChunkGC = 0;
249
250#ifndef VBOX_WITH_NEW_PHYS_CODE
251 /* Allocate memory for chunk to HC ptr lookup array. */
252 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
253 AssertRCReturn(rc, rc);
254 pNew->pavHCChunkGC = MMHyperCCToRC(pVM, pNew->pavHCChunkHC);
255 pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
256
257#endif
258 RTGCPHYS iPage = cPages;
259 while (iPage-- > 0)
260 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
261
262 /*
263 * Insert the new RAM range.
264 */
265 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
266
267 /*
268 * Notify REM.
269 */
270#ifdef VBOX_WITH_NEW_PHYS_CODE
271 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, 0);
272#else
273 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, MM_RAM_FLAGS_DYNAMIC_ALLOC);
274#endif
275
276 return VINF_SUCCESS;
277}
278
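A hedged usage sketch: this is the kind of call the VM construction code would make to set up base memory. The address, size and description are illustrative, not taken from this source:

    /* Register 128 MB of base RAM at guest-physical address 0 (illustrative). */
    int rc = PGMR3PhysRegisterRam(pVM, 0 /* GCPhys */, 128 * _1M /* cb */, "Base RAM");
    AssertLogRelRCReturn(rc, rc);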
279
280/**
281 * Resets (zeros) the RAM.
282 *
283 * ASSUMES that the caller owns the PGM lock.
284 *
285 * @returns VBox status code.
286 * @param pVM Pointer to the shared VM structure.
287 */
288int pgmR3PhysRamReset(PVM pVM)
289{
290 /*
291 * Walk the ram ranges.
292 */
293 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
294 {
295 uint32_t iPage = pRam->cb >> PAGE_SHIFT; Assert((RTGCPHYS)iPage << PAGE_SHIFT == pRam->cb);
296#ifdef VBOX_WITH_NEW_PHYS_CODE
297 if (!pVM->pgm.s.fRamPreAlloc)
298 {
299 /* Replace all RAM pages by ZERO pages. */
300 while (iPage-- > 0)
301 {
302 PPGMPAGE pPage = &pRam->aPages[iPage];
303 switch (PGM_PAGE_GET_TYPE(pPage))
304 {
305 case PGMPAGETYPE_RAM:
306 if (!PGM_PAGE_IS_ZERO(pPage))
307 pgmPhysFreePage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
308 break;
309
310 case PGMPAGETYPE_MMIO2:
311 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
312 case PGMPAGETYPE_ROM:
313 case PGMPAGETYPE_MMIO:
314 break;
315 default:
316 AssertFailed();
317 }
318 } /* for each page */
319 }
320 else
321#endif
322 {
323 /* Zero the memory. */
324 while (iPage-- > 0)
325 {
326 PPGMPAGE pPage = &pRam->aPages[iPage];
327 switch (PGM_PAGE_GET_TYPE(pPage))
328 {
329#ifndef VBOX_WITH_NEW_PHYS_CODE
330 case PGMPAGETYPE_INVALID:
331 case PGMPAGETYPE_RAM:
332 if (pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)) /** @todo PAGE FLAGS */
333 {
334 /* shadow ram is reloaded elsewhere. */
335 Log4(("PGMR3Reset: not clearing phys page %RGp due to flags %RHp\n", pRam->GCPhys + (iPage << PAGE_SHIFT), pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO))); /** @todo PAGE FLAGS */
336 continue;
337 }
338 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
339 {
340 unsigned iChunk = iPage >> (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
341 if (pRam->pavHCChunkHC[iChunk])
342 ASMMemZero32((char *)pRam->pavHCChunkHC[iChunk] + ((iPage << PAGE_SHIFT) & PGM_DYNAMIC_CHUNK_OFFSET_MASK), PAGE_SIZE);
343 }
344 else
345 ASMMemZero32((char *)pRam->pvHC + (iPage << PAGE_SHIFT), PAGE_SIZE);
346 break;
347#else /* VBOX_WITH_NEW_PHYS_CODE */
348 case PGMPAGETYPE_RAM:
349 switch (PGM_PAGE_GET_STATE(pPage))
350 {
351 case PGM_PAGE_STATE_ZERO:
352 break;
353 case PGM_PAGE_STATE_SHARED:
354 case PGM_PAGE_STATE_WRITE_MONITORED:
355 { int rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
356 AssertLogRelRCReturn(rc, rc); } /* fall thru */
357 case PGM_PAGE_STATE_ALLOCATED:
358 {
359 void *pvPage;
360 PPGMPAGEMAP pMapIgnored;
361 int rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pMapIgnored, &pvPage);
362 AssertLogRelRCReturn(rc, rc);
363 ASMMemZeroPage(pvPage);
364 break;
365 }
366 }
367 break;
368#endif /* VBOX_WITH_NEW_PHYS_CODE */
369
370 case PGMPAGETYPE_MMIO2:
371 case PGMPAGETYPE_ROM_SHADOW:
372 case PGMPAGETYPE_ROM:
373 case PGMPAGETYPE_MMIO:
374 break;
375 default:
376 AssertFailed();
377
378 }
379 } /* for each page */
380 }
381
382 }
383
384 return VINF_SUCCESS;
385}
386
387
388/**
389 * This is the interface IOM is using to register an MMIO region.
390 *
391 * It will check for conflicts and ensure that a RAM range structure
392 * is present before calling the PGMR3HandlerPhysicalRegister API to
393 * register the callbacks.
394 *
395 * @returns VBox status code.
396 *
397 * @param pVM Pointer to the shared VM structure.
398 * @param GCPhys The start of the MMIO region.
399 * @param cb The size of the MMIO region.
400 * @param pfnHandlerR3 The address of the ring-3 handler. (IOMR3MMIOHandler)
401 * @param pvUserR3 The user argument for R3.
402 * @param pfnHandlerR0 The address of the ring-0 handler. (IOMMMIOHandler)
403 * @param pvUserR0 The user argument for R0.
404 * @param pfnHandlerGC The address of the GC handler. (IOMMMIOHandler)
405 * @param pvUserGC The user argument for GC.
406 * @param pszDesc The description of the MMIO region.
407 */
408PDMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb,
409 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
410 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
411 RCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
412 R3PTRTYPE(const char *) pszDesc)
413{
414 /*
415 * Assert on some assumptions.
416 */
417 VM_ASSERT_EMT(pVM);
418 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
419 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
420 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
421 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
422
423 /*
424 * Make sure there's a RAM range structure for the region.
425 */
426 int rc;
427 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
428 bool fRamExists = false;
429 PPGMRAMRANGE pRamPrev = NULL;
430 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
431 while (pRam && GCPhysLast >= pRam->GCPhys)
432 {
433 if ( GCPhysLast >= pRam->GCPhys
434 && GCPhys <= pRam->GCPhysLast)
435 {
436 /* Simplification: all within the same range. */
437 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
438 && GCPhysLast <= pRam->GCPhysLast,
439 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
440 GCPhys, GCPhysLast, pszDesc,
441 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
442 VERR_PGM_RAM_CONFLICT);
443
444 /* Check that it's all RAM or MMIO pages. */
445 PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
446 uint32_t cLeft = cb >> PAGE_SHIFT;
447 while (cLeft-- > 0)
448 {
449 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
450 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
451 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
452 GCPhys, GCPhysLast, pszDesc, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
453 VERR_PGM_RAM_CONFLICT);
454 pPage++;
455 }
456
457 /* Looks good. */
458 fRamExists = true;
459 break;
460 }
461
462 /* next */
463 pRamPrev = pRam;
464 pRam = pRam->pNextR3;
465 }
466 PPGMRAMRANGE pNew;
467 if (fRamExists)
468 pNew = NULL;
469 else
470 {
471 /*
472 * No RAM range, insert an ad-hoc one.
473 *
474 * Note that we don't have to tell REM about this range because
475 * PGMHandlerPhysicalRegisterEx will do that for us.
476 */
477 Log(("PGMR3PhysMMIORegister: Adding ad-hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));
478
479 const uint32_t cPages = cb >> PAGE_SHIFT;
480 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
481 rc = MMHyperAlloc(pVM, cbRamRange, 16, MM_TAG_PGM_PHYS, (void **)&pNew);
482 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
483
484 /* Initialize the range. */
485 pNew->GCPhys = GCPhys;
486 pNew->GCPhysLast = GCPhysLast;
487 pNew->pszDesc = pszDesc;
488 pNew->cb = cb;
489 pNew->fFlags = 0; /* Some MMIO flag here? */
490
491 pNew->pvHC = NULL;
492 pNew->pavHCChunkHC = NULL;
493 pNew->pavHCChunkGC = 0;
494
495 uint32_t iPage = cPages;
496 while (iPage-- > 0)
497 PGM_PAGE_INIT_ZERO_REAL(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
498 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);
499
500 /* link it */
501 pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
502 }
503
504 /*
505 * Register the access handler.
506 */
507 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_MMIO, GCPhys, GCPhysLast,
508 pfnHandlerR3, pvUserR3,
509 pfnHandlerR0, pvUserR0,
510 pfnHandlerGC, pvUserGC, pszDesc);
511 if ( RT_FAILURE(rc)
512 && !fRamExists)
513 {
514 /* remove the ad-hoc range. */
515 pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
516 pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
517 MMHyperFree(pVM, pNew);
518 }
519
520 return rc;
521}
522
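A hedged sketch of how IOM might drive this entry point for a one-page MMIO window. Only the ring-3 handler is wired up here; GCPhysMMIO and pvUser are illustrative placeholders, not names from this source:

    int rc = PGMR3PhysMMIORegister(pVM, GCPhysMMIO, PAGE_SIZE,
                                   IOMR3MMIOHandler, pvUser, /* ring-3 handler + user arg */
                                   NULL, NIL_RTR0PTR,        /* no ring-0 handler in this sketch */
                                   NULL, NIL_RTGCPTR,        /* no GC handler in this sketch */
                                   "Illustrative MMIO window");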
523
524/**
525 * This is the interface IOM is using to deregister an MMIO region.
526 *
527 * It will take care of calling PGMHandlerPhysicalDeregister and clean up
528 * any ad-hoc PGMRAMRANGE left behind.
529 *
530 * @returns VBox status code.
531 * @param pVM Pointer to the shared VM structure.
532 * @param GCPhys The start of the MMIO region.
533 * @param cb The size of the MMIO region.
534 */
535PDMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
536{
537 VM_ASSERT_EMT(pVM);
538
539 /*
540 * First deregister the handler, then check if we should remove the ram range.
541 */
542 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
543 if (RT_SUCCESS(rc))
544 {
545 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
546 PPGMRAMRANGE pRamPrev = NULL;
547 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
548 while (pRam && GCPhysLast >= pRam->GCPhys)
549 {
550 /*if ( GCPhysLast >= pRam->GCPhys
551 && GCPhys <= pRam->GCPhysLast) - later */
552 if ( GCPhysLast == pRam->GCPhysLast
553 && GCPhys == pRam->GCPhys)
554 {
555 Assert(pRam->cb == cb);
556
557 /*
558 * See if all the pages are dead MMIO pages.
559 */
560 bool fAllMMIO = true;
561 PPGMPAGE pPage = &pRam->aPages[0];
562 uint32_t cLeft = cb >> PAGE_SHIFT;
563 while (cLeft-- > 0)
564 {
565 if ( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO
566 /*|| not-out-of-action later */)
567 {
568 fAllMMIO = false;
569 break;
570 }
571 pPage++;
572 }
573
574 /*
575 * Unlink it and free if it's all MMIO.
576 */
577 if (fAllMMIO)
578 {
579 Log(("PGMR3PhysMMIODeregister: Freeing ad-hoc MMIO range for %RGp-%RGp %s\n",
580 GCPhys, GCPhysLast, pRam->pszDesc));
581
582 pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
583 pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
584 MMHyperFree(pVM, pRam);
585 }
586 break;
587 }
588
589 /* next */
590 pRamPrev = pRam;
591 pRam = pRam->pNextR3;
592 }
593 }
594
595 return rc;
596}
597
598
599/**
600 * Locate a MMIO2 range.
601 *
602 * @returns Pointer to the MMIO2 range.
603 * @param pVM Pointer to the shared VM structure.
604 * @param pDevIns The device instance owning the region.
605 * @param iRegion The region.
606 */
607DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
608{
609 /*
610 * Search the list.
611 */
612 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
613 if (pCur->pDevInsR3 == pDevIns && pCur->iRegion == iRegion)
614 return pCur;
615 return NULL;
616}
617
618
619/**
620 * Allocate and register a MMIO2 region.
621 *
622 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's
623 * RAM associated with a device. It is also non-shared memory with a
624 * permanent ring-3 mapping and page backing (presently).
625 *
626 * A MMIO2 range may overlap with base memory if a lot of RAM
627 * is configured for the VM, in which case we'll drop the base
628 * memory pages. Presently we will make no attempt to preserve
629 * anything that happens to be present in the base memory that
630 * is replaced; this is of course incorrect, but it's too much
631 * effort.
632 *
633 * @returns VBox status code.
634 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the memory.
635 * @retval VERR_ALREADY_EXISTS if the region already exists.
636 *
637 * @param pVM Pointer to the shared VM structure.
638 * @param pDevIns The device instance owning the region.
639 * @param iRegion The region number. If the MMIO2 memory is a PCI I/O region
640 * this number has to be the number of that region. Otherwise
641 * it can be any number up to UINT8_MAX.
642 * @param cb The size of the region. Must be page aligned.
643 * @param fFlags Reserved for future use, must be zero.
644 * @param ppv Where to store the pointer to the ring-3 mapping of the memory.
645 * @param pszDesc The description.
646 */
647PDMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc)
648{
649 /*
650 * Validate input.
651 */
652 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
653 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
654 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
655 AssertPtrReturn(ppv, VERR_INVALID_POINTER);
656 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
657 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
658 AssertReturn(pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
659 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
660 AssertReturn(cb, VERR_INVALID_PARAMETER);
661 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
662
663 const uint32_t cPages = cb >> PAGE_SHIFT;
664 AssertLogRelReturn((RTGCPHYS)cPages << PAGE_SHIFT == cb, VERR_INVALID_PARAMETER);
665 AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY);
666
667 /*
668 * Try reserve and allocate the backing memory first as this is what is
669 * most likely to fail.
670 */
671 int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc);
672 if (RT_FAILURE(rc))
673 return rc;
674
675 void *pvPages;
676 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE));
677 rc = paPages ? SUPPageAllocLockedEx(cPages, &pvPages, paPages)
678 : VERR_NO_TMP_MEMORY; /* RTMemTmpAlloc can fail */
679 if (RT_SUCCESS(rc))
680 {
681 /*
682 * Create the MMIO2 range record for it.
683 */
684 const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]);
685 PPGMMMIO2RANGE pNew;
686 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
687 AssertLogRelMsgRC(rc, ("cbRamRange=%zu\n", cbRange));
688 if (RT_SUCCESS(rc))
689 {
690 pNew->pDevInsR3 = pDevIns;
691 pNew->pvR3 = pvPages;
692 //pNew->pNext = NULL;
693 //pNew->fMapped = false;
694 //pNew->fOverlapping = false;
695 pNew->iRegion = iRegion;
696 pNew->RamRange.GCPhys = NIL_RTGCPHYS;
697 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
698 pNew->RamRange.pszDesc = pszDesc;
699 pNew->RamRange.cb = cb;
700 //pNew->RamRange.fFlags = 0;
701
702 pNew->RamRange.pvHC = pvPages; ///@todo remove this
703 pNew->RamRange.pavHCChunkHC = NULL; ///@todo remove this
704 pNew->RamRange.pavHCChunkGC = 0; ///@todo remove this
705
706 uint32_t iPage = cPages;
707 while (iPage-- > 0)
708 {
709 PGM_PAGE_INIT(&pNew->RamRange.aPages[iPage],
710 paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
711 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
712 }
713
714 /*
715 * Link it into the list.
716 * Since there is no particular order, just push it.
717 */
718 pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3;
719 pVM->pgm.s.pMmio2RangesR3 = pNew;
720
721 *ppv = pvPages;
722 RTMemTmpFree(paPages);
723 return VINF_SUCCESS;
724 }
725
726 SUPPageFreeLocked(pvPages, cPages);
727 }
728 RTMemTmpFree(paPages);
729 MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pszDesc);
730 return rc;
731}
732
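Taken together with PGMR3PhysMMIO2Map below, a device would typically use this in two steps; the region number, size and BAR address are illustrative assumptions:

    /* At construction: allocate the backing RAM for a 4 MB frame buffer. */
    void *pvVRam = NULL;
    int rc = PGMR3PhysMMIO2Register(pVM, pDevIns, 0 /* iRegion */, 4 * _1M, 0 /* fFlags */,
                                    &pvVRam, "VGA VRAM");
    AssertRCReturn(rc, rc);
    /* Later, when the guest programs the PCI BAR: */
    rc = PGMR3PhysMMIO2Map(pVM, pDevIns, 0 /* iRegion */, GCPhysBar /* illustrative */);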
733
734/**
735 * Deregisters and frees a MMIO2 region.
736 *
737 * Any physical (and virtual) access handlers registered for the region must
738 * be deregistered before calling this function.
739 *
740 * @returns VBox status code.
741 * @param pVM Pointer to the shared VM structure.
742 * @param pDevIns The device instance owning the region.
743 * @param iRegion The region. If it's UINT32_MAX it'll be a wildcard match.
744 */
745PDMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
746{
747 /*
748 * Validate input.
749 */
750 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
751 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
752 AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
753
754 int rc = VINF_SUCCESS;
755 unsigned cFound = 0;
756 PPGMMMIO2RANGE pPrev = NULL;
757 PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3;
758 while (pCur)
759 {
760 if ( pCur->pDevInsR3 == pDevIns
761 && ( iRegion == UINT32_MAX
762 || pCur->iRegion == iRegion))
763 {
764 cFound++;
765
766 /*
767 * Unmap it if it's mapped.
768 */
769 if (pCur->fMapped)
770 {
771 int rc2 = PGMR3PhysMMIO2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);
772 AssertRC(rc2);
773 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
774 rc = rc2;
775 }
776
777 /*
778 * Unlink it
779 */
780 PPGMMMIO2RANGE pNext = pCur->pNextR3;
781 if (pPrev)
782 pPrev->pNextR3 = pNext;
783 else
784 pVM->pgm.s.pMmio2RangesR3 = pNext;
785 pCur->pNextR3 = NULL;
786
787 /*
788 * Free the memory.
789 */
790 int rc2 = SUPPageFreeLocked(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT);
791 AssertRC(rc2);
792 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
793 rc = rc2;
794
795 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)(pCur->RamRange.cb >> PAGE_SHIFT), pCur->RamRange.pszDesc);
796 AssertRC(rc2);
797 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
798 rc = rc2;
799
800 /* we're leaking hyper memory here if done at runtime. */
801 Assert( VMR3GetState(pVM) == VMSTATE_OFF
802 || VMR3GetState(pVM) == VMSTATE_DESTROYING
803 || VMR3GetState(pVM) == VMSTATE_TERMINATED
804 || VMR3GetState(pVM) == VMSTATE_CREATING);
805 /*rc = MMHyperFree(pVM, pCur);
806 AssertRCReturn(rc, rc); - not safe, see the alloc call. */
807
808 /* next */
809 pCur = pNext;
810 }
811 else
812 {
813 pPrev = pCur;
814 pCur = pCur->pNextR3;
815 }
816 }
817
818 return !cFound && iRegion != UINT32_MAX ? VERR_NOT_FOUND : rc;
819}
820
821
822/**
823 * Maps a MMIO2 region.
824 *
825 * This is done when a guest / the bios / state loading changes the
826 * PCI config. The replacing of base memory has the same restrictions
827 * as during registration, of course.
828 *
829 * @returns VBox status code.
830 *
831 * @param pVM Pointer to the shared VM structure.
832 * @param pDevIns The device instance owning the region. \a iRegion selects the region and \a GCPhys gives the guest-physical base address to map it at.
833 */
834PDMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
835{
836 /*
837 * Validate input
838 */
839 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
840 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
841 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
842 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
843 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
844 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
845
846 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
847 AssertReturn(pCur, VERR_NOT_FOUND);
848 AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER);
849 Assert(pCur->RamRange.GCPhys == NIL_RTGCPHYS);
850 Assert(pCur->RamRange.GCPhysLast == NIL_RTGCPHYS);
851
852 const RTGCPHYS GCPhysLast = GCPhys + pCur->RamRange.cb - 1;
853 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
854
855 /*
856 * Find our location in the ram range list, checking for
857 * restriction we don't bother implementing yet (partially overlapping).
858 */
859 bool fRamExists = false;
860 PPGMRAMRANGE pRamPrev = NULL;
861 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
862 while (pRam && GCPhysLast >= pRam->GCPhys)
863 {
864 if ( GCPhys <= pRam->GCPhysLast
865 && GCPhysLast >= pRam->GCPhys)
866 {
867 /* completely within? */
868 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
869 && GCPhysLast <= pRam->GCPhysLast,
870 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
871 GCPhys, GCPhysLast, pCur->RamRange.pszDesc,
872 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
873 VERR_PGM_RAM_CONFLICT);
874 fRamExists = true;
875 break;
876 }
877
878 /* next */
879 pRamPrev = pRam;
880 pRam = pRam->pNextR3;
881 }
882 if (fRamExists)
883 {
884 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
885 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
886 while (cPagesLeft-- > 0)
887 {
888 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
889 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
890 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc),
891 VERR_PGM_RAM_CONFLICT);
892 pPage++;
893 }
894 }
895 Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n",
896 GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc));
897
898 /*
899 * Make the changes.
900 */
901 pgmLock(pVM);
902
903 pCur->RamRange.GCPhys = GCPhys;
904 pCur->RamRange.GCPhysLast = GCPhysLast;
905 pCur->fMapped = true;
906 pCur->fOverlapping = fRamExists;
907
908 if (fRamExists)
909 {
910 /* replace the pages, freeing all present RAM pages. */
911 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
912 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
913 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
914 while (cPagesLeft-- > 0)
915 {
916 pgmPhysFreePage(pVM, pPageDst, GCPhys);
917
918 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
919 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhys);
920 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_MMIO2);
921 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ALLOCATED);
922
923 GCPhys += PAGE_SIZE;
924 pPageSrc++;
925 pPageDst++;
926 }
927 }
928 else
929 {
930 /* link in the ram range */
931 pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev);
932 REMR3NotifyPhysRamRegister(pVM, GCPhys, pCur->RamRange.cb, 0);
933 }
934
935 pgmUnlock(pVM);
936
937 return VINF_SUCCESS;
938}
939
940
941/**
942 * Unmaps a MMIO2 region.
943 *
944 * This is done when a guest / the bios / state loading changes the
945 * PCI config. The replacing of base memory has the same restrictions
946 * as during registration, of course.
947 */
948PDMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
949{
950 /*
951 * Validate input
952 */
953 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
954 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
955 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
956 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
957 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
958 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
959
960 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
961 AssertReturn(pCur, VERR_NOT_FOUND);
962 AssertReturn(pCur->fMapped, VERR_WRONG_ORDER);
963 AssertReturn(pCur->RamRange.GCPhys == GCPhys, VERR_INVALID_PARAMETER);
964 Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS);
965
966 Log(("PGMR3PhysMMIO2Unmap: %RGp-%RGp %s\n",
967 pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc));
968
969 /*
970 * Unmap it.
971 */
972 pgmLock(pVM);
973
974 if (pCur->fOverlapping)
975 {
976 /* Restore the RAM pages we've replaced. */
977 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
978 while (pRam->GCPhysLast < pCur->RamRange.GCPhys)
979 pRam = pRam->pNextR3;
980
981#ifdef RT_STRICT
982 RTHCPHYS const HCPhysZeroPg = pVM->pgm.s.HCPhysZeroPg;
983#endif
984 Assert(HCPhysZeroPg != 0 && HCPhysZeroPg != NIL_RTHCPHYS);
985 PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
986 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
987 while (cPagesLeft-- > 0)
988 {
989 PGM_PAGE_SET_HCPHYS(pPageDst, pVM->pgm.s.HCPhysZeroPg);
990 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_RAM);
991 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ZERO);
992
993 pPageDst++;
994 }
995 }
996 else
997 {
998 REMR3NotifyPhysReserve(pVM, pCur->RamRange.GCPhys, pCur->RamRange.cb);
999 pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange);
1000 }
1001
1002 pCur->RamRange.GCPhys = NIL_RTGCPHYS;
1003 pCur->RamRange.GCPhysLast = NIL_RTGCPHYS;
1004 pCur->fOverlapping = false;
1005 pCur->fMapped = false;
1006
1007 pgmUnlock(pVM);
1008
1009 return VINF_SUCCESS;
1010}
1011
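So a PCI BAR relocation boils down to an unmap at the old address followed by a map at the new one; a hedged sketch with illustrative GCPhysOld/GCPhysNew:

    int rc = PGMR3PhysMMIO2Unmap(pVM, pDevIns, 0 /* iRegion */, GCPhysOld);
    if (RT_SUCCESS(rc))
        rc = PGMR3PhysMMIO2Map(pVM, pDevIns, 0 /* iRegion */, GCPhysNew);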
1012
1013/**
1014 * Checks if the given address is an MMIO2 base address or not.
1015 *
1016 * @returns true/false accordingly.
1017 * @param pVM Pointer to the shared VM structure.
1018 * @param pDevIns The owner of the memory, optional.
1019 * @param GCPhys The address to check.
1020 */
1021PDMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
1022{
1023 /*
1024 * Validate input
1025 */
1026 VM_ASSERT_EMT_RETURN(pVM, false);
1027 AssertPtrReturn(pDevIns, false);
1028 AssertReturn(GCPhys != NIL_RTGCPHYS, false);
1029 AssertReturn(GCPhys != 0, false);
1030 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);
1031
1032 /*
1033 * Search the list.
1034 */
1035 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
1036 if (pCur->RamRange.GCPhys == GCPhys)
1037 {
1038 Assert(pCur->fMapped);
1039 return true;
1040 }
1041 return false;
1042}
1043
1044
1045/**
1046 * Gets the HC physical address of a page in the MMIO2 region.
1047 *
1048 * This API is intended for MMHyper and shouldn't be called
1049 * by anyone else...
1050 *
1051 * @returns VBox status code.
1052 * @param pVM Pointer to the shared VM structure.
1053 * @param pDevIns The owner of the memory, optional.
1054 * @param iRegion The region.
1055 * @param off The page expressed as an offset into the MMIO2 region.
1056 * @param pHCPhys Where to store the result.
1057 */
1058PDMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
1059{
1060 /*
1061 * Validate input
1062 */
1063 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1064 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1065 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1066
1067 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1068 AssertReturn(pCur, VERR_NOT_FOUND);
1069 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
1070
1071 PCPGMPAGE pPage = &pCur->RamRange.aPages[off >> PAGE_SHIFT];
1072 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1073 return VINF_SUCCESS;
1074}
1075
1076
1077/**
1078 * Registers a ROM image.
1079 *
1080 * Shadowed ROM images require double the amount of backing memory, so
1081 * don't use that unless you have to. Shadowing of ROM images is a process
1082 * where we can select where the reads go and where the writes go. On real
1083 * hardware the chipset provides means to configure this. We provide
1084 * PGMR3PhysProtectROM() for this purpose.
1085 *
1086 * A read-only copy of the ROM image will always be kept around while we
1087 * will allocate RAM pages for the changes on demand (unless all memory
1088 * is configured to be preallocated).
1089 *
1090 * @returns VBox status.
1091 * @param pVM VM Handle.
1092 * @param pDevIns The device instance owning the ROM.
1093 * @param GCPhys First physical address in the range.
1094 * Must be page aligned!
1095 * @param cb The size of the range (in bytes).
1096 * Must be page aligned!
1097 * @param pvBinary Pointer to the binary data backing the ROM image.
1098 * This must be exactly \a cb in size.
1099 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAG_SHADOWED
1100 * and/or PGMPHYS_ROM_FLAG_PERMANENT_BINARY.
1101 * @param pszDesc Pointer to description string. This must not be freed.
1102 *
1103 * @remark There is no way to remove the ROM, automatically on device cleanup or
1104 * manually from the device yet. This isn't difficult in any way, it's
1105 * just not something we expect to be necessary for a while.
1106 */
1107PGMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
1108 const void *pvBinary, uint32_t fFlags, const char *pszDesc)
1109{
1110 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p fFlags=%#x pszDesc=%s\n",
1111 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, fFlags, pszDesc));
1112
1113 /*
1114 * Validate input.
1115 */
1116 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1117 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
1118 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
1119 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1120 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1121 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
1122 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1123 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAG_SHADOWED | PGMPHYS_ROM_FLAG_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
1124 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
1125
1126 const uint32_t cPages = cb >> PAGE_SHIFT;
1127
1128 /*
1129 * Find the ROM location in the ROM list first.
1130 */
1131 PPGMROMRANGE pRomPrev = NULL;
1132 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
1133 while (pRom && GCPhysLast >= pRom->GCPhys)
1134 {
1135 if ( GCPhys <= pRom->GCPhysLast
1136 && GCPhysLast >= pRom->GCPhys)
1137 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
1138 GCPhys, GCPhysLast, pszDesc,
1139 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
1140 VERR_PGM_RAM_CONFLICT);
1141 /* next */
1142 pRomPrev = pRom;
1143 pRom = pRom->pNextR3;
1144 }
1145
1146 /*
1147 * Find the RAM location and check for conflicts.
1148 *
1149 * Conflict detection is a bit different than for RAM
1150 * registration since a ROM can be located within a RAM
1151 * range. So, what we have to check for is other memory
1152 * types (other than RAM that is) and that we don't span
1153 * more than one RAM range (lazy).
1154 */
1155 bool fRamExists = false;
1156 PPGMRAMRANGE pRamPrev = NULL;
1157 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1158 while (pRam && GCPhysLast >= pRam->GCPhys)
1159 {
1160 if ( GCPhys <= pRam->GCPhysLast
1161 && GCPhysLast >= pRam->GCPhys)
1162 {
1163 /* completely within? */
1164 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
1165 && GCPhysLast <= pRam->GCPhysLast,
1166 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
1167 GCPhys, GCPhysLast, pszDesc,
1168 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1169 VERR_PGM_RAM_CONFLICT);
1170 fRamExists = true;
1171 break;
1172 }
1173
1174 /* next */
1175 pRamPrev = pRam;
1176 pRam = pRam->pNextR3;
1177 }
1178 if (fRamExists)
1179 {
1180 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1181 uint32_t cPagesLeft = cPages;
1182 while (cPagesLeft-- > 0)
1183 {
1184 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
1185 ("%RGp isn't a RAM page (%d) - registering %RGp-%RGp (%s).\n",
1186 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pszDesc),
1187 VERR_PGM_RAM_CONFLICT);
1188 Assert(PGM_PAGE_IS_ZERO(pPage));
1189 pPage++;
1190 }
1191 }
1192
1193 /*
1194 * Update the base memory reservation if necessary.
1195 */
1196 uint32_t cExtraBaseCost = fRamExists ? cPages : 0;
1197 if (fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
1198 cExtraBaseCost += cPages;
1199 if (cExtraBaseCost)
1200 {
1201 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
1202 if (RT_FAILURE(rc))
1203 return rc;
1204 }
1205
1206 /*
1207 * Allocate memory for the virgin copy of the RAM.
1208 */
1209 PGMMALLOCATEPAGESREQ pReq;
1210 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
1211 AssertRCReturn(rc, rc);
1212
1213 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1214 {
1215 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
1216 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
1217 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
1218 }
1219
1220 pgmLock(pVM);
1221 rc = GMMR3AllocatePagesPerform(pVM, pReq);
1222 pgmUnlock(pVM);
1223 if (RT_FAILURE(rc))
1224 {
1225 GMMR3AllocatePagesCleanup(pReq);
1226 return rc;
1227 }
1228
1229 /*
1230 * Allocate the new ROM range and RAM range (if necessary).
1231 */
1232 PPGMROMRANGE pRomNew;
1233 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
1234 if (RT_SUCCESS(rc))
1235 {
1236 PPGMRAMRANGE pRamNew = NULL;
1237 if (!fRamExists)
1238 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
1239 if (RT_SUCCESS(rc))
1240 {
1241 pgmLock(pVM);
1242
1243 /*
1244 * Initialize and insert the RAM range (if required).
1245 */
1246 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
1247 if (!fRamExists)
1248 {
1249 pRamNew->GCPhys = GCPhys;
1250 pRamNew->GCPhysLast = GCPhysLast;
1251 pRamNew->pszDesc = pszDesc;
1252 pRamNew->cb = cb;
1253 pRamNew->fFlags = 0;
1254 pRamNew->pvHC = NULL;
1255
1256 PPGMPAGE pPage = &pRamNew->aPages[0];
1257 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
1258 {
1259 PGM_PAGE_INIT(pPage,
1260 pReq->aPages[iPage].HCPhysGCPhys,
1261 pReq->aPages[iPage].idPage,
1262 PGMPAGETYPE_ROM,
1263 PGM_PAGE_STATE_ALLOCATED);
1264
1265 pRomPage->Virgin = *pPage;
1266 }
1267
1268 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
1269 }
1270 else
1271 {
1272 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1273 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
1274 {
1275 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_ROM);
1276 PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
1277 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1278 PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);
1279
1280 pRomPage->Virgin = *pPage;
1281 }
1282
1283 pRamNew = pRam;
1284 }
1285 pgmUnlock(pVM);
1286
1287
1288 /*
1289 * Register the write access handler for the range (PGMROMPROT_READ_ROM_WRITE_IGNORE).
1290 */
1291 rc = PGMR3HandlerPhysicalRegister(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhys, GCPhysLast,
1292#if 0 /** @todo we actually need a ring-3 write handler here for shadowed ROMs, so hack REM! */
1293 pgmR3PhysRomWriteHandler, pRomNew,
1294#else
1295 NULL, NULL,
1296#endif
1297 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
1298 NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
1299 if (RT_SUCCESS(rc))
1300 {
1301 pgmLock(pVM);
1302
1303 /*
1304 * Copy the image over to the virgin pages.
1305 * This must be done after linking in the RAM range.
1306 */
1307 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
1308 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
1309 {
1310 void *pvDstPage;
1311 PPGMPAGEMAP pMapIgnored;
1312 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pMapIgnored, &pvDstPage);
1313 if (RT_FAILURE(rc))
1314 {
1315 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
1316 break;
1317 }
1318 memcpy(pvDstPage, (const uint8_t *)pvBinary + (iPage << PAGE_SHIFT), PAGE_SIZE);
1319 }
1320 if (RT_SUCCESS(rc))
1321 {
1322 /*
1323 * Initialize the ROM range.
1324 * Note that the Virgin member of the pages has already been initialized above.
1325 */
1326 pRomNew->GCPhys = GCPhys;
1327 pRomNew->GCPhysLast = GCPhysLast;
1328 pRomNew->cb = cb;
1329 pRomNew->fFlags = fFlags;
1330 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAG_PERMANENT_BINARY ? pvBinary : NULL;
1331 pRomNew->pszDesc = pszDesc;
1332
1333 for (unsigned iPage = 0; iPage < cPages; iPage++)
1334 {
1335 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
1336 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
1337 PGM_PAGE_INIT_ZERO_REAL(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
1338 }
1339
1340 /*
1341 * Insert the ROM range, tell REM and return successfully.
1342 */
1343 pRomNew->pNextR3 = pRom;
1344 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
1345 pRomNew->pNextGC = pRom ? MMHyperCCToRC(pVM, pRom) : NIL_RTGCPTR;
1346
1347 if (pRomPrev)
1348 {
1349 pRomPrev->pNextR3 = pRomNew;
1350 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
1351 pRomPrev->pNextGC = MMHyperCCToRC(pVM, pRomNew);
1352 }
1353 else
1354 {
1355 pVM->pgm.s.pRomRangesR3 = pRomNew;
1356 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
1357 pVM->pgm.s.pRomRangesGC = MMHyperCCToRC(pVM, pRomNew);
1358 }
1359
1360 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false); /** @todo fix shadowing and REM. */
1361
1362 GMMR3AllocatePagesCleanup(pReq);
1363 pgmUnlock(pVM);
1364 return VINF_SUCCESS;
1365 }
1366
1367 /* bail out */
1368
1369 pgmUnlock(pVM);
1370 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
1371 AssertRC(rc2);
1372 pgmLock(pVM);
1373 }
1374
1375 if (!fRamExists) /* don't unlink or free a pre-existing RAM range */
1376 { pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
1377 MMHyperFree(pVM, pRamNew); }
1378 }
1379 MMHyperFree(pVM, pRomNew);
1380 }
1381
1382 /** @todo Purge the mapping cache or something... */
1383 GMMR3FreeAllocatedPages(pVM, pReq);
1384 GMMR3AllocatePagesCleanup(pReq);
1385 pgmUnlock(pVM);
1386 return rc;
1387}
1388
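A hedged usage sketch for a shadowed system BIOS, made during VM creation (the function asserts the VMSTATE_CREATING state). The address, size and pvBios buffer are illustrative:

    int rc = PGMR3PhysRomRegister(pVM, pDevIns, 0xF0000 /* GCPhys, illustrative */, 64 * _1K,
                                  pvBios /* 64 KB image; kept around because of the flag below */,
                                  PGMPHYS_ROM_FLAG_SHADOWED | PGMPHYS_ROM_FLAG_PERMANENT_BINARY,
                                  "System BIOS");
    AssertRCReturn(rc, rc);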
1389
1390/**
1391 * \#PF Handler callback for ROM write accesses.
1392 *
1393 * @returns VINF_SUCCESS if the handler has carried out the operation.
1394 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1395 * @param pVM VM Handle.
1396 * @param GCPhys The physical address the guest is writing to.
1397 * @param pvPhys The HC mapping of that address.
1398 * @param pvBuf What the guest is reading/writing.
1399 * @param cbBuf How much it's reading/writing.
1400 * @param enmAccessType The access type.
1401 * @param pvUser User argument.
1402 */
1403/*static - shut up warning */
1404 DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1405{
1406 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
1407 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
1408 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
1409 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
1410 switch (pRomPage->enmProt)
1411 {
1412 /*
1413 * Ignore.
1414 */
1415 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
1416 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
1417 return VINF_SUCCESS;
1418
1419 /*
1420 * Write to the ram page.
1421 */
1422 case PGMROMPROT_READ_ROM_WRITE_RAM:
1423 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
1424 {
1425 /* This should be impossible now, pvPhys doesn't work cross page any longer. */
1426 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
1427
1428 /*
1429 * Take the lock, do lazy allocation, map the page and copy the data.
1430 *
1431 * Note that we have to bypass the mapping TLB since it works on
1432 * guest physical addresses and entering the shadow page would
1433 * kind of screw things up...
1434 */
1435 int rc = pgmLock(pVM);
1436 AssertRC(rc);
1437
1438 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pRomPage->Shadow) != PGM_PAGE_STATE_ALLOCATED))
1439 {
1440 rc = pgmPhysPageMakeWritable(pVM, &pRomPage->Shadow, GCPhys);
1441 if (RT_FAILURE(rc))
1442 {
1443 pgmUnlock(pVM);
1444 return rc;
1445 }
1446 }
1447
1448 void *pvDstPage;
1449 PPGMPAGEMAP pMapIgnored;
1450 rc = pgmPhysPageMap(pVM, &pRomPage->Shadow, GCPhys & X86_PTE_PG_MASK, &pMapIgnored, &pvDstPage);
1451 if (RT_SUCCESS(rc))
1452 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
1453
1454 pgmUnlock(pVM);
1455 return rc;
1456 }
1457
1458 default:
1459 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
1460 pRom->aPages[iPage].enmProt, iPage, GCPhys),
1461 VERR_INTERNAL_ERROR);
1462 }
1463}
1464
1465
1466
1467/**
1468 * Called by PGMR3Reset to reset the shadow, switch to the virgin,
1469 * and verify that the virgin part is untouched.
1470 *
1471 * This is done after the normal memory has been cleared.
1472 *
1473 * ASSUMES that the caller owns the PGM lock.
1474 *
1475 * @param pVM The VM handle.
1476 */
1477int pgmR3PhysRomReset(PVM pVM)
1478{
1479 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
1480 {
1481 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
1482
1483 if (pRom->fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
1484 {
1485 /*
1486 * Reset the physical handler.
1487 */
1488 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
1489 AssertRCReturn(rc, rc);
1490
1491 /*
1492 * What we do with the shadow pages depends on the memory
1493 * preallocation option. If not enabled, we'll just throw
1494 * out all the dirty pages and replace them by the zero page.
1495 */
1496 if (1) ///@todo !pVM->pgm.s.fRamPreAlloc
1497 {
1498 /* Count dirty shadow pages. */
1499 uint32_t cDirty = 0;
1500 uint32_t iPage = cPages;
1501 while (iPage-- > 0)
1502 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1503 cDirty++;
1504 if (cDirty)
1505 {
1506 /* Free the dirty pages. */
1507 PGMMFREEPAGESREQ pReq;
1508 rc = GMMR3FreePagesPrepare(pVM, &pReq, cDirty, GMMACCOUNT_BASE);
1509 AssertRCReturn(rc, rc);
1510
1511 uint32_t iReqPage = 0;
1512 for (iPage = 0; iPage < cPages; iPage++)
1513 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1514 {
1515 pReq->aPages[iReqPage].idPage = PGM_PAGE_GET_PAGEID(&pRom->aPages[iPage].Shadow);
1516 iReqPage++;
1517 }
1518
1519 rc = GMMR3FreePagesPerform(pVM, pReq);
1520 GMMR3FreePagesCleanup(pReq);
1521 AssertRCReturn(rc, rc);
1522
1523 /* setup the zero page. */
1524 for (iPage = 0; iPage < cPages; iPage++)
1525 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1526 PGM_PAGE_INIT_ZERO_REAL(&pRom->aPages[iPage].Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
1527 }
1528 }
1529 else
1530 {
1531 /* clear all the pages. */
1532 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1533 {
1534 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
1535 rc = pgmPhysPageMakeWritable(pVM, &pRom->aPages[iPage].Shadow, GCPhys);
1536 if (RT_FAILURE(rc))
1537 break;
1538
1539 void *pvDstPage;
1540 PPGMPAGEMAP pMapIgnored;
1541 rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pMapIgnored, &pvDstPage);
1542 if (RT_FAILURE(rc))
1543 break;
1544 ASMMemZeroPage(pvDstPage);
1545 }
1546 AssertRCReturn(rc, rc);
1547 }
1548 }
1549
1550#ifdef VBOX_STRICT
1551 /*
1552 * Verify that the virgin page is unchanged if possible.
1553 */
1554 if (pRom->pvOriginal)
1555 {
1556 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
1557 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbSrcPage += PAGE_SIZE)
1558 {
1559 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
1560 PPGMPAGEMAP pMapIgnored;
1561 void *pvDstPage;
1562 int rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pMapIgnored, &pvDstPage);
1563 if (RT_FAILURE(rc))
1564 break;
1565 if (memcmp(pvDstPage, pbSrcPage, PAGE_SIZE))
1566 LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
1567 GCPhys, pRom->pszDesc));
1568 }
1569 }
1570#endif
1571 }
1572
1573 return VINF_SUCCESS;
1574}
1575
1576
1577/**
1578 * Change the shadowing of a range of ROM pages.
1579 *
1580 * This is intended for implementing chipset specific memory registers
1581 * and will not be very strict about the input. It will silently ignore
1582 * any pages that are not part of a shadowed ROM.
1583 *
1584 * @returns VBox status code.
1585 * @param pVM Pointer to the shared VM structure.
1586 * @param GCPhys Where to start. Page aligned.
1587 * @param cb How much to change. Page aligned.
1588 * @param enmProt The new ROM protection.
1589 */
1590PGMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
1591{
1592 /*
1593 * Check input
1594 */
1595 if (!cb)
1596 return VINF_SUCCESS;
1597 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1598 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1599 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1600 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1601 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
1602
1603 /*
1604 * Process the request.
1605 */
1606 bool fFlushedPool = false;
1607 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
1608 if ( GCPhys <= pRom->GCPhysLast
1609 && GCPhysLast >= pRom->GCPhys)
1610 {
1611 /*
1612 * Iterate the relevant pages and make the necessary changes.
1613 */
1614 bool fChanges = false;
1615 uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
1616 ? pRom->cb >> PAGE_SHIFT
1617 : (GCPhysLast - pRom->GCPhys + 1) >> PAGE_SHIFT;
1618 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
1619 iPage < cPages;
1620 iPage++)
1621 {
1622 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
1623 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
1624 {
1625 fChanges = true;
1626
1627 /* flush the page pool first so we don't leave any usage references dangling. */
1628 if (!fFlushedPool)
1629 {
1630 pgmPoolFlushAll(pVM);
1631 fFlushedPool = true;
1632 }
1633
1634 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
1635 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
1636 PPGMPAGE pRamPage = pgmPhysGetPage(&pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));
1637
1638 *pOld = *pRamPage;
1639 *pRamPage = *pNew;
1640 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
1641 }
1642 }
1643
1644 /*
1645 * Reset the access handler if we made changes, no need
1646 * to optimize this.
1647 */
1648 if (fChanges)
1649 {
1650 int rc = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
1651 AssertRCReturn(rc, rc);
1652 }
1653
1654 /* Advance - cb isn't updated. */
1655 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
1656 }
1657
1658 return VINF_SUCCESS;
1659}
1660
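A chipset emulation would drive this from its shadow-RAM control registers; a hedged sketch for opening the legacy BIOS segment for writes (the register semantics are illustrative):

    /* Guest enabled shadow-RAM writes for the BIOS segment: reads still hit
       the ROM, writes now land in the shadow pages. */
    int rc = PGMR3PhysRomProtect(pVM, 0xF0000, 64 * _1K, PGMROMPROT_READ_ROM_WRITE_RAM);
    AssertRC(rc);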
1661
1662/**
1663 * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
1664 * registration APIs call to inform PGM about memory registrations.
1665 *
1666 * It registers the physical memory range with PGM. MM is responsible
1667 * for the toplevel things - allocation and locking - while PGM is taking
1668 * care of all the details and implements the physical address space virtualization.
1669 *
1670 * @returns VBox status.
1671 * @param pVM The VM handle.
1672 * @param pvRam HC virtual address of the RAM range. (page aligned)
1673 * @param GCPhys GC physical address of the RAM range. (page aligned)
1674 * @param cb Size of the RAM range. (page aligned)
1675 * @param fFlags Flags, MM_RAM_*.
1676 * @param paPages Pointer to an array of physical page descriptors.
1677 * @param pszDesc Description string.
1678 */
1679PGMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
1680{
1681 /*
1682 * Validate input.
1683 * (Not so important because callers are only MMR3PhysRegister()
1684 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
1685 */
1686 Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
1687
1688 Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
1689 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
1690 Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
1691 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
1692 Assert(!(fFlags & ~0xfff));
1693 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1694 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
1695 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
1696 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1697 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1698 if (GCPhysLast < GCPhys)
1699 {
1700 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
1701 return VERR_INVALID_PARAMETER;
1702 }
1703
1704 /*
1705 * Find range location and check for conflicts.
1706 */
1707 PPGMRAMRANGE pPrev = NULL;
1708 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
1709 while (pCur)
1710 {
1711 if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
1712 {
1713 AssertMsgFailed(("Conflict! This cannot happen!\n"));
1714 return VERR_PGM_RAM_CONFLICT;
1715 }
1716 if (GCPhysLast < pCur->GCPhys)
1717 break;
1718
1719 /* next */
1720 pPrev = pCur;
1721 pCur = pCur->pNextR3;
1722 }
1723
1724 /*
1725 * Allocate RAM range.
1726 * Small ranges are allocated from the heap, big ones have separate mappings.
1727 */
1728 size_t cbRam = RT_OFFSETOF(PGMRAMRANGE, aPages[cb >> PAGE_SHIFT]);
1729 PPGMRAMRANGE pNew;
1730 RTGCPTR GCPtrNew;
1731 int rc = VERR_NO_MEMORY;
1732 if (cbRam > PAGE_SIZE / 2)
1733 { /* large */
1734 cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
1735 rc = SUPPageAlloc(cbRam >> PAGE_SHIFT, (void **)&pNew);
1736 if (VBOX_SUCCESS(rc))
1737 {
1738 rc = MMR3HyperMapHCRam(pVM, pNew, cbRam, true, pszDesc, &GCPtrNew);
1739 if (VBOX_SUCCESS(rc))
1740 {
1741 Assert(MMHyperHC2GC(pVM, pNew) == GCPtrNew);
1742 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
1743 }
1744 else
1745 {
1746 AssertMsgFailed(("MMR3HyperMapHCRam(,,%#x,,,) -> %Vrc\n", cbRam, rc));
1747 SUPPageFree(pNew, cbRam >> PAGE_SHIFT);
1748 }
1749 }
1750 else
1751 AssertMsgFailed(("SUPPageAlloc(%#x,,) -> %Vrc\n", cbRam >> PAGE_SHIFT, rc));
1752
1753 }
1754/** @todo Make VGA and VMMDev register their memory at init time before the hma size is fixated. */
1755 if (RT_FAILURE(rc))
1756 { /* small + fallback (vga) */
1757 rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
1758 if (VBOX_SUCCESS(rc))
1759 GCPtrNew = MMHyperHC2GC(pVM, pNew);
1760 else
1761            AssertMsgFailed(("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc));
1762 }
1763 if (VBOX_SUCCESS(rc))
1764 {
1765 /*
1766 * Initialize the range.
1767 */
1768 pNew->pvHC = pvRam;
1769 pNew->GCPhys = GCPhys;
1770 pNew->GCPhysLast = GCPhysLast;
1771 pNew->cb = cb;
1772 pNew->fFlags = fFlags;
1773 pNew->pavHCChunkHC = NULL;
1774 pNew->pavHCChunkGC = 0;
1775
1776 unsigned iPage = cb >> PAGE_SHIFT;
1777 if (paPages)
1778 {
1779 while (iPage-- > 0)
1780 {
1781 PGM_PAGE_INIT(&pNew->aPages[iPage], paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
1782 fFlags & MM_RAM_FLAGS_MMIO2 ? PGMPAGETYPE_MMIO2 : PGMPAGETYPE_RAM,
1783 PGM_PAGE_STATE_ALLOCATED);
1784 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
1785 }
1786 }
1787 else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1788 {
1789 /* Allocate memory for chunk to HC ptr lookup array. */
1790 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
1791            AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc), rc);
1792
1793 pNew->pavHCChunkGC = MMHyperHC2GC(pVM, pNew->pavHCChunkHC);
1794 Assert(pNew->pavHCChunkGC);
1795
1796 /* Physical memory will be allocated on demand. */
1797 while (iPage-- > 0)
1798 {
1799 PGM_PAGE_INIT(&pNew->aPages[iPage], 0, NIL_GMM_PAGEID, PGMPAGETYPE_RAM, PGM_PAGE_STATE_ZERO);
1800 pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
1801 }
1802 }
1803 else
1804 {
1805 Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
1806 RTHCPHYS HCPhysDummyPage = MMR3PageDummyHCPhys(pVM);
1807 while (iPage-- > 0)
1808 {
1809 PGM_PAGE_INIT(&pNew->aPages[iPage], HCPhysDummyPage, NIL_GMM_PAGEID, PGMPAGETYPE_MMIO, PGM_PAGE_STATE_ZERO);
1810 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
1811 }
1812 }
1813
1814 /*
1815 * Insert the new RAM range.
1816 */
1817 pgmLock(pVM);
1818 pNew->pNextR3 = pCur;
1819 pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : NIL_RTR0PTR;
1820 pNew->pNextGC = pCur ? MMHyperCCToRC(pVM, pCur) : NIL_RTGCPTR;
1821 if (pPrev)
1822 {
1823 pPrev->pNextR3 = pNew;
1824 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
1825 pPrev->pNextGC = GCPtrNew;
1826 }
1827 else
1828 {
1829 pVM->pgm.s.pRamRangesR3 = pNew;
1830 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
1831 pVM->pgm.s.pRamRangesGC = GCPtrNew;
1832 }
1833 pgmUnlock(pVM);
1834 }
1835 return rc;
1836}
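
/*
 * A minimal usage sketch for the function above (illustrative only, not a
 * caller from this file; the base address, size and description string are
 * assumptions). Dynamically backed RAM passes neither a backing pointer nor
 * a page array:
 *
 *      RTGCPHYS GCPhysBase = 0;                // hypothetical guest base
 *      size_t   cbRange    = 16 * PAGE_SIZE;   // hypothetical size
 *      int rc = PGMR3PhysRegister(pVM, NULL /*pvRam*/, GCPhysBase, cbRange,
 *                                 MM_RAM_FLAGS_DYNAMIC_ALLOC, NULL /*paPages*/,
 *                                 "Example RAM");
 *      AssertRC(rc);
 */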
1837
1838#ifndef VBOX_WITH_NEW_PHYS_CODE
1839
1840/**
1841 * Register a chunk of the physical memory range with PGM. MM is responsible
1842 * for the toplevel things - allocation and locking - while PGM takes
1843 * care of all the details and implements the physical address space virtualization.
1844 *
1845 *
1846 * @returns VBox status.
1847 * @param pVM The VM handle.
1848 * @param pvRam HC virtual address of the RAM range. (page aligned)
1849 * @param GCPhys GC physical address of the RAM range. (page aligned)
1850 * @param cb Size of the RAM range. (page aligned)
1851 * @param fFlags Flags, MM_RAM_*.
1852 * @param paPages Pointer to an array of physical page descriptors.
1853 * @param pszDesc Description string.
1854 */
1855PGMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
1856{
1857 NOREF(pszDesc);
1858
1859 /*
1860 * Validate input.
1861 * (Not so important because callers are only MMR3PhysRegister()
1862 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
1863 */
1864 Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
1865
1866 Assert(paPages);
1867 Assert(pvRam);
1868 Assert(!(fFlags & ~0xfff));
1869 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1870 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
1871 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
1872 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1873 Assert(VM_IS_EMT(pVM));
1874 Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
1875 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
1876
1877 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1878 if (GCPhysLast < GCPhys)
1879 {
1880 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
1881 return VERR_INVALID_PARAMETER;
1882 }
1883
1884 /*
1885 * Find existing range location.
1886 */
1887 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1888 while (pRam)
1889 {
1890 RTGCPHYS off = GCPhys - pRam->GCPhys;
1891 if ( off < pRam->cb
1892 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1893 break;
1894
1895 pRam = CTXALLSUFF(pRam->pNext);
1896 }
1897 AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
1898
1899 unsigned off = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1900 unsigned iPage = cb >> PAGE_SHIFT;
1901 if (paPages)
1902 {
1903 while (iPage-- > 0)
1904 pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
1905 }
1906 off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
1907 pRam->pavHCChunkHC[off] = pvRam;
1908
1909 /* Notify the recompiler. */
1910 REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);
1911
1912 return VINF_SUCCESS;
1913}
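
/*
 * Worked example of the chunk index arithmetic above (illustrative; assumes
 * PAGE_SHIFT == 12 and PGM_DYNAMIC_CHUNK_SHIFT == 20, i.e. 1 MB chunks):
 *
 *      GCPhys - pRam->GCPhys = 0x00300000 (chunk aligned, as asserted)
 *      page index:  0x00300000 >> 12   = 0x300
 *      chunk index: 0x300 >> (20 - 12) = 3
 *
 * so the HC pointer for this chunk ends up in pRam->pavHCChunkHC[3].
 */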
1914
1915
1916/**
1917 * Allocate missing physical pages for an existing guest RAM range.
1918 *
1919 * @returns VBox status.
1920 * @param pVM The VM handle.
1921 * @param pGCPhys Pointer to the GC physical address of the RAM range. (page aligned)
1922 */
1923PGMR3DECL(int) PGM3PhysGrowRange(PVM pVM, PCRTGCPHYS pGCPhys)
1924{
1925 RTGCPHYS GCPhys = *pGCPhys;
1926
1927 /*
1928 * Walk range list.
1929 */
1930 pgmLock(pVM);
1931
1932 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1933 while (pRam)
1934 {
1935 RTGCPHYS off = GCPhys - pRam->GCPhys;
1936 if ( off < pRam->cb
1937 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1938 {
1939 bool fRangeExists = false;
1940 unsigned off = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
1941
1942 /** @note A request made from another thread may end up in EMT after somebody else has already allocated the range. */
1943 if (pRam->pavHCChunkHC[off])
1944 fRangeExists = true;
1945
1946 pgmUnlock(pVM);
1947 if (fRangeExists)
1948 return VINF_SUCCESS;
1949 return pgmr3PhysGrowRange(pVM, GCPhys);
1950 }
1951
1952 pRam = CTXALLSUFF(pRam->pNext);
1953 }
1954 pgmUnlock(pVM);
1955 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1956}
1957
1958
1959/**
1960 * Allocate missing physical pages for an existing guest RAM range.
1961 *
1962 * @returns VBox status.
1963 * @param pVM The VM handle.
1965 * @param GCPhys GC physical address of the RAM range. (page aligned)
1966 */
1967int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
1968{
1969 void *pvRam;
1970 int rc;
1971
1972 /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
1973 if (!VM_IS_EMT(pVM))
1974 {
1975 PVMREQ pReq;
1976 const RTGCPHYS GCPhysParam = GCPhys;
1977
1978 AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));
1979
1980 rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, &GCPhysParam);
1981 if (VBOX_SUCCESS(rc))
1982 {
1983 rc = pReq->iStatus;
1984 VMR3ReqFree(pReq);
1985 }
1986 return rc;
1987 }
1988
1989 /* Round down to chunk boundary */
1990 GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;
1991
1992 STAM_COUNTER_INC(&pVM->pgm.s.StatDynRamGrow);
1993 STAM_COUNTER_ADD(&pVM->pgm.s.StatDynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));
1994
1995 Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %VGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));
1996
1997 unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;
1998
1999 for (;;)
2000 {
2001 rc = SUPPageAlloc(cPages, &pvRam);
2002 if (VBOX_SUCCESS(rc))
2003 {
2004
2005 rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
2006 if (VBOX_SUCCESS(rc))
2007 return rc;
2008
2009 SUPPageFree(pvRam, cPages);
2010 }
2011
2012 VMSTATE enmVMState = VMR3GetState(pVM);
2013 if (enmVMState != VMSTATE_RUNNING)
2014 {
2015 AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %VGp!\n", GCPhys));
2016 LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %VGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
2017 return rc;
2018 }
2019
2020 LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));
2021
2022 /* Pause first, then inform Main. */
2023 rc = VMR3SuspendNoSave(pVM);
2024 AssertRC(rc);
2025
2026 VMSetRuntimeError(pVM, false, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM.");
2027
2028 /* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
2029 rc = VMR3WaitForResume(pVM);
2030
2031 /* Retry */
2032 LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
2033 }
2034}
2035
2036#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2037
2038
2039/**
2040 * Interface that MMR3RomRegister() and MMR3PhysReserve() call to update the
2041 * flags of existing RAM ranges.
2042 *
2043 * @returns VBox status.
2044 * @param pVM The VM handle.
2045 * @param GCPhys GC physical address of the RAM range. (page aligned)
2046 * @param cb Size of the RAM range. (page aligned)
2047 * @param fFlags The OR flags, MM_RAM_* \#defines.
2048 * @param fMask The AND mask for the flags.
2049 */
2050PGMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
2051{
2052 Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));
2053
2054 /*
2055 * Validate input.
2056 * (Not so important because caller is always MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
2057 */
2058 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
2059 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
2060 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2061 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2062 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2063
2064 /*
2065 * Lookup the range.
2066 */
2067 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
2068 while (pRam && GCPhys > pRam->GCPhysLast)
2069 pRam = CTXALLSUFF(pRam->pNext);
2070 if ( !pRam
2071 || GCPhys > pRam->GCPhysLast
2072 || GCPhysLast < pRam->GCPhys)
2073 {
2074 AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
2075 return VERR_INVALID_PARAMETER;
2076 }
2077
2078 /*
2079 * Update the requested flags.
2080 */
2081 RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
2082 | fMask;
2083 unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
2084 unsigned iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2085 for ( ; iPage < iPageEnd; iPage++)
2086 pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */
2087
2088 return VINF_SUCCESS;
2089}
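
/*
 * Illustrative call (an assumed caller, not taken from MM): marking a range
 * as ROM while clearing any other MM_RAM_* flag bits that may be set:
 *
 *      rc = PGMR3PhysSetFlags(pVM, GCPhys, cb, MM_RAM_FLAGS_ROM, 0);
 *
 * With fMask == 0 the fFullMask above clears all four flag bits first, so
 * every page in the range ends up with exactly MM_RAM_FLAGS_ROM set.
 */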
2090
2091
2092/**
2093 * Sets the Address Gate 20 state.
2094 *
2095 * @param pVM VM handle.
2096 * @param fEnable True if the gate should be enabled.
2097 * False if the gate should be disabled.
2098 */
2099PGMR3DECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable)
2100{
2101 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVM->pgm.s.fA20Enabled));
2102 if (pVM->pgm.s.fA20Enabled != (RTUINT)fEnable)
2103 {
2104 pVM->pgm.s.fA20Enabled = fEnable;
2105 pVM->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
2106 REMR3A20Set(pVM, fEnable);
2107 /** @todo we're not handling this correctly for VT-x / AMD-V. See #2911 */
2108 }
2109}
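
/*
 * The mask arithmetic above, spelled out (illustrative):
 *
 *      fEnable == true:   GCPhysA20Mask = ~(RTGCPHYS)0          (no masking)
 *      fEnable == false:  GCPhysA20Mask = ~(RTGCPHYS)(1 << 20)  (bit 20 cleared)
 *
 * With the gate disabled a guest access to 0x00100000 is thus wrapped to
 * 0x00000000, mimicking the real A20 line.
 */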
2110
2111
2112/**
2113 * Tree enumeration callback for dealing with age rollover.
2114 * It will perform a simple compression of the current age.
2115 */
2116static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
2117{
2118 /* Age compression - ASSUMES iNow == 4. */
2119 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
2120 if (pChunk->iAge >= UINT32_C(0xffffff00))
2121 pChunk->iAge = 3;
2122 else if (pChunk->iAge >= UINT32_C(0xfffff000))
2123 pChunk->iAge = 2;
2124 else if (pChunk->iAge)
2125 pChunk->iAge = 1;
2126 else /* iAge = 0 */
2127 pChunk->iAge = 4;
2128
2129 /* reinsert */
2130 PVM pVM = (PVM)pvUser;
2131 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
2132 pChunk->AgeCore.Key = pChunk->iAge;
2133 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2134 return 0;
2135}
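
/*
 * The compression above, spelled out (iNow has just wrapped and been reset
 * to 4 by the caller, as the ASSUMES note says):
 *
 *      old iAge >= 0xffffff00   ->  3   (used very recently)
 *      old iAge >= 0xfffff000   ->  2
 *      other non-zero iAge      ->  1   (oldest)
 *      iAge == 0                ->  4   (used since the last ageing pass)
 */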
2136
2137
2138/**
2139 * Tree enumeration callback that updates the age of the chunks that have
2140 * been used since the last ageing pass.
2141 */
2142static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
2143{
2144 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
2145 if (!pChunk->iAge)
2146 {
2147 PVM pVM = (PVM)pvUser;
2148 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
2149 pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
2150 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2151 }
2152
2153 return 0;
2154}
2155
2156
2157/**
2158 * Performs ageing of the ring-3 chunk mappings.
2159 *
2160 * @param pVM The VM handle.
2161 */
2162PGMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
2163{
2164 pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
2165 pVM->pgm.s.ChunkR3Map.iNow++;
2166 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
2167 {
2168 pVM->pgm.s.ChunkR3Map.iNow = 4;
2169 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
2170 }
2171 else
2172 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
2173}
2174
2175
2176/**
2177 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
2178 */
2179typedef struct PGMR3PHYSCHUNKUNMAPCB
2180{
2181 PVM pVM; /**< The VM handle. */
2182 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
2183} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
2184
2185
2186/**
2187 * Callback used to find the mapping that's been unused for
2188 * the longest time.
2189 */
2190static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
2191{
2192 do
2193 {
2194 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
2195 if ( pChunk->iAge
2196 && !pChunk->cRefs)
2197 {
2198 /*
2199 * Check that it's not in any of the TLBs.
2200 */
2201 PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
2202 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
2203 if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
2204 {
2205 pChunk = NULL;
2206 break;
2207 }
2208 if (pChunk)
2209 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
2210 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
2211 {
2212 pChunk = NULL;
2213 break;
2214 }
2215 if (pChunk)
2216 {
2217 ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
2218 return 1; /* done */
2219 }
2220 }
2221
2222 /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
2223 pNode = pNode->pList;
2224 } while (pNode);
2225 return 0;
2226}
2227
2228
2229/**
2230 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
2231 *
2232 * The candidate will not be part of any TLBs, so no need to flush
2233 * anything afterwards.
2234 *
2235 * @returns Chunk id.
2236 * @param pVM The VM handle.
2237 */
2238static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
2239{
2240 /*
2241 * Do tree ageing first?
2242 */
2243 if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
2244 PGMR3PhysChunkAgeing(pVM);
2245
2246 /*
2247 * Enumerate the age tree starting with the left most node.
2248 */
2249 PGMR3PHYSCHUNKUNMAPCB Args;
2250 Args.pVM = pVM;
2251 Args.pChunk = NULL;
2252    if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
2253 return Args.pChunk->Core.Key;
2254 return INT32_MAX;
2255}
2256
2257
2258/**
2259 * Maps the given chunk into the ring-3 mapping cache.
2260 *
2261 * This will call ring-0.
2262 *
2263 * @returns VBox status code.
2264 * @param pVM The VM handle.
2265 * @param idChunk The chunk in question.
2266 * @param ppChunk Where to store the chunk tracking structure.
2267 *
2268 * @remarks Called from within the PGM critical section.
2269 */
2270int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
2271{
2272 int rc;
2273 /*
2274 * Allocate a new tracking structure first.
2275 */
2276#if 0 /* for later when we've got a separate mapping method for ring-0. */
2277 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
2278 AssertReturn(pChunk, VERR_NO_MEMORY);
2279#else
2280 PPGMCHUNKR3MAP pChunk;
2281 rc = MMHyperAlloc(pVM, sizeof(*pChunk), 0, MM_TAG_PGM_CHUNK_MAPPING, (void **)&pChunk);
2282 AssertRCReturn(rc, rc);
2283#endif
2284 pChunk->Core.Key = idChunk;
2285 pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
2286 pChunk->iAge = 0;
2287 pChunk->cRefs = 0;
2288 pChunk->cPermRefs = 0;
2289 pChunk->pv = NULL;
2290
2291 /*
2292 * Request the ring-0 part to map the chunk in question and if
2293 * necessary unmap another one to make space in the mapping cache.
2294 */
2295 GMMMAPUNMAPCHUNKREQ Req;
2296 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
2297 Req.Hdr.cbReq = sizeof(Req);
2298 Req.pvR3 = NULL;
2299 Req.idChunkMap = idChunk;
2300 Req.idChunkUnmap = INT32_MAX;
2301 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
2302 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
2303 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
2304 if (VBOX_SUCCESS(rc))
2305 {
2306 /*
2307 * Update the tree.
2308 */
2309 /* insert the new one. */
2310 AssertPtr(Req.pvR3);
2311 pChunk->pv = Req.pvR3;
2312 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
2313 AssertRelease(fRc);
2314 pVM->pgm.s.ChunkR3Map.c++;
2315
2316 fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2317 AssertRelease(fRc);
2318
2319 /* remove the unmapped one. */
2320 if (Req.idChunkUnmap != INT32_MAX)
2321 {
2322 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
2323 AssertRelease(pUnmappedChunk);
2324 pUnmappedChunk->pv = NULL;
2325 pUnmappedChunk->Core.Key = UINT32_MAX;
2326#if 0 /* for later when we've got a separate mapping method for ring-0. */
2327 MMR3HeapFree(pUnmappedChunk);
2328#else
2329 MMHyperFree(pVM, pUnmappedChunk);
2330#endif
2331 pVM->pgm.s.ChunkR3Map.c--;
2332 }
2333 }
2334 else
2335 {
2336 AssertRC(rc);
2337#if 0 /* for later when we've got a separate mapping method for ring-0. */
2338 MMR3HeapFree(pChunk);
2339#else
2340 MMHyperFree(pVM, pChunk);
2341#endif
2342 pChunk = NULL;
2343 }
2344
2345 *ppChunk = pChunk;
2346 return rc;
2347}
2348
2349
2350/**
2351 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
2352 *
2353 * @returns see pgmR3PhysChunkMap.
2354 * @param pVM The VM handle.
2355 * @param idChunk The chunk to map.
2356 */
2357PGMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
2358{
2359 PPGMCHUNKR3MAP pChunk;
2360 return pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
2361}
2362
2363
2364/**
2365 * Invalidates the TLB for the ring-3 mapping cache.
2366 *
2367 * @param pVM The VM handle.
2368 */
2369PGMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
2370{
2371 pgmLock(pVM);
2372 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
2373 {
2374 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
2375 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
2376 }
2377 pgmUnlock(pVM);
2378}
2379
2380
2381/**
2382 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
2383 *
2384 * @returns The following VBox status codes.
2385 * @retval VINF_SUCCESS on success. FF cleared.
2386 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
2387 *
2388 * @param pVM The VM handle.
2389 */
2390PGMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
2391{
2392 pgmLock(pVM);
2393 int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
2394 if (rc == VERR_GMM_SEED_ME)
2395 {
2396 void *pvChunk;
2397 rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
2398 if (VBOX_SUCCESS(rc))
2399 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
2400 if (VBOX_FAILURE(rc))
2401 {
2402 LogRel(("PGM: GMM Seeding failed, rc=%Vrc\n", rc));
2403 rc = VINF_EM_NO_MEMORY;
2404 }
2405 }
2406 pgmUnlock(pVM);
2407 Assert(rc == VINF_SUCCESS || rc == VINF_EM_NO_MEMORY);
2408 return rc;
2409}
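
/*
 * A minimal sketch of how the force-action flag might be serviced (an
 * illustrative assumption; the actual dispatching lives elsewhere, e.g. EM):
 *
 *      if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
 *      {
 *          rc = PGMR3PhysAllocateHandyPages(pVM);
 *          if (rc == VINF_EM_NO_MEMORY)
 *              return rc;  // still out of memory, do not run the guest
 *      }
 */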
2410