VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp @ 8347

Last change on this file since 8347 was 8347, checked in by vboxsync, 17 years ago

gcc warnings

1/* $Id: PGMPhys.cpp 8347 2008-04-24 08:03:23Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/pgm.h>
28#include <VBox/cpum.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/rem.h>
34#include <VBox/csam.h>
35#include "PGMInternal.h"
36#include <VBox/vm.h>
37#include <VBox/dbg.h>
38#include <VBox/param.h>
39#include <VBox/err.h>
40#include <iprt/assert.h>
41#include <iprt/alloc.h>
42#include <iprt/asm.h>
43#include <VBox/log.h>
44#include <iprt/thread.h>
45#include <iprt/string.h>
46
47
48/*******************************************************************************
49* Internal Functions *
50*******************************************************************************/
51/*static - shut up warning */
52DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
53
54
55
56/*
57 * PGMR3PhysReadByte/Word/Dword
58 * PGMR3PhysWriteByte/Word/Dword
59 */
60/** @todo rename and add U64. */
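/* Note: "PGMPhys.h" is used as a template header below; it is included once per
   access size with PGMPHYSFN_READNAME, PGMPHYSFN_WRITENAME, PGMPHYS_DATASIZE and
   PGMPHYS_DATATYPE defined, instantiating the byte, word and dword helpers. */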
61
62#define PGMPHYSFN_READNAME PGMR3PhysReadByte
63#define PGMPHYSFN_WRITENAME PGMR3PhysWriteByte
64#define PGMPHYS_DATASIZE 1
65#define PGMPHYS_DATATYPE uint8_t
66#include "PGMPhys.h"
67
68#define PGMPHYSFN_READNAME PGMR3PhysReadWord
69#define PGMPHYSFN_WRITENAME PGMR3PhysWriteWord
70#define PGMPHYS_DATASIZE 2
71#define PGMPHYS_DATATYPE uint16_t
72#include "PGMPhys.h"
73
74#define PGMPHYSFN_READNAME PGMR3PhysReadDword
75#define PGMPHYSFN_WRITENAME PGMR3PhysWriteDword
76#define PGMPHYS_DATASIZE 4
77#define PGMPHYS_DATATYPE uint32_t
78#include "PGMPhys.h"
79
80
81
82/**
83 * Links a new RAM range into the list.
84 *
85 * @param pVM Pointer to the shared VM structure.
86 * @param pNew Pointer to the new list entry.
87 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
88 */
89static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
90{
91 pgmLock(pVM);
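/* The RAM range list is mirrored in ring-3, ring-0 and guest context; every
   link update below therefore keeps the R3, R0 and GC next pointers in sync. */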
92
93 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
94 pNew->pNextR3 = pRam;
95 pNew->pNextR0 = pRam ? MMHyperCCToR0(pVM, pRam) : NIL_RTR0PTR;
96 pNew->pNextGC = pRam ? MMHyperCCToGC(pVM, pRam) : NIL_RTGCPTR;
97
98 if (pPrev)
99 {
100 pPrev->pNextR3 = pNew;
101 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
102 pPrev->pNextGC = MMHyperCCToGC(pVM, pNew);
103 }
104 else
105 {
106 pVM->pgm.s.pRamRangesR3 = pNew;
107 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
108 pVM->pgm.s.pRamRangesGC = MMHyperCCToGC(pVM, pNew);
109 }
110
111 pgmUnlock(pVM);
112}
113
114
115/**
116 * Unlink an existing RAM range from the list.
117 *
118 * @param pVM Pointer to the shared VM structure.
119 * @param pRam Pointer to the RAM range to unlink.
120 * @param pPrev Pointer to the previous list entry. NULL if pRam is the list head.
121 */
122static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
123{
124 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
125
126 pgmLock(pVM);
127
128 PPGMRAMRANGE pNext = pRam->pNextR3;
129 if (pPrev)
130 {
131 pPrev->pNextR3 = pNext;
132 pPrev->pNextR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
133 pPrev->pNextGC = pNext ? MMHyperCCToGC(pVM, pNext) : NIL_RTGCPTR;
134 }
135 else
136 {
137 Assert(pVM->pgm.s.pRamRangesR3 == pRam);
138 pVM->pgm.s.pRamRangesR3 = pNext;
139 pVM->pgm.s.pRamRangesR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
140 pVM->pgm.s.pRamRangesGC = pNext ? MMHyperCCToGC(pVM, pNext) : NIL_RTGCPTR;
141 }
142
143 pgmUnlock(pVM);
144}
145
146
147/**
148 * Unlink an existing RAM range from the list.
149 *
150 * @param pVM Pointer to the shared VM structure.
151 * @param pRam Pointer to the RAM range to unlink.
152 */
153static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
154{
155 /* find prev. */
156 PPGMRAMRANGE pPrev = NULL;
157 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
158 while (pCur != pRam)
159 {
160 pPrev = pCur;
161 pCur = pCur->pNextR3;
162 }
163 AssertFatal(pCur);
164
165 pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
166}
167
168
169
170/**
171 * Sets up a RAM range.
172 *
173 * This will check for conflicting registrations, make a resource
174 * reservation for the memory (with GMM), and setup the per-page
175 * tracking structures (PGMPAGE).
176 *
177 * @returns VBox status code.
178 * @param pVM Pointer to the shared VM structure.
179 * @param GCPhys The physical address of the RAM.
180 * @param cb The size of the RAM.
181 * @param pszDesc The description - not copied, so, don't free or change it.
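 *
 * Illustrative call only -- the address, size and description are made-up values:
 * @code
 *     rc = PGMR3PhysRegisterRam(pVM, 0 /*GCPhys*/, 128 * _1M, "Base RAM");
 * @endcode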
182 */
183PGMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
184{
185 /*
186 * Validate input.
187 */
188 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
189 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
190 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
191 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
192 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
193 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
194 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
195 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
196
197 /*
198 * Find range location and check for conflicts.
199 * (We don't lock here because the locking by EMT is only required on update.)
200 */
201 PPGMRAMRANGE pPrev = NULL;
202 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
203 while (pRam && GCPhysLast >= pRam->GCPhys)
204 {
205 if ( GCPhysLast >= pRam->GCPhys
206 && GCPhys <= pRam->GCPhysLast)
207 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
208 GCPhys, GCPhysLast, pszDesc,
209 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
210 VERR_PGM_RAM_CONFLICT);
211
212 /* next */
213 pPrev = pRam;
214 pRam = pRam->pNextR3;
215 }
216
217 /*
218 * Register it with GMM (the API is picky about this).
219 */
220 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
221 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
222 if (RT_FAILURE(rc))
223 return rc;
224
225 /*
226 * Allocate RAM range.
227 */
228 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
229 PPGMRAMRANGE pNew;
230 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
231 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
232
233 /*
234 * Initialize the range.
235 */
236 pNew->GCPhys = GCPhys;
237 pNew->GCPhysLast = GCPhysLast;
238 pNew->pszDesc = pszDesc;
239 pNew->cb = cb;
240 pNew->fFlags = 0;
241
242 pNew->pvHC = NULL;
243 pNew->pavHCChunkHC = NULL;
244 pNew->pavHCChunkGC = 0;
245
246#ifndef VBOX_WITH_NEW_PHYS_CODE
247 /* Allocate memory for chunk to HC ptr lookup array. */
248 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
249 AssertRCReturn(rc, rc);
250 pNew->pavHCChunkGC = MMHyperCCToGC(pVM, pNew->pavHCChunkHC);
251 pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
252
253#endif
254 RTGCPHYS iPage = cPages;
255 while (iPage-- > 0)
256 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
257
258 /*
259 * Insert the new RAM range.
260 */
261 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
262
263 /*
264 * Notify REM.
265 */
266#ifdef VBOX_WITH_NEW_PHYS_CODE
267 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, 0);
268#else
269 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, MM_RAM_FLAGS_DYNAMIC_ALLOC);
270#endif
271
272 return VINF_SUCCESS;
273}
274
275
276/**
277 * Resets (zeros) the RAM.
278 *
279 * ASSUMES that the caller owns the PGM lock.
280 *
281 * @returns VBox status code.
282 * @param pVM Pointer to the shared VM structure.
283 */
284int pgmR3PhysRamReset(PVM pVM)
285{
286 /*
287 * Walk the ram ranges.
288 */
289 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
290 {
291 uint32_t iPage = pRam->cb >> PAGE_SHIFT; Assert((RTGCPHYS)iPage << PAGE_SHIFT == pRam->cb);
292#ifdef VBOX_WITH_NEW_PHYS_CODE
293 if (!pVM->pgm.s.fRamPreAlloc)
294 {
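/* With VBOX_WITH_NEW_PHYS_CODE the backing pages are handed back to GMM and
   replaced by the shared ZERO page instead of being zeroed in place. */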
295 /* Replace all RAM pages by ZERO pages. */
296 while (iPage-- > 0)
297 {
298 PPGMPAGE pPage = &pRam->aPages[iPage];
299 switch (PGM_PAGE_GET_TYPE(pPage))
300 {
301 case PGMPAGETYPE_RAM:
302 if (!PGM_PAGE_IS_ZERO(pPage))
303 pgmPhysFreePage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
304 break;
305
306 case PGMPAGETYPE_MMIO2:
307 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
308 case PGMPAGETYPE_ROM:
309 case PGMPAGETYPE_MMIO:
310 break;
311 default:
312 AssertFailed();
313 }
314 } /* for each page */
315 }
316 else
317#endif
318 {
319 /* Zero the memory. */
320 while (iPage-- > 0)
321 {
322 PPGMPAGE pPage = &pRam->aPages[iPage];
323 switch (PGM_PAGE_GET_TYPE(pPage))
324 {
325#ifndef VBOX_WITH_NEW_PHYS_CODE
326 case PGMPAGETYPE_INVALID:
327 case PGMPAGETYPE_RAM:
328 if (pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)) /** @todo PAGE FLAGS */
329 {
330 /* shadow ram is reloaded elsewhere. */
331 Log4(("PGMR3Reset: not clearing phys page %RGp due to flags %RHp\n", pRam->GCPhys + (iPage << PAGE_SHIFT), pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO))); /** @todo PAGE FLAGS */
332 continue;
333 }
334 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
335 {
336 unsigned iChunk = iPage >> (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
337 if (pRam->pavHCChunkHC[iChunk])
338 ASMMemZero32((char *)pRam->pavHCChunkHC[iChunk] + ((iPage << PAGE_SHIFT) & PGM_DYNAMIC_CHUNK_OFFSET_MASK), PAGE_SIZE);
339 }
340 else
341 ASMMemZero32((char *)pRam->pvHC + (iPage << PAGE_SHIFT), PAGE_SIZE);
342 break;
343#else /* VBOX_WITH_NEW_PHYS_CODE */
344 case PGMPAGETYPE_RAM:
345 switch (PGM_PAGE_GET_STATE(pPage))
346 {
347 case PGM_PAGE_STATE_ZERO:
348 break;
349 case PGM_PAGE_STATE_SHARED:
350 case PGM_PAGE_STATE_WRITE_MONITORED:
351 { int rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
352 AssertLogRelRCReturn(rc, rc); } /* fall thru */
353 case PGM_PAGE_STATE_ALLOCATED:
354 {
355 void *pvPage;
356 PPGMPAGEMAP pMapIgnored;
357 int rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pMapIgnored, &pvPage);
358 AssertLogRelRCReturn(rc, rc);
359 ASMMemZeroPage(pvPage);
360 break;
361 }
362 }
363 break;
364#endif /* VBOX_WITH_NEW_PHYS_CODE */
365
366 case PGMPAGETYPE_MMIO2:
367 case PGMPAGETYPE_ROM_SHADOW:
368 case PGMPAGETYPE_ROM:
369 case PGMPAGETYPE_MMIO:
370 break;
371 default:
372 AssertFailed();
373
374 }
375 } /* for each page */
376 }
377
378 }
379
380 return VINF_SUCCESS;
381}
382
383
384/**
385 * This is the interface IOM is using to register an MMIO region.
386 *
387 * It will check for conflicts and ensure that a RAM range structure
388 * is present before calling the PGMR3HandlerPhysicalRegister API to
389 * register the callbacks.
390 *
391 * @returns VBox status code.
392 *
393 * @param pVM Pointer to the shared VM structure.
394 * @param GCPhys The start of the MMIO region.
395 * @param cb The size of the MMIO region.
396 * @param pfnHandlerR3 The address of the ring-3 handler. (IOMR3MMIOHandler)
397 * @param pvUserR3 The user argument for R3.
398 * @param pfnHandlerR0 The address of the ring-0 handler. (IOMMMIOHandler)
399 * @param pvUserR0 The user argument for R0.
400 * @param pfnHandlerGC The address of the GC handler. (IOMMMIOHandler)
401 * @param pvUserGC The user argument for GC.
402 * @param pszDesc The description of the MMIO region.
403 */
404PDMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb,
405 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
406 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
407 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
408 R3PTRTYPE(const char *) pszDesc)
409{
410 /*
411 * Assert on some assumptions.
412 */
413 VM_ASSERT_EMT(pVM);
414 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
415 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
416 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
417 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
418
419 /*
420 * Make sure there's a RAM range structure for the region.
421 */
422 int rc;
423 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
424 bool fRamExists = false;
425 PPGMRAMRANGE pRamPrev = NULL;
426 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
427 while (pRam && GCPhysLast >= pRam->GCPhys)
428 {
429 if ( GCPhysLast >= pRam->GCPhys
430 && GCPhys <= pRam->GCPhysLast)
431 {
432 /* Simplification: all within the same range. */
433 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
434 && GCPhysLast <= pRam->GCPhysLast,
435 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
436 GCPhys, GCPhysLast, pszDesc,
437 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
438 VERR_PGM_RAM_CONFLICT);
439
440 /* Check that it's all RAM or MMIO pages. */
441 PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
442 uint32_t cLeft = cb >> PAGE_SHIFT;
443 while (cLeft-- > 0)
444 {
445 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
446 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
447 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
448 GCPhys, GCPhysLast, pszDesc, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
449 VERR_PGM_RAM_CONFLICT);
450 pPage++;
451 }
452
453 /* Looks good. */
454 fRamExists = true;
455 break;
456 }
457
458 /* next */
459 pRamPrev = pRam;
460 pRam = pRam->pNextR3;
461 }
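/* At this point fRamExists tells us whether the MMIO region falls inside an
   existing RAM range (pRam) or whether an ad-hoc range must be created below. */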
462 PPGMRAMRANGE pNew;
463 if (fRamExists)
464 pNew = NULL;
465 else
466 {
467 /*
468 * No RAM range, insert an ad-hoc one.
469 *
470 * Note that we don't have to tell REM about this range because
471 * PGMHandlerPhysicalRegisterEx will do that for us.
472 */
473 Log(("PGMR3PhysMMIORegister: Adding ad-hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));
474
475 const uint32_t cPages = cb >> PAGE_SHIFT;
476 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
477 rc = MMHyperAlloc(pVM, cbRamRange, 16, MM_TAG_PGM_PHYS, (void **)&pNew);
478 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
479
480 /* Initialize the range. */
481 pNew->GCPhys = GCPhys;
482 pNew->GCPhysLast = GCPhysLast;
483 pNew->pszDesc = pszDesc;
484 pNew->cb = cb;
485 pNew->fFlags = 0; /* Some MMIO flag here? */
486
487 pNew->pvHC = NULL;
488 pNew->pavHCChunkHC = NULL;
489 pNew->pavHCChunkGC = 0;
490
491 uint32_t iPage = cPages;
492 while (iPage-- > 0)
493 PGM_PAGE_INIT_ZERO_REAL(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
494 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);
495
496 /* link it */
497 pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
498 }
499
500 /*
501 * Register the access handler.
502 */
503 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_MMIO, GCPhys, GCPhysLast,
504 pfnHandlerR3, pvUserR3,
505 pfnHandlerR0, pvUserR0,
506 pfnHandlerGC, pvUserGC, pszDesc);
507 if ( RT_FAILURE(rc)
508 && !fRamExists)
509 {
510 /* remove the ad-hoc range. */
511 pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
512 pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
513 MMHyperFree(pVM, pNew);
514 }
515
516 return rc;
517}
518
519
520/**
521 * This is the interface IOM is using to deregister an MMIO region.
522 *
523 * It will take care of calling PGMHandlerPhysicalDeregister and clean up
524 * any ad-hoc PGMRAMRANGE left behind.
525 *
526 * @returns VBox status code.
527 * @param pVM Pointer to the shared VM structure.
528 * @param GCPhys The start of the MMIO region.
529 * @param cb The size of the MMIO region.
530 */
531PDMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
532{
533 VM_ASSERT_EMT(pVM);
534
535 /*
536 * First deregister the handler, then check if we should remove the ram range.
537 */
538 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
539 if (RT_SUCCESS(rc))
540 {
541 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
542 PPGMRAMRANGE pRamPrev = NULL;
543 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
544 while (pRam && GCPhysLast >= pRam->GCPhys)
545 {
546 /*if ( GCPhysLast >= pRam->GCPhys
547 && GCPhys <= pRam->GCPhysLast) - later */
548 if ( GCPhysLast == pRam->GCPhysLast
549 && GCPhys == pRam->GCPhys)
550 {
551 Assert(pRam->cb == cb);
552
553 /*
554 * See if all the pages are dead MMIO pages.
555 */
556 bool fAllMMIO = true;
557 PPGMPAGE pPage = &pRam->aPages[0];
558 uint32_t cLeft = cb >> PAGE_SHIFT;
559 while (cLeft-- > 0)
560 {
561 if ( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO
562 /*|| not-out-of-action later */)
563 {
564 fAllMMIO = false;
565 break;
566 }
567 pPage++;
568 }
569
570 /*
571 * Unlink it and free if it's all MMIO.
572 */
573 if (fAllMMIO)
574 {
575 Log(("PGMR3PhysMMIODeregister: Freeing ad-hoc MMIO range for %RGp-%RGp %s\n",
576 GCPhys, GCPhysLast, pRam->pszDesc));
577
578 pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
579 pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
580 MMHyperFree(pVM, pRam);
581 }
582 break;
583 }
584
585 /* next */
586 pRamPrev = pRam;
587 pRam = pRam->pNextR3;
588 }
589 }
590
591 return rc;
592}
593
594
595/**
596 * Locate a MMIO2 range.
597 *
598 * @returns Pointer to the MMIO2 range.
599 * @param pVM Pointer to the shared VM structure.
600 * @param pDevIns The device instance owning the region.
601 * @param iRegion The region.
602 */
603DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
604{
605 /*
606 * Search the list.
607 */
608 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
609 if (pCur->pDevInsR3 == pDevIns && pCur->iRegion == iRegion)
610 return pCur;
611 return NULL;
612}
613
614
615/**
616 * Allocate and register a MMIO2 region.
617 *
618 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's
619 * RAM associated with a device. It is also non-shared memory with a
620 * permanent ring-3 mapping and page backing (presently).
621 *
622 * A MMIO2 range may overlap with base memory if a lot of RAM
623 * is configured for the VM, in which case we'll drop the base
624 * memory pages. Presently we will make no attempt to preserve
625 * anything that happens to be present in the base memory that
626 * is replaced; this is of course incorrect, but it's too much
627 * effort.
628 *
629 * @returns VBox status code.
630 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the memory.
631 * @retval VERR_ALREADY_EXISTS if the region already exists.
632 *
633 * @param pVM Pointer to the shared VM structure.
634 * @param pDevIns The device instance owning the region.
635 * @param iRegion The region number. If the MMIO2 memory is a PCI I/O region
636 * this number has to be the number of that region. Otherwise
637 * it can be any number up to UINT8_MAX.
638 * @param cb The size of the region. Must be page aligned.
639 * @param fFlags Reserved for future use, must be zero.
640 * @param ppv Where to store the pointer to the ring-3 mapping of the memory.
641 * @param pszDesc The description.
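 *
 * Illustrative call only -- the region number, size and description are made up:
 * @code
 *     void *pvVRam;
 *     rc = PGMR3PhysMMIO2Register(pVM, pDevIns, 0 /*iRegion*/, 8 * _1M, 0 /*fFlags*/, &pvVRam, "VRAM");
 * @endcode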
642 */
643PDMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc)
644{
645 /*
646 * Validate input.
647 */
648 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
649 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
650 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
651 AssertPtrReturn(ppv, VERR_INVALID_POINTER);
652 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
653 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
654 AssertReturn(pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
655 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
656 AssertReturn(cb, VERR_INVALID_PARAMETER);
657 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
658
659 const uint32_t cPages = cb >> PAGE_SHIFT;
660 AssertLogRelReturn((RTGCPHYS)cPages << PAGE_SHIFT == cb, VERR_INVALID_PARAMETER);
661 AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY);
662
663 /*
664 * Try reserve and allocate the backing memory first as this is what is
665 * most likely to fail.
666 */
667 int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc);
668 if (RT_FAILURE(rc))
669 return rc;
670
671 void *pvPages;
672 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE));
673 if (paPages) /* check the temporary allocation, not the already-successful rc */
674 rc = SUPPageAllocLockedEx(cPages, &pvPages, paPages);
else
rc = VERR_NO_TMP_MEMORY;
675 if (RT_SUCCESS(rc))
676 {
677 /*
678 * Create the MMIO2 range record for it.
679 */
680 const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]);
681 PPGMMMIO2RANGE pNew;
682 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
683 AssertLogRelMsgRC(rc, ("cbRange=%zu\n", cbRange));
684 if (RT_SUCCESS(rc))
685 {
686 pNew->pDevInsR3 = pDevIns;
687 pNew->pvR3 = pvPages;
688 //pNew->pNext = NULL;
689 //pNew->fMapped = false;
690 //pNew->fOverlapping = false;
691 pNew->iRegion = iRegion;
692 pNew->RamRange.GCPhys = NIL_RTGCPHYS;
693 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
694 pNew->RamRange.pszDesc = pszDesc;
695 pNew->RamRange.cb = cb;
696 //pNew->RamRange.fFlags = 0;
697
698 pNew->RamRange.pvHC = pvPages; ///@todo remove this
699 pNew->RamRange.pavHCChunkHC = NULL; ///@todo remove this
700 pNew->RamRange.pavHCChunkGC = 0; ///@todo remove this
701
702 uint32_t iPage = cPages;
703 while (iPage-- > 0)
704 {
705 PGM_PAGE_INIT(&pNew->RamRange.aPages[iPage],
706 paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
707 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
708 }
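/* Each PGMPAGE now carries the host-physical address of its locked backing
   page and is marked MMIO2/ALLOCATED, so the rest of PGM treats it like RAM. */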
709
710 /*
711 * Link it into the list.
712 * Since there is no particular order, just push it.
713 */
714 pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3;
715 pVM->pgm.s.pMmio2RangesR3 = pNew;
716
717 *ppv = pvPages;
718 RTMemTmpFree(paPages);
719 return VINF_SUCCESS;
720 }
721
722 SUPPageFreeLocked(pvPages, cPages);
723 }
724 RTMemTmpFree(paPages);
725 MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pszDesc);
726 return rc;
727}
728
729
730/**
731 * Deregisters and frees a MMIO2 region.
732 *
733 * Any physical (and virtual) access handlers registered for the region must
734 * be deregistered before calling this function.
735 *
736 * @returns VBox status code.
737 * @param pVM Pointer to the shared VM structure.
738 * @param pDevIns The device instance owning the region.
739 * @param iRegion The region. If it's UINT32_MAX it'll be a wildcard match.
740 */
741PDMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
742{
743 /*
744 * Validate input.
745 */
746 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
747 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
748 AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
749
750 int rc = VINF_SUCCESS;
751 unsigned cFound = 0;
752 PPGMMMIO2RANGE pPrev = NULL;
753 PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3;
754 while (pCur)
755 {
756 if ( pCur->pDevInsR3 == pDevIns
757 && ( iRegion == UINT32_MAX
758 || pCur->iRegion == iRegion))
759 {
760 cFound++;
761
762 /*
763 * Unmap it if it's mapped.
764 */
765 if (pCur->fMapped)
766 {
767 int rc2 = PGMR3PhysMMIO2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);
768 AssertRC(rc2);
769 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
770 rc = rc2;
771 }
772
773 /*
774 * Unlink it
775 */
776 PPGMMMIO2RANGE pNext = pCur->pNextR3;
777 if (pPrev)
778 pPrev->pNextR3 = pNext;
779 else
780 pVM->pgm.s.pMmio2RangesR3 = pNext;
781 pCur->pNextR3 = NULL;
782
783 /*
784 * Free the memory.
785 */
786 int rc2 = SUPPageFreeLocked(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT);
787 AssertRC(rc2);
788 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
789 rc = rc2;
790
791 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)(pCur->RamRange.cb >> PAGE_SHIFT), pCur->RamRange.pszDesc);
792 AssertRC(rc2);
793 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
794 rc = rc2;
795
796 /* we're leaking hyper memory here if done at runtime. */
797 Assert( VMR3GetState(pVM) == VMSTATE_OFF
798 || VMR3GetState(pVM) == VMSTATE_DESTROYING
799 || VMR3GetState(pVM) == VMSTATE_TERMINATED);
800 /*rc = MMHyperFree(pVM, pCur);
801 AssertRCReturn(rc, rc); - not safe, see the alloc call. */
802
803 /* next */
804 pCur = pNext;
805 }
806 else
807 {
808 pPrev = pCur;
809 pCur = pCur->pNextR3;
810 }
811 }
812
813 return !cFound && iRegion != UINT32_MAX ? VERR_NOT_FOUND : rc;
814}
815
816
817/**
818 * Maps a MMIO2 region.
819 *
820 * This is done when a guest / the bios / state loading changes the
821 * PCI config. The replacing of base memory has the same restrictions
822 * as during registration, of course.
823 *
824 * @returns VBox status code.
825 *
826 * @param pVM Pointer to the shared VM structure.
827 * @param pDevIns The device instance owning the region.
 * @param iRegion The region.
 * @param GCPhys The guest physical address the region is mapped at.
828 */
829PDMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
830{
831 /*
832 * Validate input
833 */
834 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
835 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
836 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
837 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
838 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
839 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
840
841 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
842 AssertReturn(pCur, VERR_NOT_FOUND);
843 AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER);
844 Assert(pCur->RamRange.GCPhys == NIL_RTGCPHYS);
845 Assert(pCur->RamRange.GCPhysLast == NIL_RTGCPHYS);
846
847 const RTGCPHYS GCPhysLast = GCPhys + pCur->RamRange.cb - 1;
848 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
849
850 /*
851 * Find our location in the ram range list, checking for
852 * restriction we don't bother implementing yet (partially overlapping).
853 */
854 bool fRamExists = false;
855 PPGMRAMRANGE pRamPrev = NULL;
856 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
857 while (pRam && GCPhysLast >= pRam->GCPhys)
858 {
859 if ( GCPhys <= pRam->GCPhysLast
860 && GCPhysLast >= pRam->GCPhys)
861 {
862 /* completely within? */
863 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
864 && GCPhysLast <= pRam->GCPhysLast,
865 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
866 GCPhys, GCPhysLast, pCur->RamRange.pszDesc,
867 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
868 VERR_PGM_RAM_CONFLICT);
869 fRamExists = true;
870 break;
871 }
872
873 /* next */
874 pRamPrev = pRam;
875 pRam = pRam->pNextR3;
876 }
877 if (fRamExists)
878 {
879 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
880 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
881 while (cPagesLeft-- > 0)
882 {
883 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
884 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
885 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc),
886 VERR_PGM_RAM_CONFLICT);
887 pPage++;
888 }
889 }
890 Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n",
891 GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc));
892
893 /*
894 * Make the changes.
895 */
896 pgmLock(pVM);
897
898 pCur->RamRange.GCPhys = GCPhys;
899 pCur->RamRange.GCPhysLast = GCPhysLast;
900 pCur->fMapped = true;
901 pCur->fOverlapping = fRamExists;
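/* Two cases below: if the MMIO2 range overlaps existing base RAM we substitute
   our pages into that range; otherwise the MMIO2 RAM range is linked in as-is. */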
902
903 if (fRamExists)
904 {
905 /* replace the pages, freeing all present RAM pages. */
906 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
907 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
908 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
909 while (cPagesLeft-- > 0)
910 {
911 pgmPhysFreePage(pVM, pPageDst, GCPhys);
912
913 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
914 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhys);
915 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_MMIO2);
916 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ALLOCATED);
917
918 GCPhys += PAGE_SIZE;
919 pPageSrc++;
920 pPageDst++;
921 }
922 }
923 else
924 {
925 /* link in the ram range */
926 pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev);
927 REMR3NotifyPhysRamRegister(pVM, GCPhys, pCur->RamRange.cb, 0);
928 }
929
930 pgmUnlock(pVM);
931
932 return VINF_SUCCESS;
933}
934
935
936/**
937 * Unmaps a MMIO2 region.
938 *
939 * This is done when a guest / the bios / state loading changes the
940 * PCI config. The replacing of base memory has the same restrictions
941 * as during registration, of course.
942 */
943PDMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
944{
945 /*
946 * Validate input
947 */
948 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
949 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
950 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
951 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
952 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
953 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
954
955 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
956 AssertReturn(pCur, VERR_NOT_FOUND);
957 AssertReturn(pCur->fMapped, VERR_WRONG_ORDER);
958 AssertReturn(pCur->RamRange.GCPhys == GCPhys, VERR_INVALID_PARAMETER);
959 Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS);
960
961 Log(("PGMR3PhysMMIO2Unmap: %RGp-%RGp %s\n",
962 pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc));
963
964 /*
965 * Unmap it.
966 */
967 pgmLock(pVM);
968
969 if (pCur->fOverlapping)
970 {
971 /* Restore the RAM pages we've replaced. */
972 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
973 while (pRam->GCPhysLast < pCur->RamRange.GCPhys) /* find the range we overlapped */
974 pRam = pRam->pNextR3;
975
976 RTHCPHYS const HCPhysZeroPg = pVM->pgm.s.HCPhysZeroPg;
977 Assert(HCPhysZeroPg != 0 && HCPhysZeroPg != NIL_RTHCPHYS);
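/* Point the replaced range back at the shared ZERO page; real pages will be
   allocated again on demand the next time the guest writes to them. */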
978 PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
979 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
980 while (cPagesLeft-- > 0)
981 {
982 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhysZeroPg);
983 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_RAM);
984 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ZERO);
985
986 pPageDst++;
987 }
988 }
989 else
990 {
991 REMR3NotifyPhysReserve(pVM, pCur->RamRange.GCPhys, pCur->RamRange.cb);
992 pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange);
993 }
994
995 pCur->RamRange.GCPhys = NIL_RTGCPHYS;
996 pCur->RamRange.GCPhysLast = NIL_RTGCPHYS;
997 pCur->fOverlapping = false;
998 pCur->fMapped = false;
999
1000 pgmUnlock(pVM);
1001
1002 return VINF_SUCCESS;
1003}
1004
1005
1006/**
1007 * Checks if the given address is an MMIO2 base address or not.
1008 *
1009 * @returns true/false accordingly.
1010 * @param pVM Pointer to the shared VM structure.
1011 * @param pDevIns The owner of the memory, optional.
1012 * @param GCPhys The address to check.
1013 */
1014PDMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
1015{
1016 /*
1017 * Validate input
1018 */
1019 VM_ASSERT_EMT_RETURN(pVM, false);
1020 AssertPtrReturn(pDevIns, false);
1021 AssertReturn(GCPhys != NIL_RTGCPHYS, false);
1022 AssertReturn(GCPhys != 0, false);
1023 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);
1024
1025 /*
1026 * Search the list.
1027 */
1028 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
1029 if (pCur->RamRange.GCPhys == GCPhys)
1030 {
1031 Assert(pCur->fMapped);
1032 return true;
1033 }
1034 return false;
1035}
1036
1037
1038/**
1039 * Gets the HC physical address of a page in the MMIO2 region.
1040 *
1041 * This API is intended for MMHyper and shouldn't be called
1042 * by anyone else...
1043 *
1044 * @returns VBox status code.
1045 * @param pVM Pointer to the shared VM structure.
1046 * @param pDevIns The owner of the memory, optional.
1047 * @param iRegion The region.
1048 * @param off The page expressed as an offset into the MMIO2 region.
1049 * @param pHCPhys Where to store the result.
1050 */
1051PDMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
1052{
1053 /*
1054 * Validate input
1055 */
1056 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1057 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1058 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
1059
1060 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
1061 AssertReturn(pCur, VERR_NOT_FOUND);
1062 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
1063
1064 PCPGMPAGE pPage = &pCur->RamRange.aPages[off >> PAGE_SHIFT];
1065 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1066 return VINF_SUCCESS;
1067}
1068
1069
1070/**
1071 * Registers a ROM image.
1072 *
1073 * Shadowed ROM images require double the amount of backing memory, so
1074 * don't use that unless you have to. Shadowing of ROM images is a process
1075 * where we can select where the reads go and where the writes go. On real
1076 * hardware the chipset provides means to configure this. We provide
1077 * PGMR3PhysProtectROM() for this purpose.
1078 *
1079 * A read-only copy of the ROM image will always be kept around while we
1080 * will allocate RAM pages for the changes on demand (unless all memory
1081 * is configured to be preallocated).
1082 *
1083 * @returns VBox status.
1084 * @param pVM VM Handle.
1085 * @param pDevIns The device instance owning the ROM.
1086 * @param GCPhys First physical address in the range.
1087 * Must be page aligned!
1088 * @param cb The size of the range (in bytes).
1089 * Must be page aligned!
1090 * @param pvBinary Pointer to the binary data backing the ROM image.
1091 * This must be exactly \a cb in size.
1092 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAG_SHADOWED
1093 * and/or PGMPHYS_ROM_FLAG_PERMANENT_BINARY.
1094 * @param pszDesc Pointer to description string. This must not be freed.
1095 *
1096 * @remark There is no way to remove the ROM, either automatically on device cleanup or
1097 * manually from the device yet. This isn't difficult in any way, it's
1098 * just not something we expect to be necessary for a while.
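 *
 * Illustrative call only -- the address, size, flags and description are made up:
 * @code
 *     rc = PGMR3PhysRomRegister(pVM, pDevIns, 0xfffe0000, _128K, pvRomImage,
 *                               PGMPHYS_ROM_FLAG_SHADOWED, "Main BIOS");
 * @endcode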
1099 */
1100PGMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
1101 const void *pvBinary, uint32_t fFlags, const char *pszDesc)
1102{
1103 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p fFlags=%#x pszDesc=%s\n",
1104 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, fFlags, pszDesc));
1105
1106 /*
1107 * Validate input.
1108 */
1109 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
1110 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
1111 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
1112 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1113 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1114 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
1115 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1116 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAG_SHADOWED | PGMPHYS_ROM_FLAG_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
1117 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
1118
1119 const uint32_t cPages = cb >> PAGE_SHIFT;
1120
1121 /*
1122 * Find the ROM location in the ROM list first.
1123 */
1124 PPGMROMRANGE pRomPrev = NULL;
1125 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
1126 while (pRom && GCPhysLast >= pRom->GCPhys)
1127 {
1128 if ( GCPhys <= pRom->GCPhysLast
1129 && GCPhysLast >= pRom->GCPhys)
1130 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
1131 GCPhys, GCPhysLast, pszDesc,
1132 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
1133 VERR_PGM_RAM_CONFLICT);
1134 /* next */
1135 pRomPrev = pRom;
1136 pRom = pRom->pNextR3;
1137 }
1138
1139 /*
1140 * Find the RAM location and check for conflicts.
1141 *
1142 * Conflict detection is a bit different than for RAM
1143 * registration since a ROM can be located within a RAM
1144 * range. So, what we have to check for is other memory
1145 * types (other than RAM that is) and that we don't span
1146 * more than one RAM range (lazy).
1147 */
1148 bool fRamExists = false;
1149 PPGMRAMRANGE pRamPrev = NULL;
1150 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
1151 while (pRam && GCPhysLast >= pRam->GCPhys)
1152 {
1153 if ( GCPhys <= pRam->GCPhysLast
1154 && GCPhysLast >= pRam->GCPhys)
1155 {
1156 /* completely within? */
1157 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
1158 && GCPhysLast <= pRam->GCPhysLast,
1159 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
1160 GCPhys, GCPhysLast, pszDesc,
1161 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1162 VERR_PGM_RAM_CONFLICT);
1163 fRamExists = true;
1164 break;
1165 }
1166
1167 /* next */
1168 pRamPrev = pRam;
1169 pRam = pRam->pNextR3;
1170 }
1171 if (fRamExists)
1172 {
1173 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1174 uint32_t cPagesLeft = cPages;
1175 while (cPagesLeft-- > 0)
1176 {
1177 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
1178 ("%RGp isn't a RAM page (%d) - registering %RGp-%RGp (%s).\n",
1179 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pszDesc),
1180 VERR_PGM_RAM_CONFLICT);
1181 Assert(PGM_PAGE_IS_ZERO(pPage));
1182 pPage++;
1183 }
1184 }
1185
1186 /*
1187 * Update the base memory reservation if necessary.
1188 */
1189 uint32_t cExtraBaseCost = fRamExists ? cPages : 0;
1190 if (fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
1191 cExtraBaseCost += cPages;
1192 if (cExtraBaseCost)
1193 {
1194 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
1195 if (RT_FAILURE(rc))
1196 return rc;
1197 }
1198
1199 /*
1200 * Allocate memory for the virgin copy of the RAM.
1201 */
1202 PGMMALLOCATEPAGESREQ pReq;
1203 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
1204 AssertRCReturn(rc, rc);
1205
1206 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1207 {
1208 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
1209 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
1210 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
1211 }
1212
1213 pgmLock(pVM);
1214 rc = GMMR3AllocatePagesPerform(pVM, pReq);
1215 pgmUnlock(pVM);
1216 if (RT_FAILURE(rc))
1217 {
1218 GMMR3AllocatePagesCleanup(pReq);
1219 return rc;
1220 }
1221
1222 /*
1223 * Allocate the new ROM range and RAM range (if necessary).
1224 */
1225 PPGMROMRANGE pRomNew;
1226 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
1227 if (RT_SUCCESS(rc))
1228 {
1229 PPGMRAMRANGE pRamNew = NULL;
1230 if (!fRamExists)
1231 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
1232 if (RT_SUCCESS(rc))
1233 {
1234 pgmLock(pVM);
1235
1236 /*
1237 * Initialize and insert the RAM range (if required).
1238 */
1239 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
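/* Each ROM page tracks two copies: Virgin (the read-only original image) and
   Shadow (the writable copy used when the chipset maps the ROM read/write). */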
1240 if (!fRamExists)
1241 {
1242 pRamNew->GCPhys = GCPhys;
1243 pRamNew->GCPhysLast = GCPhysLast;
1244 pRamNew->pszDesc = pszDesc;
1245 pRamNew->cb = cb;
1246 pRamNew->fFlags = 0;
1247 pRamNew->pvHC = NULL;
1248
1249 PPGMPAGE pPage = &pRamNew->aPages[0];
1250 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
1251 {
1252 PGM_PAGE_INIT(pPage,
1253 pReq->aPages[iPage].HCPhysGCPhys,
1254 pReq->aPages[iPage].idPage,
1255 PGMPAGETYPE_ROM,
1256 PGM_PAGE_STATE_ALLOCATED);
1257
1258 pRomPage->Virgin = *pPage;
1259 }
1260
1261 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
1262 }
1263 else
1264 {
1265 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1266 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
1267 {
1268 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_ROM);
1269 PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
1270 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1271 PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);
1272
1273 pRomPage->Virgin = *pPage;
1274 }
1275
1276 pRamNew = pRam;
1277 }
1278 pgmUnlock(pVM);
1279
1280
1281 /*
1282 * Register the write access handler for the range (PGMROMPROT_READ_ROM_WRITE_IGNORE).
1283 */
1284 rc = PGMR3HandlerPhysicalRegister(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhys, GCPhysLast,
1285#if 0 /** @todo we actually need a ring-3 write handler here for shadowed ROMs, so hack REM! */
1286 pgmR3PhysRomWriteHandler, pRomNew,
1287#else
1288 NULL, NULL,
1289#endif
1290 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
1291 NULL, "pgmPhysRomWriteHandler", MMHyperCCToGC(pVM, pRomNew), pszDesc);
1292 if (RT_SUCCESS(rc))
1293 {
1294 pgmLock(pVM);
1295
1296 /*
1297 * Copy the image over to the virgin pages.
1298 * This must be done after linking in the RAM range.
1299 */
1300 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
1301 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
1302 {
1303 void *pvDstPage;
1304 PPGMPAGEMAP pMapIgnored;
1305 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pMapIgnored, &pvDstPage);
1306 if (RT_FAILURE(rc))
1307 {
1308 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
1309 break;
1310 }
1311 memcpy(pvDstPage, (const uint8_t *)pvBinary + (iPage << PAGE_SHIFT), PAGE_SIZE);
1312 }
1313 if (RT_SUCCESS(rc))
1314 {
1315 /*
1316 * Initialize the ROM range.
1317 * Note that the Virgin member of the pages has already been initialized above.
1318 */
1319 pRomNew->GCPhys = GCPhys;
1320 pRomNew->GCPhysLast = GCPhysLast;
1321 pRomNew->cb = cb;
1322 pRomNew->fFlags = fFlags;
1323 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAG_PERMANENT_BINARY ? pvBinary : NULL;
1324 pRomNew->pszDesc = pszDesc;
1325
1326 for (unsigned iPage = 0; iPage < cPages; iPage++)
1327 {
1328 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
1329 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
1330 PGM_PAGE_INIT_ZERO_REAL(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
1331 }
1332
1333 /*
1334 * Insert the ROM range, tell REM and return successfully.
1335 */
1336 pRomNew->pNextR3 = pRom;
1337 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
1338 pRomNew->pNextGC = pRom ? MMHyperCCToGC(pVM, pRom) : NIL_RTGCPTR;
1339
1340 if (pRomPrev)
1341 {
1342 pRomPrev->pNextR3 = pRomNew;
1343 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
1344 pRomPrev->pNextGC = MMHyperCCToGC(pVM, pRomNew);
1345 }
1346 else
1347 {
1348 pVM->pgm.s.pRomRangesR3 = pRomNew;
1349 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
1350 pVM->pgm.s.pRomRangesGC = MMHyperCCToGC(pVM, pRomNew);
1351 }
1352
1353 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false); /** @todo fix shadowing and REM. */
1354
1355 GMMR3AllocatePagesCleanup(pReq);
1356 pgmUnlock(pVM);
1357 return VINF_SUCCESS;
1358 }
1359
1360 /* bail out */
1361
1362 pgmUnlock(pVM);
1363 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
1364 AssertRC(rc2);
1365 pgmLock(pVM);
1366 }
1367
1368 if (!fRamExists && pRamNew) /* only undo the ad-hoc range; never touch a pre-existing one */
1369 { pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
1370 MMHyperFree(pVM, pRamNew); }
1371 }
1372 MMHyperFree(pVM, pRomNew);
1373 }
1374
1375 /** @todo Purge the mapping cache or something... */
1376 GMMR3FreeAllocatedPages(pVM, pReq);
1377 GMMR3AllocatePagesCleanup(pReq);
1378 pgmUnlock(pVM);
1379 return rc;
1380}
1381
1382
1383/**
1384 * \#PF Handler callback for ROM write accesses.
1385 *
1386 * @returns VINF_SUCCESS if the handler has carried out the operation.
1387 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1388 * @param pVM VM Handle.
1389 * @param GCPhys The physical address the guest is writing to.
1390 * @param pvPhys The HC mapping of that address.
1391 * @param pvBuf What the guest is reading/writing.
1392 * @param cbBuf How much it's reading/writing.
1393 * @param enmAccessType The access type.
1394 * @param pvUser User argument.
1395 */
1396/*static - shut up warning */
1397 DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1398{
1399 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
1400 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
1401 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
1402 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
1403 switch (pRomPage->enmProt)
1404 {
1405 /*
1406 * Ignore.
1407 */
1408 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
1409 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
1410 return VINF_SUCCESS;
1411
1412 /*
1413 * Write to the ram page.
1414 */
1415 case PGMROMPROT_READ_ROM_WRITE_RAM:
1416 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
1417 {
1418 /* This should be impossible now, pvPhys no longer works across page boundaries. */
1419 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
1420
1421 /*
1422 * Take the lock, do lazy allocation, map the page and copy the data.
1423 *
1424 * Note that we have to bypass the mapping TLB since it works on
1425 * guest physical addresses and entering the shadow page would
1426 * kind of screw things up...
1427 */
1428 int rc = pgmLock(pVM);
1429 AssertRC(rc);
1430
1431 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pRomPage->Shadow) != PGM_PAGE_STATE_ALLOCATED))
1432 {
1433 rc = pgmPhysPageMakeWritable(pVM, &pRomPage->Shadow, GCPhys);
1434 if (RT_FAILURE(rc))
1435 {
1436 pgmUnlock(pVM);
1437 return rc;
1438 }
1439 }
1440
1441 void *pvDstPage;
1442 PPGMPAGEMAP pMapIgnored;
1443 rc = pgmPhysPageMap(pVM, &pRomPage->Shadow, GCPhys & X86_PTE_PG_MASK, &pMapIgnored, &pvDstPage);
1444 if (RT_SUCCESS(rc))
1445 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
1446
1447 pgmUnlock(pVM);
1448 return rc;
1449 }
1450
1451 default:
1452 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
1453 pRom->aPages[iPage].enmProt, iPage, GCPhys),
1454 VERR_INTERNAL_ERROR);
1455 }
1456}
1457
1458
1459
1460/**
1461 * Called by PGMR3Reset to reset the shadow, switch to the virgin,
1462 * and verify that the virgin part is untouched.
1463 *
1464 * This is done after the normal memory has been cleared.
1465 *
1466 * ASSUMES that the caller owns the PGM lock.
1467 *
1468 * @param pVM The VM handle.
1469 */
1470int pgmR3PhysRomReset(PVM pVM)
1471{
1472 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
1473 {
1474 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
1475
1476 if (pRom->fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
1477 {
1478 /*
1479 * Reset the physical handler.
1480 */
1481 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
1482 AssertRCReturn(rc, rc);
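/* With the protection back at READ_ROM_WRITE_IGNORE the virgin pages are what
   the guest sees again; the dirty shadow copies can now be discarded below. */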
1483
1484 /*
1485 * What we do with the shadow pages depends on the memory
1486 * preallocation option. If not enabled, we'll just throw
1487 * out all the dirty pages and replace them by the zero page.
1488 */
1489 if (1)///@todo !pVM->pgm.f.fRamPreAlloc)
1490 {
1491 /* Count dirty shadow pages. */
1492 uint32_t cDirty = 0;
1493 uint32_t iPage = cPages;
1494 while (iPage-- > 0)
1495 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1496 cDirty++;
1497 if (cDirty)
1498 {
1499 /* Free the dirty pages. */
1500 PGMMFREEPAGESREQ pReq;
1501 rc = GMMR3FreePagesPrepare(pVM, &pReq, cDirty, GMMACCOUNT_BASE);
1502 AssertRCReturn(rc, rc);
1503
1504 uint32_t iReqPage = 0;
1505 for (iPage = 0; iPage < cPages; iPage++)
1506 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1507 {
1508 pReq->aPages[iReqPage].idPage = PGM_PAGE_GET_PAGEID(&pRom->aPages[iPage].Shadow);
1509 iReqPage++;
1510 }
1511
1512 rc = GMMR3FreePagesPerform(pVM, pReq);
1513 GMMR3FreePagesCleanup(pReq);
1514 AssertRCReturn(rc, rc);
1515
1516 /* setup the zero page. */
1517 for (iPage = 0; iPage < cPages; iPage++)
1518 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1519 PGM_PAGE_INIT_ZERO_REAL(&pRom->aPages[iPage].Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
1520 }
1521 }
1522 else
1523 {
1524 /* clear all the pages. */
1525 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1526 {
1527 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
1528 rc = pgmPhysPageMakeWritable(pVM, &pRom->aPages[iPage].Shadow, GCPhys);
1529 if (RT_FAILURE(rc))
1530 break;
1531
1532 void *pvDstPage;
1533 PPGMPAGEMAP pMapIgnored;
1534 rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pMapIgnored, &pvDstPage);
1535 if (RT_FAILURE(rc))
1536 break;
1537 ASMMemZeroPage(pvDstPage);
1538 }
1539 AssertRCReturn(rc, rc);
1540 }
1541 }
1542
1543#ifdef VBOX_STRICT
1544 /*
1545 * Verify that the virgin page is unchanged if possible.
1546 */
1547 if (pRom->pvOriginal)
1548 {
1549 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
1550 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbSrcPage += PAGE_SIZE)
1551 {
1552 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
1553 PPGMPAGEMAP pMapIgnored;
1554 void *pvDstPage;
1555 int rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pMapIgnored, &pvDstPage);
1556 if (RT_FAILURE(rc))
1557 break;
1558 if (memcmp(pvDstPage, pbSrcPage, PAGE_SIZE))
1559 LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
1560 GCPhys, pRom->pszDesc));
1561 }
1562 }
1563#endif
1564 }
1565
1566 return VINF_SUCCESS;
1567}
1568
1569
1570/**
1571 * Change the shadowing of a range of ROM pages.
1572 *
1573 * This is intended for implementing chipset specific memory registers
1574 * and will not be very strict about the input. It will silently ignore
1575 * any pages that are not the part of a shadowed ROM.
1576 *
1577 * @returns VBox status code.
1578 * @param pVM Pointer to the shared VM structure.
1579 * @param GCPhys Where to start. Page aligned.
1580 * @param cb How much to change. Page aligned.
1581 * @param enmProt The new ROM protection.
1582 */
1583PGMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
1584{
1585 /*
1586 * Check input
1587 */
1588 if (!cb)
1589 return VINF_SUCCESS;
1590 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1591 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1592 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1593 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1594 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
1595
1596 /*
1597 * Process the request.
1598 */
1599 bool fFlushedPool = false;
1600 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
1601 if ( GCPhys <= pRom->GCPhysLast
1602 && GCPhysLast >= pRom->GCPhys)
1603 {
1604 /*
1605 * Iterate the relevant pages and make the necessary changes.
1606 */
1607 bool fChanges = false;
1608 uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
1609 ? pRom->cb >> PAGE_SHIFT
1610 : (GCPhysLast - pRom->GCPhys + 1) >> PAGE_SHIFT;
1611 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
1612 iPage < cPages;
1613 iPage++)
1614 {
1615 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
1616 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
1617 {
1618 fChanges = true;
1619
1620 /* flush the page pool first so we don't leave any usage references dangling. */
1621 if (!fFlushedPool)
1622 {
1623 pgmPoolFlushAll(pVM);
1624 fFlushedPool = true;
1625 }
1626
1627 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
1628 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
1629 PPGMPAGE pRamPage = pgmPhysGetPage(&pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));
1630
1631 *pOld = *pRamPage;
1632 *pRamPage = *pNew;
1633 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
1634 }
1635 }
1636
1637 /*
1638 * Reset the access handler if we made changes, no need
1639 * to optimize this.
1640 */
1641 if (fChanges)
1642 {
1643 int rc = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
1644 AssertRCReturn(rc, rc);
1645 }
1646
1647 /* Advance - cb isn't updated. */
1648 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
1649 }
1650
1651 return VINF_SUCCESS;
1652}
1653
1654
1655/**
1656 * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
1657 * registration APIs call to inform PGM about memory registrations.
1658 *
1659 * It registers the physical memory range with PGM. MM is responsible
1660 * for the toplevel things - allocation and locking - while PGM is taking
1661 * care of all the details and implements the physical address space virtualization.
1662 *
1663 * @returns VBox status.
1664 * @param pVM The VM handle.
1665 * @param pvRam HC virtual address of the RAM range. (page aligned)
1666 * @param GCPhys GC physical address of the RAM range. (page aligned)
1667 * @param cb Size of the RAM range. (page aligned)
1668 * @param fFlags Flags, MM_RAM_*.
1669 * @param paPages Pointer to an array of physical page descriptors.
1670 * @param pszDesc Description string.
1671 */
1672PGMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
1673{
1674 /*
1675 * Validate input.
1676 * (Not so important because callers are only MMR3PhysRegister()
1677 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
1678 */
1679 Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
1680
1681 Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
1682 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
1683 Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
1684 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
1685 Assert(!(fFlags & ~0xfff));
1686 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1687 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
1688 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
1689 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1690 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1691 if (GCPhysLast < GCPhys)
1692 {
1693 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
1694 return VERR_INVALID_PARAMETER;
1695 }
1696
1697 /*
1698 * Find range location and check for conflicts.
1699 */
1700 PPGMRAMRANGE pPrev = NULL;
1701 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
1702 while (pCur)
1703 {
1704 if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
1705 {
1706 AssertMsgFailed(("Conflict! This cannot happen!\n"));
1707 return VERR_PGM_RAM_CONFLICT;
1708 }
1709 if (GCPhysLast < pCur->GCPhys)
1710 break;
1711
1712 /* next */
1713 pPrev = pCur;
1714 pCur = pCur->pNextR3;
1715 }
1716
1717 /*
1718 * Allocate RAM range.
1719 * Small ranges are allocated from the heap, big ones have separate mappings.
1720 */
1721 size_t cbRam = RT_OFFSETOF(PGMRAMRANGE, aPages[cb >> PAGE_SHIFT]);
1722 PPGMRAMRANGE pNew;
1723 RTGCPTR GCPtrNew;
1724 int rc = VERR_NO_MEMORY;
1725 if (cbRam > PAGE_SIZE / 2)
1726 { /* large */
1727 cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
1728 rc = SUPPageAlloc(cbRam >> PAGE_SHIFT, (void **)&pNew);
1729 if (VBOX_SUCCESS(rc))
1730 {
1731 rc = MMR3HyperMapHCRam(pVM, pNew, cbRam, true, pszDesc, &GCPtrNew);
1732 if (VBOX_SUCCESS(rc))
1733 {
1734 Assert(MMHyperHC2GC(pVM, pNew) == GCPtrNew);
1735 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
1736 }
1737 else
1738 {
1739 AssertMsgFailed(("MMR3HyperMapHCRam(,,%#x,,,) -> %Vrc\n", cbRam, rc));
1740 SUPPageFree(pNew, cbRam >> PAGE_SHIFT);
1741 }
1742 }
1743 else
1744 AssertMsgFailed(("SUPPageAlloc(%#x,,) -> %Vrc\n", cbRam >> PAGE_SHIFT, rc));
1745
1746 }
1747/** @todo Make VGA and VMMDev register their memory at init time before the hma size is fixated. */
1748 if (RT_FAILURE(rc))
1749 { /* small + fallback (vga) */
1750 rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
1751 if (VBOX_SUCCESS(rc))
1752 GCPtrNew = MMHyperHC2GC(pVM, pNew);
1753 else
1754 AssertMsgFailed(("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, cb));
1755 }
1756 if (VBOX_SUCCESS(rc))
1757 {
1758 /*
1759 * Initialize the range.
1760 */
1761 pNew->pvHC = pvRam;
1762 pNew->GCPhys = GCPhys;
1763 pNew->GCPhysLast = GCPhysLast;
1764 pNew->cb = cb;
1765 pNew->fFlags = fFlags;
1766 pNew->pavHCChunkHC = NULL;
1767 pNew->pavHCChunkGC = 0;
1768
1769 unsigned iPage = cb >> PAGE_SHIFT;
1770 if (paPages)
1771 {
1772 while (iPage-- > 0)
1773 {
1774 PGM_PAGE_INIT(&pNew->aPages[iPage], paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
1775 fFlags & MM_RAM_FLAGS_MMIO2 ? PGMPAGETYPE_MMIO2 : PGMPAGETYPE_RAM,
1776 PGM_PAGE_STATE_ALLOCATED);
1777 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
1778 }
1779 }
1780 else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1781 {
1782 /* Allocate memory for chunk to HC ptr lookup array. */
1783 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
1784 AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc), rc);
1785
1786 pNew->pavHCChunkGC = MMHyperHC2GC(pVM, pNew->pavHCChunkHC);
1787 Assert(pNew->pavHCChunkGC);
1788
1789 /* Physical memory will be allocated on demand. */
1790 while (iPage-- > 0)
1791 {
1792 PGM_PAGE_INIT(&pNew->aPages[iPage], 0, NIL_GMM_PAGEID, PGMPAGETYPE_RAM, PGM_PAGE_STATE_ZERO);
1793 pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
1794 }
1795 }
1796 else
1797 {
1798 Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
1799 RTHCPHYS HCPhysDummyPage = MMR3PageDummyHCPhys(pVM);
1800 while (iPage-- > 0)
1801 {
1802 PGM_PAGE_INIT(&pNew->aPages[iPage], HCPhysDummyPage, NIL_GMM_PAGEID, PGMPAGETYPE_MMIO, PGM_PAGE_STATE_ZERO);
1803 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
1804 }
1805 }
1806
1807 /*
1808 * Insert the new RAM range.
1809 */
1810 pgmLock(pVM);
1811 pNew->pNextR3 = pCur;
1812 pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : NIL_RTR0PTR;
1813 pNew->pNextGC = pCur ? MMHyperCCToGC(pVM, pCur) : NIL_RTGCPTR;
1814 if (pPrev)
1815 {
1816 pPrev->pNextR3 = pNew;
1817 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
1818 pPrev->pNextGC = GCPtrNew;
1819 }
1820 else
1821 {
1822 pVM->pgm.s.pRamRangesR3 = pNew;
1823 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
1824 pVM->pgm.s.pRamRangesGC = GCPtrNew;
1825 }
1826 pgmUnlock(pVM);
1827 }
1828 return rc;
1829}
1830
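/*
 * Illustrative sketch (hypothetical caller, not part of the VBox sources): how a
 * component could register a reserved MMIO hole through PGMR3PhysRegister() above.
 * Per the asserts in that function, a pure RESERVED|MMIO registration needs neither
 * a host mapping nor a page descriptor array.  The address, size and description
 * below are made up for the example.
 */
#if 0 /* example only */
static int examplePhysRegisterMmioHole(PVM pVM)
{
    /* No pvRam and no paPages: the pages get backed by the dummy page and marked MMIO. */
    return PGMR3PhysRegister(pVM, NULL /*pvRam*/, 0xe0000000 /*GCPhys*/, 4*1024*1024 /*cb*/,
                             MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO, NULL /*paPages*/,
                             "Example MMIO hole");
}
#endif
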
1831#ifndef VBOX_WITH_NEW_PHYS_CODE
1832
1833/**
1834 * Register a chunk of the physical memory range with PGM. MM is responsible
1835 * for the toplevel things - allocation and locking - while PGM takes
1836 * care of all the details and implements the physical address space virtualization.
1837 *
1838 *
1839 * @returns VBox status.
1840 * @param pVM The VM handle.
1841 * @param pvRam HC virtual address of the RAM range. (page aligned)
1842 * @param GCPhys GC physical address of the RAM range. (page aligned)
1843 * @param cb Size of the RAM range. (page aligned)
1844 * @param fFlags Flags, MM_RAM_*.
1845 * @param paPages Pointer to an array of physical page descriptors.
1846 * @param pszDesc Description string.
1847 */
1848PGMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
1849{
1850 NOREF(pszDesc);
1851
1852 /*
1853 * Validate input.
1854 * (Not so important because callers are only MMR3PhysRegister()
1855 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
1856 */
1857 Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
1858
1859 Assert(paPages);
1860 Assert(pvRam);
1861 Assert(!(fFlags & ~0xfff));
1862 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1863 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
1864 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
1865 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1866 Assert(VM_IS_EMT(pVM));
1867 Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
1868 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
1869
1870 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1871 if (GCPhysLast < GCPhys)
1872 {
1873 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
1874 return VERR_INVALID_PARAMETER;
1875 }
1876
1877 /*
1878 * Find existing range location.
1879 */
1880 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1881 while (pRam)
1882 {
1883 RTGCPHYS off = GCPhys - pRam->GCPhys;
1884 if ( off < pRam->cb
1885 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1886 break;
1887
1888 pRam = CTXALLSUFF(pRam->pNext);
1889 }
1890 AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
1891
1892 unsigned off = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1893 unsigned iPage = cb >> PAGE_SHIFT;
1894 if (paPages)
1895 {
1896 while (iPage-- > 0)
1897 pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
1898 }
1899 off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
1900 pRam->pavHCChunkHC[off] = pvRam;
1901
1902 /* Notify the recompiler. */
1903 REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);
1904
1905 return VINF_SUCCESS;
1906}
1907
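/*
 * Worked example of the chunk lookup arithmetic in PGMR3PhysRegisterChunk() above
 * (example fragment only; PAGE_SHIFT is 12 and the sketch assumes 1 MB dynamic
 * chunks, i.e. PGM_DYNAMIC_CHUNK_SHIFT == 20 - see PGMInternal.h for the real value).
 */
#if 0 /* example fragment only */
    RTGCPHYS offRange = GCPhys - pRam->GCPhys;                           /* say 0x00340000 into the range */
    unsigned iPage    = offRange >> PAGE_SHIFT;                          /* 0x340: page index within the range */
    unsigned iChunk   = iPage >> (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT); /* 0x340 >> 8 = 3: chunk index */
    void    *pvChunk  = pRam->pavHCChunkHC[iChunk];                      /* host mapping of that chunk */
    NOREF(pvChunk);
#endif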
1908
1909/**
1910 * Allocate missing physical pages for an existing guest RAM range.
1911 *
1912 * @returns VBox status.
1913 * @param pVM The VM handle.
1914 * @param pGCPhys Pointer to the GC physical address of the RAM range. (page aligned)
1915 */
1916PGMR3DECL(int) PGM3PhysGrowRange(PVM pVM, PCRTGCPHYS pGCPhys)
1917{
1918 RTGCPHYS GCPhys = *pGCPhys;
1919
1920 /*
1921 * Walk range list.
1922 */
1923 pgmLock(pVM);
1924
1925 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1926 while (pRam)
1927 {
1928 RTGCPHYS off = GCPhys - pRam->GCPhys;
1929 if ( off < pRam->cb
1930 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1931 {
1932 bool fRangeExists = false;
1933 unsigned off = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
1934
1935 /** @note A request made from another thread may end up in EMT after somebody else has already allocated the range. */
1936 if (pRam->pavHCChunkHC[off])
1937 fRangeExists = true;
1938
1939 pgmUnlock(pVM);
1940 if (fRangeExists)
1941 return VINF_SUCCESS;
1942 return pgmr3PhysGrowRange(pVM, GCPhys);
1943 }
1944
1945 pRam = CTXALLSUFF(pRam->pNext);
1946 }
1947 pgmUnlock(pVM);
1948 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1949}
1950
1951
1952/**
1953 * Allocate missing physical pages for an existing guest RAM range.
1954 *
1955 * @returns VBox status.
1956 * @param pVM The VM handle.
1958 * @param GCPhys GC physical address of the RAM range. (page aligned)
1959 */
1960int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
1961{
1962 void *pvRam;
1963 int rc;
1964
1965 /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
1966 if (!VM_IS_EMT(pVM))
1967 {
1968 PVMREQ pReq;
1969 const RTGCPHYS GCPhysParam = GCPhys;
1970
1971 AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));
1972
1973 rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, &GCPhysParam);
1974 if (VBOX_SUCCESS(rc))
1975 {
1976 rc = pReq->iStatus;
1977 VMR3ReqFree(pReq);
1978 }
1979 return rc;
1980 }
1981
1982 /* Round down to chunk boundary */
1983 GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;
1984
1985 STAM_COUNTER_INC(&pVM->pgm.s.StatDynRamGrow);
1986 STAM_COUNTER_ADD(&pVM->pgm.s.StatDynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));
1987
1988 Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %VGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));
1989
1990 unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;
1991
1992 for (;;)
1993 {
1994 rc = SUPPageAlloc(cPages, &pvRam);
1995 if (VBOX_SUCCESS(rc))
1996 {
1997
1998 rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
1999 if (VBOX_SUCCESS(rc))
2000 return rc;
2001
2002 SUPPageFree(pvRam, cPages);
2003 }
2004
2005 VMSTATE enmVMState = VMR3GetState(pVM);
2006 if (enmVMState != VMSTATE_RUNNING)
2007 {
2008 AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %VGp!\n", GCPhys));
2009 LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %VGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
2010 return rc;
2011 }
2012
2013 LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));
2014
2015 /* Pause first, then inform Main. */
2016 rc = VMR3SuspendNoSave(pVM);
2017 AssertRC(rc);
2018
2019 VMSetRuntimeError(pVM, false, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM.");
2020
2021 /* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
2022 rc = VMR3WaitForResume(pVM);
2023
2024 /* Retry */
2025 LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
2026 }
2027}
2028
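/*
 * Illustrative sketch (hypothetical, not from the VBox sources): a caller that hits
 * an unallocated page in a dynamically allocated range only needs to pass any address
 * inside the missing chunk; pgmr3PhysGrowRange() rounds it down to the chunk base and
 * forwards the request to EMT when invoked from another thread.
 */
#if 0 /* example only */
static int exampleEnsureChunkAllocated(PVM pVM, RTGCPHYS GCPhysFault)
{
    const RTGCPHYS GCPhys = GCPhysFault;        /* any address within the missing chunk */
    int rc = PGM3PhysGrowRange(pVM, &GCPhys);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
#endif
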
2029#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2030
2031
2032/**
2033 * Interface that MMR3RomRegister() and MMR3PhysReserve() call to update the
2034 * flags of existing RAM ranges.
2035 *
2036 * @returns VBox status.
2037 * @param pVM The VM handle.
2038 * @param GCPhys GC physical address of the RAM range. (page aligned)
2039 * @param cb Size of the RAM range. (page aligned)
2040 * @param fFlags The OR flags, MM_RAM_* \#defines.
2041 * @param fMask The AND mask for the flags.
2042 */
2043PGMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
2044{
2045 Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));
2046
2047 /*
2048 * Validate input.
2049 * (Not so important because caller is always MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
2050 */
2051 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
2052 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
2053 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2054 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2055 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2056
2057 /*
2058 * Lookup the range.
2059 */
2060 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
2061 while (pRam && GCPhys > pRam->GCPhysLast)
2062 pRam = CTXALLSUFF(pRam->pNext);
2063 if ( !pRam
2064 || GCPhys > pRam->GCPhysLast
2065 || GCPhysLast < pRam->GCPhys)
2066 {
2067 AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
2068 return VERR_INVALID_PARAMETER;
2069 }
2070
2071 /*
2072 * Update the requested flags.
2073 */
2074 RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
2075 | fMask;
2076 unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
2077 unsigned iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2078 for ( ; iPage < iPageEnd; iPage++)
2079 pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */
2080
2081 return VINF_SUCCESS;
2082}
2083
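/*
 * Worked example of the flag merge in PGMR3PhysSetFlags() above: with fMask = 0 each
 * page ends up as
 *     HCPhys = (HCPhys & ~(RESERVED | ROM | MMIO | MMIO2)) | fFlags
 * i.e. the host physical address and all other status bits are preserved while the
 * four range-type flags are replaced by fFlags.  A non-zero fMask keeps the selected
 * old type flags in addition to the new ones.  The call below is hypothetical
 * (GCPhysRom/cbRom are placeholders) and simply marks an existing range as ROM.
 */
#if 0 /* example only */
    rc = PGMR3PhysSetFlags(pVM, GCPhysRom, cbRom, MM_RAM_FLAGS_ROM, 0 /*fMask*/);
    AssertRCReturn(rc, rc);
#endif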
2084
2085/**
2086 * Sets the Address Gate 20 state.
2087 *
2088 * @param pVM VM handle.
2089 * @param fEnable True if the gate should be enabled.
2090 * False if the gate should be disabled.
2091 */
2092PGMDECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable)
2093{
2094 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVM->pgm.s.fA20Enabled));
2095 if (pVM->pgm.s.fA20Enabled != (RTUINT)fEnable)
2096 {
2097 pVM->pgm.s.fA20Enabled = fEnable;
2098 pVM->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
2099 REMR3A20Set(pVM, fEnable);
2100 }
2101}
2102
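/*
 * Worked example for PGMR3PhysSetA20() above: with the gate disabled (fEnable = false)
 * the expression !fEnable << 20 yields 0x00100000, so GCPhysA20Mask becomes
 * ~(RTGCPHYS)0x00100000 and bit 20 of every guest physical address is forced to zero,
 * reproducing the classic 8086 1 MB wrap-around.  With the gate enabled the mask is
 * all ones and addresses pass through unchanged.
 */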
2103
2104/**
2105 * Tree enumeration callback for dealing with age rollover.
2106 * It will perform a simple compression of the current age.
2107 */
2108static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
2109{
2110 /* Age compression - ASSUMES iNow == 4. */
2111 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
2112 if (pChunk->iAge >= UINT32_C(0xffffff00))
2113 pChunk->iAge = 3;
2114 else if (pChunk->iAge >= UINT32_C(0xfffff000))
2115 pChunk->iAge = 2;
2116 else if (pChunk->iAge)
2117 pChunk->iAge = 1;
2118 else /* iAge = 0 */
2119 pChunk->iAge = 4;
2120
2121 /* reinsert */
2122 PVM pVM = (PVM)pvUser;
2123 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
2124 pChunk->AgeCore.Key = pChunk->iAge;
2125 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2126 return 0;
2127}
2128
2129
2130/**
2131 * Tree enumeration callback that updates the chunks that have
2132 * been used since the last ageing pass, stamping them with the current age (iNow).
2133 */
2134static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
2135{
2136 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
2137 if (!pChunk->iAge)
2138 {
2139 PVM pVM = (PVM)pvUser;
2140 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
2141 pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
2142 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2143 }
2144
2145 return 0;
2146}
2147
2148
2149/**
2150 * Performs ageing of the ring-3 chunk mappings.
2151 *
2152 * @param pVM The VM handle.
2153 */
2154PGMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
2155{
2156 pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
2157 pVM->pgm.s.ChunkR3Map.iNow++;
2158 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
2159 {
2160 pVM->pgm.s.ChunkR3Map.iNow = 4;
2161 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
2162 }
2163 else
2164 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
2165}
2166
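/*
 * Sketch of the ageing scheme implemented above (descriptive model, not part of the
 * sources): ChunkR3Map.iNow is a monotonically increasing clock and the age tree is
 * keyed by the iNow value a chunk was last stamped with, so a smaller key means
 * "unused for longer".  Chunks used since the previous pass carry iAge == 0 and get
 * restamped with the current iNow; e.g. with iNow == 9, a chunk last stamped at 5
 * sorts to the left of one stamped at 8 and is therefore the better unmap candidate.
 * When iNow wraps, the rollover callback compresses all existing stamps into 1..4 so
 * the relative ordering survives the wrap.
 */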
2167
2168/**
2169 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
2170 */
2171typedef struct PGMR3PHYSCHUNKUNMAPCB
2172{
2173 PVM pVM; /**< The VM handle. */
2174 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
2175} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
2176
2177
2178/**
2179 * Callback used to find the mapping that's been unused for
2180 * the longest time.
2181 */
2182static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
2183{
2184 do
2185 {
2186 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
2187 if ( pChunk->iAge
2188 && !pChunk->cRefs)
2189 {
2190 /*
2191 * Check that it's not in any of the TLBs.
2192 */
2193 PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
2194 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
2195 if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
2196 {
2197 pChunk = NULL;
2198 break;
2199 }
2200 if (pChunk)
2201 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
2202 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
2203 {
2204 pChunk = NULL;
2205 break;
2206 }
2207 if (pChunk)
2208 {
2209 ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
2210 return 1; /* done */
2211 }
2212 }
2213
2214 /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
2215 pNode = pNode->pList;
2216 } while (pNode);
2217 return 0;
2218}
2219
2220
2221/**
2222 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
2223 *
2224 * The candidate will not be part of any TLBs, so no need to flush
2225 * anything afterwards.
2226 *
2227 * @returns Chunk id.
2228 * @param pVM The VM handle.
2229 */
2230static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
2231{
2232 /*
2233 * Do tree ageing first?
2234 */
2235 if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
2236 PGMR3PhysChunkAgeing(pVM);
2237
2238 /*
2239 * Enumerate the age tree starting with the left most node.
2240 */
2241 PGMR3PHYSCHUNKUNMAPCB Args;
2242 Args.pVM = pVM;
2243 Args.pChunk = NULL;
2244 if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
2245 return Args.pChunk->Core.Key;
2246 return INT32_MAX;
2247}
2248
2249
2250/**
2251 * Maps the given chunk into the ring-3 mapping cache.
2252 *
2253 * This will call ring-0.
2254 *
2255 * @returns VBox status code.
2256 * @param pVM The VM handle.
2257 * @param idChunk The chunk in question.
2258 * @param ppChunk Where to store the chunk tracking structure.
2259 *
2260 * @remarks Called from within the PGM critical section.
2261 */
2262int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
2263{
2264 int rc;
2265 /*
2266 * Allocate a new tracking structure first.
2267 */
2268#if 0 /* for later when we've got a separate mapping method for ring-0. */
2269 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
2270 AssertReturn(pChunk, VERR_NO_MEMORY);
2271#else
2272 PPGMCHUNKR3MAP pChunk;
2273 rc = MMHyperAlloc(pVM, sizeof(*pChunk), 0, MM_TAG_PGM_CHUNK_MAPPING, (void **)&pChunk);
2274 AssertRCReturn(rc, rc);
2275#endif
2276 pChunk->Core.Key = idChunk;
2277 pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
2278 pChunk->iAge = 0;
2279 pChunk->cRefs = 0;
2280 pChunk->cPermRefs = 0;
2281 pChunk->pv = NULL;
2282
2283 /*
2284 * Request the ring-0 part to map the chunk in question and if
2285 * necessary unmap another one to make space in the mapping cache.
2286 */
2287 GMMMAPUNMAPCHUNKREQ Req;
2288 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
2289 Req.Hdr.cbReq = sizeof(Req);
2290 Req.pvR3 = NULL;
2291 Req.idChunkMap = idChunk;
2292 Req.idChunkUnmap = INT32_MAX;
2293 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
2294 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
2295 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
2296 if (VBOX_SUCCESS(rc))
2297 {
2298 /*
2299 * Update the tree.
2300 */
2301 /* insert the new one. */
2302 AssertPtr(Req.pvR3);
2303 pChunk->pv = Req.pvR3;
2304 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
2305 AssertRelease(fRc);
2306 pVM->pgm.s.ChunkR3Map.c++;
2307
2308 fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2309 AssertRelease(fRc);
2310
2311 /* remove the unmapped one. */
2312 if (Req.idChunkUnmap != INT32_MAX)
2313 {
2314 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
2315 AssertRelease(pUnmappedChunk);
2316 pUnmappedChunk->pv = NULL;
2317 pUnmappedChunk->Core.Key = UINT32_MAX;
2318#if 0 /* for later when we've got a separate mapping method for ring-0. */
2319 MMR3HeapFree(pUnmappedChunk);
2320#else
2321 MMHyperFree(pVM, pUnmappedChunk);
2322#endif
2323 pVM->pgm.s.ChunkR3Map.c--;
2324 }
2325 }
2326 else
2327 {
2328 AssertRC(rc);
2329#if 0 /* for later when we've got a separate mapping method for ring-0. */
2330 MMR3HeapFree(pChunk);
2331#else
2332 MMHyperFree(pVM, pChunk);
2333#endif
2334 pChunk = NULL;
2335 }
2336
2337 *ppChunk = pChunk;
2338 return rc;
2339}
2340
2341
2342/**
2343 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
2344 *
2345 * @returns see pgmR3PhysChunkMap.
2346 * @param pVM The VM handle.
2347 * @param idChunk The chunk to map.
2348 */
2349PDMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
2350{
2351 PPGMCHUNKR3MAP pChunk;
2352 return pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
2353}
2354
2355
2356/**
2357 * Invalidates the TLB for the ring-3 mapping cache.
2358 *
2359 * @param pVM The VM handle.
2360 */
2361PGMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
2362{
2363 pgmLock(pVM);
2364 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
2365 {
2366 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
2367 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
2368 }
2369 pgmUnlock(pVM);
2370}
2371
2372
2373/**
2374 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
2375 *
2376 * @returns The following VBox status codes.
2377 * @retval VINF_SUCCESS on success. FF cleared.
2378 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
2379 *
2380 * @param pVM The VM handle.
2381 */
2382PDMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
2383{
2384 pgmLock(pVM);
2385 int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
2386 if (rc == VERR_GMM_SEED_ME)
2387 {
2388 void *pvChunk;
2389 rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
2390 if (VBOX_SUCCESS(rc))
2391 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
2392 if (VBOX_FAILURE(rc))
2393 {
2394 LogRel(("PGM: GMM Seeding failed, rc=%Vrc\n", rc));
2395 rc = VINF_EM_NO_MEMORY;
2396 }
2397 }
2398 pgmUnlock(pVM);
2399 Assert(rc == VINF_SUCCESS || rc == VINF_EM_NO_MEMORY);
2400 return rc;
2401}
2402
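/*
 * Illustrative sketch (hypothetical caller, not from the VBox sources): ring-3 code
 * reacting to VM_FF_PGM_NEED_HANDY_PAGES would typically call the function above and
 * propagate VINF_EM_NO_MEMORY so the execution loop can handle the out-of-memory
 * condition.  The VM_FF_ISSET() test is assumed to be the usual force-action check.
 */
#if 0 /* example only */
    if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
    {
        rc = PGMR3PhysAllocateHandyPages(pVM);
        if (rc == VINF_EM_NO_MEMORY)
            return rc;  /* the force-action flag stays set; EM deals with it */
    }
#endif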