VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp@7624

Last change on this file since 7624 was 7613, checked in by vboxsync, 17 years ago

better fix for r29105 (see #2680): Make PGM3PhysGrowRange() take PCRTGCPHYS not RTGCPHYS to be able to call it with VMR3ReqCall()

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 60.4 KB
1/* $Id: PGMPhys.cpp 7613 2008-03-28 08:25:54Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/pgm.h>
24#include <VBox/cpum.h>
25#include <VBox/iom.h>
26#include <VBox/sup.h>
27#include <VBox/mm.h>
28#include <VBox/stam.h>
29#include <VBox/rem.h>
30#include <VBox/csam.h>
31#include "PGMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/dbg.h>
34#include <VBox/param.h>
35#include <VBox/err.h>
36#include <iprt/assert.h>
37#include <iprt/alloc.h>
38#include <iprt/asm.h>
39#include <VBox/log.h>
40#include <iprt/thread.h>
41#include <iprt/string.h>
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47/*static - shut up warning */
48DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
49
50
51
52/*
53 * PGMR3PhysReadByte/Word/Dword
54 * PGMR3PhysWriteByte/Word/Dword
55 */
56/** @todo rename and add U64. */
57
58#define PGMPHYSFN_READNAME PGMR3PhysReadByte
59#define PGMPHYSFN_WRITENAME PGMR3PhysWriteByte
60#define PGMPHYS_DATASIZE 1
61#define PGMPHYS_DATATYPE uint8_t
62#include "PGMPhys.h"
63
64#define PGMPHYSFN_READNAME PGMR3PhysReadWord
65#define PGMPHYSFN_WRITENAME PGMR3PhysWriteWord
66#define PGMPHYS_DATASIZE 2
67#define PGMPHYS_DATATYPE uint16_t
68#include "PGMPhys.h"
69
70#define PGMPHYSFN_READNAME PGMR3PhysReadDword
71#define PGMPHYSFN_WRITENAME PGMR3PhysWriteDword
72#define PGMPHYS_DATASIZE 4
73#define PGMPHYS_DATATYPE uint32_t
74#include "PGMPhys.h"
75
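The three include passes above are a small template trick: PGMPhys.h is compiled once per access size, with PGMPHYSFN_READNAME/PGMPHYSFN_WRITENAME and PGMPHYS_DATATYPE selecting the function names and the element type, yielding PGMR3PhysReadByte/Word/Dword and the matching writers. A minimal usage sketch, assuming the template expands to the usual prototypes (the read returns the value, the write takes it as the last argument - the exact declarations live in PGMPhys.h, which is not shown here), with GCPhysReg as a hypothetical guest-physical address:

    uint32_t u32 = PGMR3PhysReadDword(pVM, GCPhysReg);   /* read 4 bytes of guest physical memory */
    PGMR3PhysWriteDword(pVM, GCPhysReg, u32 | 1);        /* write the modified value back */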
76
77
78/**
79 * Links a new RAM range into the list.
80 *
81 * @param pVM Pointer to the shared VM structure.
82 * @param pNew Pointer to the new list entry.
83 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
84 */
85static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
86{
87 pgmLock(pVM);
88
89 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
90 pNew->pNextR3 = pRam;
91 pNew->pNextR0 = pRam ? MMHyperCCToR0(pVM, pRam) : NIL_RTR0PTR;
92 pNew->pNextGC = pRam ? MMHyperCCToGC(pVM, pRam) : NIL_RTGCPTR;
93
94 if (pPrev)
95 {
96 pPrev->pNextR3 = pNew;
97 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
98 pPrev->pNextGC = MMHyperCCToGC(pVM, pNew);
99 }
100 else
101 {
102 pVM->pgm.s.pRamRangesR3 = pNew;
103 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
104 pVM->pgm.s.pRamRangesGC = MMHyperCCToGC(pVM, pNew);
105 }
106
107 pgmUnlock(pVM);
108}
109
110
111/**
112 * Unlinks a RAM range from the list.
113 *
114 * @param pVM Pointer to the shared VM structure.
115 * @param pRam Pointer to the list entry to unlink.
116 * @param pPrev Pointer to the previous list entry. NULL if pRam is the list head.
117 */
118static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
119{
120 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
121
122 pgmLock(pVM);
123
124 PPGMRAMRANGE pNext = pRam->pNextR3;
125 if (pPrev)
126 {
127 pPrev->pNextR3 = pNext;
128 pPrev->pNextR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
129 pPrev->pNextGC = pNext ? MMHyperCCToGC(pVM, pNext) : NIL_RTGCPTR;
130 }
131 else
132 {
133 pVM->pgm.s.pRamRangesR3 = pNext;
134 pVM->pgm.s.pRamRangesR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
135 pVM->pgm.s.pRamRangesGC = pNext ? MMHyperCCToGC(pVM, pNext) : NIL_RTGCPTR;
136 }
137
138 pgmUnlock(pVM);
139}
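Both helpers keep the ring-3, ring-0 and GC next pointers in sync, which is why callers only need to track the ring-3 predecessor. A minimal sketch of how an internal caller might remove a range by its start address, assuming it runs on the EMT like the registration code below; the helper name pgmR3PhysRemoveByGCPhys is made up for illustration:

    static void pgmR3PhysRemoveByGCPhys(PVM pVM, RTGCPHYS GCPhys)
    {
        PPGMRAMRANGE pPrev = NULL;
        PPGMRAMRANGE pRam  = pVM->pgm.s.pRamRangesR3;
        while (pRam && pRam->GCPhys != GCPhys)
        {
            pPrev = pRam;                   /* remember the ring-3 predecessor */
            pRam  = pRam->pNextR3;
        }
        if (pRam)                           /* found it - unlink it from all three contexts */
            pgmR3PhysUnlinkRamRange(pVM, pRam, pPrev);
    }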
140
141
142
143/**
144 * Sets up a RAM range.
145 *
146 * This will check for conflicting registrations, make a resource
147 * reservation for the memory (with GMM), and setup the per-page
148 * tracking structures (PGMPAGE).
149 *
150 * @returns VBox status code.
151 * @param pVM Pointer to the shared VM structure.
152 * @param GCPhys The physical address of the RAM.
153 * @param cb The size of the RAM.
154 * @param pszDesc The description - not copied, so, don't free or change it.
155 */
156PGMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
157{
158 /*
159 * Validate input.
160 */
161 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
162 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
163 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
164 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
165 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
166 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
167 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
168 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
169
170 /*
171 * Find range location and check for conflicts.
172 * (We don't lock here because the locking by EMT is only required on update.)
173 */
174 PPGMRAMRANGE pPrev = NULL;
175 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
176 while (pRam && GCPhysLast >= pRam->GCPhys)
177 {
178 if ( GCPhys <= pRam->GCPhysLast
179 && GCPhysLast >= pRam->GCPhys)
180 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
181 GCPhys, GCPhysLast, pszDesc,
182 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
183 VERR_PGM_RAM_CONFLICT);
184
185 /* next */
186 pPrev = pRam;
187 pRam = pRam->pNextR3;
188 }
189
190 /*
191 * Register it with GMM (the API bitches).
192 */
193 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
194 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
195 if (RT_FAILURE(rc))
196 return rc;
197
198 /*
199 * Allocate RAM range.
200 */
201 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
202 PPGMRAMRANGE pNew;
203 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
204 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zd\n", cbRamRange), rc);
205
206 /*
207 * Initialize the range.
208 */
209 pNew->GCPhys = GCPhys;
210 pNew->GCPhysLast = GCPhysLast;
211 pNew->pszDesc = pszDesc;
212 pNew->cb = cb;
213 pNew->fFlags = 0;
214 pNew->pvHC = NULL;
215
216 pNew->pavHCChunkHC = NULL;
217 pNew->pavHCChunkGC = 0;
218
219#ifndef VBOX_WITH_NEW_PHYS_CODE
220 /* Allocate memory for chunk to HC ptr lookup array. */
221 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
222 AssertRCReturn(rc, rc);
223 pNew->pavHCChunkGC = MMHyperCCToGC(pVM, pNew->pavHCChunkHC);
224 pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
225
226#endif
227 RTGCPHYS iPage = cPages;
228 while (iPage-- > 0)
229 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
230
231 /*
232 * Insert the new RAM range.
233 */
234 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
235
236 /*
237 * Notify REM.
238 */
239#ifdef VBOX_WITH_NEW_PHYS_CODE
240 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, 0);
241#else
242 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, MM_RAM_FLAGS_DYNAMIC_ALLOC);
243#endif
244
245 return VINF_SUCCESS;
246}
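A typical caller is the RAM setup code running on the EMT during VM construction. A minimal sketch, assuming the size comes from the VM configuration (the call site and value are illustrative, not taken from this file):

    RTGCPHYS cbRam = 128 * 1024 * 1024;                       /* assumed: 128 MB read from CFGM */
    int rc = PGMR3PhysRegisterRam(pVM, 0 /*GCPhys*/, cbRam, "Base RAM");
    if (RT_FAILURE(rc))
        return rc;                                            /* reservation or range allocation failed */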
247
248
249/**
250 * This is the interface IOM is using to register an MMIO region.
251 *
252 * It will check for conflicts and ensure that a RAM range structure
253 * is present before calling the PGMR3HandlerPhysicalRegister API to
254 * register the callbacks.
255 *
256 */
257PDMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
258{
259 return -1;
260}
261
262
263/**
264 * This is the interface IOM is using to register an MMIO region.
265 *
266 * It will validate the MMIO region, call PGMHandlerPhysicalDeregister,
267 * and free the RAM range if one was allocated specially for this MMIO
268 * region.
269 */
270PDMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
271{
272 return -1;
273}
274
275
276/**
277 * Allocate and register a MMIO2 region.
278 *
279 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's
280 * RAM associated with a device. It is also non-shared memory with a
281 * permanent ring-3 mapping and page backing (presently).
282 *
283 * A MMIO2 range may overlap with base memory if a lot of RAM
284 * is configured for the VM, in which case we'll drop the base
285 * memory pages. Presently we will make no attempt to preserve
286 * anything that happens to be present in the base memory that
287 * is replaced; this is of course incorrect, but it's too much
288 * effort.
289 */
290PDMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb, void **ppv, const char *pszDesc)
291{
292 return -1;
293}
294
295
296/**
297 * Reallocates a MMIO2 region.
298 *
299 * This is done when a guest / the bios / state loading changes the
300 * PCI config. The replacing of base memory has the same restrictions
301 * as during registration, of course.
302 */
303PDMR3DECL(int) PGMR3PhysMMIO2Relocate(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew)
304{
305 return -1;
306}
307
308
309/**
310 * Deregisters and frees a MMIO2 region.
311 *
312 * Any physical (and virtual) access handlers registered for the region must
313 * be deregistered before calling this function.
314 */
315PDMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, RTGCPHYS GCPhys, void *pv)
316{
317 return -1;
318}
319
320
321/**
322 * Registers a ROM image.
323 *
324 * Shadowed ROM images require double the amount of backing memory, so
325 * don't use that unless you have to. Shadowing of ROM images is the process
326 * where we can select where the reads go and where the writes go. On real
327 * hardware the chipset provides means to configure this. We provide
328 * PGMR3PhysProtectROM() for this purpose.
329 *
330 * A read-only copy of the ROM image will always be kept around while we
331 * will allocate RAM pages for the changes on demand (unless all memory
332 * is configured to be preallocated).
333 *
334 * @returns VBox status.
335 * @param pVM VM Handle.
336 * @param pDevIns The device instance owning the ROM.
337 * @param GCPhys First physical address in the range.
338 * Must be page aligned!
339 * @param cb The size of the range (in bytes).
340 * Must be page aligned!
341 * @param pvBinary Pointer to the binary data backing the ROM image.
342 * This must be exactly \a cb in size.
343 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAG_SHADOWED
344 * and/or PGMPHYS_ROM_FLAG_PERMANENT_BINARY.
345 * @param pszDesc Pointer to description string. This must not be freed.
346 *
347 * @remark There is no way to remove the ROM yet, either automatically on device
348 * cleanup or manually from the device. This isn't difficult in any way; it's
349 * just not something we expect to be necessary for a while.
350 */
351PGMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
352 const void *pvBinary, uint32_t fFlags, const char *pszDesc)
353{
354 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p fFlags=%#x pszDesc=%s\n",
355 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, fFlags, pszDesc));
356
357 /*
358 * Validate input.
359 */
360 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
361 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
362 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
363 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
364 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
365 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
366 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
367 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAG_SHADOWED | PGMPHYS_ROM_FLAG_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
368 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
369
370 const uint32_t cPages = cb >> PAGE_SHIFT;
371
372 /*
373 * Find the ROM location in the ROM list first.
374 */
375 PPGMROMRANGE pRomPrev = NULL;
376 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
377 while (pRom && GCPhysLast >= pRom->GCPhys)
378 {
379 if ( GCPhys <= pRom->GCPhysLast
380 && GCPhysLast >= pRom->GCPhys)
381 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
382 GCPhys, GCPhysLast, pszDesc,
383 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
384 VERR_PGM_RAM_CONFLICT);
385 /* next */
386 pRomPrev = pRom;
387 pRom = pRom->pNextR3;
388 }
389
390 /*
391 * Find the RAM location and check for conflicts.
392 *
393 * Conflict detection is a bit different than for RAM
394 * registration since a ROM can be located within a RAM
395 * range. So, what we have to check for is other memory
396 * types (other than RAM that is) and that we don't span
397 * more than one RAM range (lazy).
398 */
399 bool fRamExists = false;
400 PPGMRAMRANGE pRamPrev = NULL;
401 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
402 while (pRam && GCPhysLast >= pRam->GCPhys)
403 {
404 if ( GCPhys <= pRam->GCPhysLast
405 && GCPhysLast >= pRam->GCPhys)
406 {
407 /* completely within? */
408 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
409 && GCPhysLast <= pRam->GCPhysLast,
410 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
411 GCPhys, GCPhysLast, pszDesc,
412 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
413 VERR_PGM_RAM_CONFLICT);
414 fRamExists = true;
415 break;
416 }
417
418 /* next */
419 pRamPrev = pRam;
420 pRam = pRam->pNextR3;
421 }
422 if (fRamExists)
423 {
424 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
425 uint32_t cPagesLeft = cPages;
426 while (cPagesLeft-- > 0)
427 {
428 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
429 ("%RGp isn't a RAM page (%d) - registering %RGp-%RGp (%s).\n",
430 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pszDesc),
431 VERR_PGM_RAM_CONFLICT);
432 Assert(PGM_PAGE_IS_ZERO(pPage));
433 }
434 }
435
436 /*
437 * Update the base memory reservation if necessary.
438 */
439 uint32_t cExtraBaseCost = fRamExists ? cPages : 0;
440 if (fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
441 cExtraBaseCost += cPages;
442 if (cExtraBaseCost)
443 {
444 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
445 if (RT_FAILURE(rc))
446 return rc;
447 }
448
449 /*
450 * Allocate memory for the virgin copy of the RAM.
451 */
452 PGMMALLOCATEPAGESREQ pReq;
453 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
454 AssertRCReturn(rc, rc);
455
456 for (uint32_t iPage = 0; iPage < cPages; iPage++)
457 {
458 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
459 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
460 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
461 }
462
463 pgmLock(pVM);
464 rc = GMMR3AllocatePagesPerform(pVM, pReq);
465 pgmUnlock(pVM);
466 if (RT_FAILURE(rc))
467 {
468 GMMR3AllocatePagesCleanup(pReq);
469 return rc;
470 }
471
472 /*
473 * Allocate the new ROM range and RAM range (if necessary).
474 */
475 PPGMROMRANGE pRomNew;
476 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
477 if (RT_SUCCESS(rc))
478 {
479 PPGMRAMRANGE pRamNew = NULL;
480 if (!fRamExists)
481 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
482 if (RT_SUCCESS(rc))
483 {
484 pgmLock(pVM);
485
486 /*
487 * Initialize and insert the RAM range (if required).
488 */
489 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
490 if (!fRamExists)
491 {
492 pRamNew->GCPhys = GCPhys;
493 pRamNew->GCPhysLast = GCPhysLast;
494 pRamNew->pszDesc = pszDesc;
495 pRamNew->cb = cb;
496 pRamNew->fFlags = 0;
497 pRamNew->pvHC = NULL;
498
499 PPGMPAGE pPage = &pRamNew->aPages[0];
500 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
501 {
502 PGM_PAGE_INIT(pPage,
503 pReq->aPages[iPage].HCPhysGCPhys,
504 pReq->aPages[iPage].idPage,
505 PGMPAGETYPE_ROM,
506 PGM_PAGE_STATE_ALLOCATED);
507
508 pRomPage->Virgin = *pPage;
509 }
510
511 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
512 }
513 else
514 {
515 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
516 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
517 {
518 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_ROM);
519 PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
520 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
521 PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);
522
523 pRomPage->Virgin = *pPage;
524 }
525
526 pRamNew = pRam;
527 }
528 pgmUnlock(pVM);
529
530
531 /*
532 * Register the write access handler for the range (PGMROMPROT_READ_ROM_WRITE_IGNORE).
533 */
534 rc = PGMR3HandlerPhysicalRegister(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhys, GCPhysLast,
535#if 0 /** @todo we actually need a ring-3 write handler here for shadowed ROMs, so hack REM! */
536 pgmR3PhysRomWriteHandler, pRomNew,
537#else
538 NULL, NULL,
539#endif
540 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
541 NULL, "pgmPhysRomWriteHandler", MMHyperCCToGC(pVM, pRomNew), pszDesc);
542 if (RT_SUCCESS(rc))
543 {
544 pgmLock(pVM);
545
546 /*
547 * Copy the image over to the virgin pages.
548 * This must be done after linking in the RAM range.
549 */
550 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
551 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
552 {
553 void *pvDstPage;
554 PPGMPAGEMAP pMapIgnored;
555 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pMapIgnored, &pvDstPage);
556 if (RT_FAILURE(rc))
557 {
558 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
559 break;
560 }
561 memcpy(pvDstPage, (const uint8_t *)pvBinary + (iPage << PAGE_SHIFT), PAGE_SIZE);
562 }
563 if (RT_SUCCESS(rc))
564 {
565 /*
566 * Initialize the ROM range.
567 * Note that the Virgin member of the pages has already been initialized above.
568 */
569 pRomNew->GCPhys = GCPhys;
570 pRomNew->GCPhysLast = GCPhysLast;
571 pRomNew->cb = cb;
572 pRomNew->fFlags = fFlags;
573 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAG_PERMANENT_BINARY ? pvBinary : NULL;
574 pRomNew->pszDesc = pszDesc;
575
576 for (unsigned iPage = 0; iPage < cPages; iPage++)
577 {
578 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
579 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
580 PGM_PAGE_INIT_ZERO_REAL(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
581 }
582
583 /*
584 * Insert the ROM range, tell REM and return successfully.
585 */
586 pRomNew->pNextR3 = pRom;
587 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
588 pRomNew->pNextGC = pRom ? MMHyperCCToGC(pVM, pRom) : NIL_RTGCPTR;
589
590 if (pRomPrev)
591 {
592 pRomPrev->pNextR3 = pRomNew;
593 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
594 pRomPrev->pNextGC = MMHyperCCToGC(pVM, pRomNew);
595 }
596 else
597 {
598 pVM->pgm.s.pRomRangesR3 = pRomNew;
599 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
600 pVM->pgm.s.pRomRangesGC = MMHyperCCToGC(pVM, pRomNew);
601 }
602
603 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false); /** @todo fix shadowing and REM. */
604
605 GMMR3AllocatePagesCleanup(pReq);
606 pgmUnlock(pVM);
607 return VINF_SUCCESS;
608 }
609
610 /* bail out */
611
612 pgmUnlock(pVM);
613 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
614 AssertRC(rc2);
615 pgmLock(pVM);
616 }
617
618 pgmR3PhysUnlinkRamRange(pVM, pRamNew, pRamPrev);
619 if (pRamNew)
620 MMHyperFree(pVM, pRamNew);
621 }
622 MMHyperFree(pVM, pRomNew);
623 }
624
625 /** @todo Purge the mapping cache or something... */
626 GMMR3FreeAllocatedPages(pVM, pReq);
627 GMMR3AllocatePagesCleanup(pReq);
628 pgmUnlock(pVM);
629 return rc;
630}
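A sketch of how a device constructor might register a shadowed system BIOS, assuming pvBiosBinary points to a 0x20000 byte image loaded elsewhere (the address, size and names are illustrative only):

    int rc = PGMR3PhysRomRegister(pVM, pDevIns,
                                  0xfffe0000 /*GCPhys*/, 0x20000 /*cb*/,
                                  pvBiosBinary,
                                  PGMPHYS_ROM_FLAG_SHADOWED, "System BIOS");
    AssertRCReturn(rc, rc);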
631
632
633/**
634 * \#PF Handler callback for ROM write accesses.
635 *
636 * @returns VINF_SUCCESS if the handler has carried out the operation.
637 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
638 * @param pVM VM Handle.
639 * @param GCPhys The physical address the guest is writing to.
640 * @param pvPhys The HC mapping of that address.
641 * @param pvBuf What the guest is reading/writing.
642 * @param cbBuf How much it's reading/writing.
643 * @param enmAccessType The access type.
644 * @param pvUser User argument.
645 */
646/*static - shut up warning */
647 DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
648{
649 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
650 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
651 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
652 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
653 switch (pRomPage->enmProt)
654 {
655 /*
656 * Ignore.
657 */
658 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
659 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
660 return VINF_SUCCESS;
661
662 /*
663 * Write to the ram page.
664 */
665 case PGMROMPROT_READ_ROM_WRITE_RAM:
666 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
667 {
668 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
669 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
670
671 /*
672 * Take the lock, do lazy allocation, map the page and copy the data.
673 *
674 * Note that we have to bypass the mapping TLB since it works on
675 * guest physical addresses and entering the shadow page would
676 * kind of screw things up...
677 */
678 int rc = pgmLock(pVM);
679 AssertRC(rc);
680
681 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pRomPage->Shadow) != PGM_PAGE_STATE_ALLOCATED))
682 {
683 rc = pgmPhysPageMakeWritable(pVM, &pRomPage->Shadow, GCPhys);
684 if (RT_FAILURE(rc))
685 {
686 pgmUnlock(pVM);
687 return rc;
688 }
689 }
690
691 void *pvDstPage;
692 PPGMPAGEMAP pMapIgnored;
693 rc = pgmPhysPageMap(pVM, &pRomPage->Shadow, GCPhys & X86_PTE_PG_MASK, &pMapIgnored, &pvDstPage);
694 if (RT_SUCCESS(rc))
695 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
696
697 pgmUnlock(pVM);
698 return rc;
699 }
700
701 default:
702 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
703 pRom->aPages[iPage].enmProt, iPage, GCPhys),
704 VERR_INTERNAL_ERROR);
705 }
706}
707
708
709
710/**
711 * Called by PGMR3Reset to reset the shadow, switch to the virgin,
712 * and verify that the virgin part is untouched.
713 *
714 * This is done after the normal memory has been cleared.
715 *
716 * @param pVM The VM handle.
717 */
718int pgmR3PhysRomReset(PVM pVM)
719{
720 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
721 {
722 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
723
724 if (pRom->fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
725 {
726 /*
727 * Reset the physical handler.
728 */
729 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
730 AssertRCReturn(rc, rc);
731
732 /*
733 * What we do with the shadow pages depends on the memory
734 * preallocation option. If not enabled, we'll just throw
735 * out all the dirty pages and replace them by the zero page.
736 */
737 if (1)///@todo !pVM->pgm.f.fRamPreAlloc)
738 {
739 /* Count dirty shadow pages. */
740 uint32_t cDirty = 0;
741 uint32_t iPage = cPages;
742 while (iPage-- > 0)
743 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
744 cDirty++;
745 if (cDirty)
746 {
747 /* Free the dirty pages. */
748 PGMMFREEPAGESREQ pReq;
749 rc = GMMR3FreePagesPrepare(pVM, &pReq, cDirty, GMMACCOUNT_BASE);
750 AssertRCReturn(rc, rc);
751
752 uint32_t iReqPage = 0;
753 for (iPage = 0; iPage < cPages; iPage++)
754 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
755 {
756 pReq->aPages[iReqPage].idPage = PGM_PAGE_GET_PAGEID(&pRom->aPages[iPage].Shadow);
757 iReqPage++;
758 }
759
760 rc = GMMR3FreePagesPerform(pVM, pReq);
761 GMMR3FreePagesCleanup(pReq);
762 AssertRCReturn(rc, rc);
763
764 /* setup the zero page. */
765 for (iPage = 0; iPage < cPages; iPage++)
766 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
767 PGM_PAGE_INIT_ZERO_REAL(&pRom->aPages[iPage].Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
768 }
769 }
770 else
771 {
772 /* clear all the pages. */
773 pgmLock(pVM);
774 for (uint32_t iPage = 0; iPage < cPages; iPage++)
775 {
776 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
777 rc = pgmPhysPageMakeWritable(pVM, &pRom->aPages[iPage].Shadow, GCPhys);
778 if (RT_FAILURE(rc))
779 break;
780
781 void *pvDstPage;
782 PPGMPAGEMAP pMapIgnored;
783 rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pMapIgnored, &pvDstPage);
784 if (RT_FAILURE(rc))
785 break;
786 memset(pvDstPage, 0, PAGE_SIZE);
787 }
788 pgmUnlock(pVM);
789 AssertRCReturn(rc, rc);
790 }
791 }
792
793#ifdef VBOX_STRICT
794 /*
795 * Verify that the virgin page is unchanged if possible.
796 */
797 if (pRom->pvOriginal)
798 {
799 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
800 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbSrcPage += PAGE_SIZE)
801 {
802 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
803 PPGMPAGEMAP pMapIgnored;
804 void *pvDstPage;
805 int rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pMapIgnored, &pvDstPage);
806 if (RT_FAILURE(rc))
807 break;
808 if (memcmp(pvDstPage, pbSrcPage, PAGE_SIZE))
809 LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
810 GCPhys, pRom->pszDesc));
811 }
812 }
813#endif
814 }
815
816 return VINF_SUCCESS;
817}
818
819
820/**
821 * Change the shadowing of a range of ROM pages.
822 *
823 * This is intended for implementing chipset specific memory registers
824 * and will not be very strict about the input. It will silently ignore
825 * any pages that are not part of a shadowed ROM.
826 *
827 * @returns VBox status code.
828 * @param pVM Pointer to the shared VM structure.
829 * @param GCPhys Where to start. Page aligned.
830 * @param cb How much to change. Page aligned.
831 * @param enmProt The new ROM protection.
832 */
833PGMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
834{
835 /*
836 * Check input
837 */
838 if (!cb)
839 return VINF_SUCCESS;
840 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
841 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
842 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
843 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
844 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
845
846 /*
847 * Process the request.
848 */
849 bool fFlushedPool = false;
850 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
851 if ( GCPhys <= pRom->GCPhysLast
852 && GCPhysLast >= pRom->GCPhys)
853 {
854 /*
855 * Iterate the relevant pages and make the necessary changes.
856 */
857 bool fChanges = false;
858 uint32_t const cPages = pRom->GCPhysLast > GCPhysLast
859 ? pRom->cb >> PAGE_SHIFT
860 : (GCPhysLast - pRom->GCPhys) >> PAGE_SHIFT;
861 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
862 iPage < cPages;
863 iPage++)
864 {
865 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
866 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
867 {
868 fChanges = true;
869
870 /* flush the page pool first so we don't leave any usage references dangling. */
871 if (!fFlushedPool)
872 {
873 pgmPoolFlushAll(pVM);
874 fFlushedPool = true;
875 }
876
877 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
878 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
879 PPGMPAGE pRamPage = pgmPhysGetPage(&pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));
880
881 *pOld = *pRamPage;
882 *pRamPage = *pNew;
883 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
884 }
885 }
886
887 /*
888 * Reset the access handler if we made changes, no need
889 * to optimize this.
890 */
891 if (fChanges)
892 {
893 int rc = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
894 AssertRCReturn(rc, rc);
895 }
896
897 /* Advance - cb isn't updated. */
898 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
899 }
900
901 return VINF_SUCCESS;
902}
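This is the hook a chipset emulation would use when the guest reprograms its PAM-style shadow RAM registers. A sketch, assuming the region at 0xc0000 was registered as a shadowed ROM (address, size and the chosen protection are illustrative):

    /* The guest enabled "read from RAM, ignore writes" for the 16KB block at C0000h. */
    int rc = PGMR3PhysRomProtect(pVM, 0xc0000, 0x4000, PGMROMPROT_READ_RAM_WRITE_IGNORE);
    AssertRCReturn(rc, rc);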
903
904
905/**
906 * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
907 * registration APIs call to inform PGM about memory registrations.
908 *
909 * It registers the physical memory range with PGM. MM is responsible
910 * for the top-level things - allocation and locking - while PGM takes care
911 * of all the details and implements the physical address space virtualization.
912 *
913 * @returns VBox status.
914 * @param pVM The VM handle.
915 * @param pvRam HC virtual address of the RAM range. (page aligned)
916 * @param GCPhys GC physical address of the RAM range. (page aligned)
917 * @param cb Size of the RAM range. (page aligned)
918 * @param fFlags Flags, MM_RAM_*.
919 * @param paPages Pointer to an array of physical page descriptors.
920 * @param pszDesc Description string.
921 */
922PGMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
923{
924 /*
925 * Validate input.
926 * (Not so important because callers are only MMR3PhysRegister()
927 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
928 */
929 Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
930
931 Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
932 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
933 Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
934 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
935 Assert(!(fFlags & ~0xfff));
936 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
937 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
938 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
939 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
940 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
941 if (GCPhysLast < GCPhys)
942 {
943 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
944 return VERR_INVALID_PARAMETER;
945 }
946
947 /*
948 * Find range location and check for conflicts.
949 */
950 PPGMRAMRANGE pPrev = NULL;
951 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
952 while (pCur)
953 {
954 if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
955 {
956 AssertMsgFailed(("Conflict! This cannot happen!\n"));
957 return VERR_PGM_RAM_CONFLICT;
958 }
959 if (GCPhysLast < pCur->GCPhys)
960 break;
961
962 /* next */
963 pPrev = pCur;
964 pCur = pCur->pNextR3;
965 }
966
967 /*
968 * Allocate RAM range.
969 * Small ranges are allocated from the heap, big ones have separate mappings.
970 */
971 size_t cbRam = RT_OFFSETOF(PGMRAMRANGE, aPages[cb >> PAGE_SHIFT]);
972 PPGMRAMRANGE pNew;
973 RTGCPTR GCPtrNew;
974 int rc = VERR_NO_MEMORY;
975 if (cbRam > PAGE_SIZE / 2)
976 { /* large */
977 cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
978 rc = SUPPageAlloc(cbRam >> PAGE_SHIFT, (void **)&pNew);
979 if (VBOX_SUCCESS(rc))
980 {
981 rc = MMR3HyperMapHCRam(pVM, pNew, cbRam, true, pszDesc, &GCPtrNew);
982 if (VBOX_SUCCESS(rc))
983 {
984 Assert(MMHyperHC2GC(pVM, pNew) == GCPtrNew);
985 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
986 }
987 else
988 {
989 AssertMsgFailed(("MMR3HyperMapHCRam(,,%#x,,,) -> %Vrc\n", cbRam, rc));
990 SUPPageFree(pNew, cbRam >> PAGE_SHIFT);
991 }
992 }
993 else
994 AssertMsgFailed(("SUPPageAlloc(%#x,,) -> %Vrc\n", cbRam >> PAGE_SHIFT, rc));
995
996 }
997/** @todo Make VGA and VMMDev register their memory at init time before the hma size is fixed. */
998 if (RT_FAILURE(rc))
999 { /* small + fallback (vga) */
1000 rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
1001 if (VBOX_SUCCESS(rc))
1002 GCPtrNew = MMHyperHC2GC(pVM, pNew);
1003 else
1004 AssertMsgFailed(("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc));
1005 }
1006 if (VBOX_SUCCESS(rc))
1007 {
1008 /*
1009 * Initialize the range.
1010 */
1011 pNew->pvHC = pvRam;
1012 pNew->GCPhys = GCPhys;
1013 pNew->GCPhysLast = GCPhysLast;
1014 pNew->cb = cb;
1015 pNew->fFlags = fFlags;
1016 pNew->pavHCChunkHC = NULL;
1017 pNew->pavHCChunkGC = 0;
1018
1019 unsigned iPage = cb >> PAGE_SHIFT;
1020 if (paPages)
1021 {
1022 while (iPage-- > 0)
1023 {
1024 PGM_PAGE_INIT(&pNew->aPages[iPage], paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
1025 fFlags & MM_RAM_FLAGS_MMIO2 ? PGMPAGETYPE_MMIO2 : PGMPAGETYPE_RAM,
1026 PGM_PAGE_STATE_ALLOCATED);
1027 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
1028 }
1029 }
1030 else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1031 {
1032 /* Allocate memory for chunk to HC ptr lookup array. */
1033 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
1034 AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc), rc);
1035
1036 pNew->pavHCChunkGC = MMHyperHC2GC(pVM, pNew->pavHCChunkHC);
1037 Assert(pNew->pavHCChunkGC);
1038
1039 /* Physical memory will be allocated on demand. */
1040 while (iPage-- > 0)
1041 {
1042 PGM_PAGE_INIT(&pNew->aPages[iPage], 0, NIL_GMM_PAGEID, PGMPAGETYPE_RAM, PGM_PAGE_STATE_ZERO);
1043 pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
1044 }
1045 }
1046 else
1047 {
1048 Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
1049 RTHCPHYS HCPhysDummyPage = MMR3PageDummyHCPhys(pVM);
1050 while (iPage-- > 0)
1051 {
1052 PGM_PAGE_INIT(&pNew->aPages[iPage], HCPhysDummyPage, NIL_GMM_PAGEID, PGMPAGETYPE_MMIO, PGM_PAGE_STATE_ZERO);
1053 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
1054 }
1055 }
1056
1057 /*
1058 * Insert the new RAM range.
1059 */
1060 pgmLock(pVM);
1061 pNew->pNextR3 = pCur;
1062 pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : NIL_RTR0PTR;
1063 pNew->pNextGC = pCur ? MMHyperCCToGC(pVM, pCur) : NIL_RTGCPTR;
1064 if (pPrev)
1065 {
1066 pPrev->pNextR3 = pNew;
1067 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
1068 pPrev->pNextGC = GCPtrNew;
1069 }
1070 else
1071 {
1072 pVM->pgm.s.pRamRangesR3 = pNew;
1073 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
1074 pVM->pgm.s.pRamRangesGC = GCPtrNew;
1075 }
1076 pgmUnlock(pVM);
1077 }
1078 return rc;
1079}
1080
1081#ifndef VBOX_WITH_NEW_PHYS_CODE
1082
1083/**
1084 * Register a chunk of the physical memory range with PGM. MM is responsible
1085 * for the top-level things - allocation and locking - while PGM takes care
1086 * of all the details and implements the physical address space virtualization.
1087 *
1088 *
1089 * @returns VBox status.
1090 * @param pVM The VM handle.
1091 * @param pvRam HC virtual address of the RAM range. (page aligned)
1092 * @param GCPhys GC physical address of the RAM range. (page aligned)
1093 * @param cb Size of the RAM range. (page aligned)
1094 * @param fFlags Flags, MM_RAM_*.
1095 * @param paPages Pointer to an array of physical page descriptors.
1096 * @param pszDesc Description string.
1097 */
1098PGMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
1099{
1100 NOREF(pszDesc);
1101
1102 /*
1103 * Validate input.
1104 * (Not so important because callers are only MMR3PhysRegister()
1105 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
1106 */
1107 Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
1108
1109 Assert(paPages);
1110 Assert(pvRam);
1111 Assert(!(fFlags & ~0xfff));
1112 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1113 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
1114 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
1115 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1116 Assert(VM_IS_EMT(pVM));
1117 Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
1118 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
1119
1120 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1121 if (GCPhysLast < GCPhys)
1122 {
1123 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
1124 return VERR_INVALID_PARAMETER;
1125 }
1126
1127 /*
1128 * Find existing range location.
1129 */
1130 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1131 while (pRam)
1132 {
1133 RTGCPHYS off = GCPhys - pRam->GCPhys;
1134 if ( off < pRam->cb
1135 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1136 break;
1137
1138 pRam = CTXALLSUFF(pRam->pNext);
1139 }
1140 AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
1141
1142 unsigned off = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1143 unsigned iPage = cb >> PAGE_SHIFT;
1144 if (paPages)
1145 {
1146 while (iPage-- > 0)
1147 pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
1148 }
1149 off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
1150 pRam->pavHCChunkHC[off] = pvRam;
1151
1152 /* Notify the recompiler. */
1153 REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);
1154
1155 return VINF_SUCCESS;
1156}
1157
1158
1159/**
1160 * Allocate missing physical pages for an existing guest RAM range.
1161 *
1162 * @returns VBox status.
1163 * @param pVM The VM handle.
1164 * @param pGCPhys Pointer to the GC physical address of the RAM range. (page aligned)
1165 */
1166PGMR3DECL(int) PGM3PhysGrowRange(PVM pVM, PCRTGCPHYS pGCPhys)
1167{
1168 RTGCPHYS GCPhys = *pGCPhys;
1169
1170 /*
1171 * Walk range list.
1172 */
1173 pgmLock(pVM);
1174
1175 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1176 while (pRam)
1177 {
1178 RTGCPHYS off = GCPhys - pRam->GCPhys;
1179 if ( off < pRam->cb
1180 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1181 {
1182 bool fRangeExists = false;
1183 unsigned off = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
1184
1185 /** @note A request made from another thread may end up in EMT after somebody else has already allocated the range. */
1186 if (pRam->pavHCChunkHC[off])
1187 fRangeExists = true;
1188
1189 pgmUnlock(pVM);
1190 if (fRangeExists)
1191 return VINF_SUCCESS;
1192 return pgmr3PhysGrowRange(pVM, GCPhys);
1193 }
1194
1195 pRam = CTXALLSUFF(pRam->pNext);
1196 }
1197 pgmUnlock(pVM);
1198 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1199}
1200
1201
1202/**
1203 * Allocate missing physical pages for an existing guest RAM range.
1204 *
1205 * @returns VBox status.
1206 * @param pVM The VM handle.
1207 * @param GCPhys GC physical address of the RAM range. (page aligned)
1208 *
1209 */
1210int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
1211{
1212 void *pvRam;
1213 int rc;
1214
1215 /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
1216 if (!VM_IS_EMT(pVM))
1217 {
1218 PVMREQ pReq;
1219 const RTGCPHYS GCPhysParam = GCPhys;
1220
1221 AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));
1222
1223 rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, &GCPhysParam);
1224 if (VBOX_SUCCESS(rc))
1225 {
1226 rc = pReq->iStatus;
1227 VMR3ReqFree(pReq);
1228 }
1229 return rc;
1230 }
1231
1232 /* Round down to chunk boundary */
1233 GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;
1234
1235 STAM_COUNTER_INC(&pVM->pgm.s.StatDynRamGrow);
1236 STAM_COUNTER_ADD(&pVM->pgm.s.StatDynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));
1237
1238 Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %VGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));
1239
1240 unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;
1241
1242 for (;;)
1243 {
1244 rc = SUPPageAlloc(cPages, &pvRam);
1245 if (VBOX_SUCCESS(rc))
1246 {
1247
1248 rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
1249 if (VBOX_SUCCESS(rc))
1250 return rc;
1251
1252 SUPPageFree(pvRam, cPages);
1253 }
1254
1255 VMSTATE enmVMState = VMR3GetState(pVM);
1256 if (enmVMState != VMSTATE_RUNNING)
1257 {
1258 AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %VGp!\n", GCPhys));
1259 LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %VGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
1260 return rc;
1261 }
1262
1263 LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));
1264
1265 /* Pause first, then inform Main. */
1266 rc = VMR3SuspendNoSave(pVM);
1267 AssertRC(rc);
1268
1269 VMSetRuntimeError(pVM, false, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM.");
1270
1271 /* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
1272 rc = VMR3WaitForResume(pVM);
1273
1274 /* Retry */
1275 LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
1276 }
1277}
1278
1279#endif /* !VBOX_WITH_NEW_PHYS_CODE */
1280
1281/**
1282 * Interface for MMIO handler relocation calls.
1283 *
1284 * It relocates an existing physical memory range with PGM.
1285 *
1286 * @returns VBox status.
1287 * @param pVM The VM handle.
1288 * @param GCPhysOld Previous GC physical address of the RAM range. (page aligned)
1289 * @param GCPhysNew New GC physical address of the RAM range. (page aligned)
1290 * @param cb Size of the RAM range. (page aligned)
1291 */
1292PGMR3DECL(int) PGMR3PhysRelocate(PVM pVM, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, size_t cb)
1293{
1294 /*
1295 * Validate input.
1296 * (Not so important because callers are only MMR3PhysRelocate(),
1297 * but anyway...)
1298 */
1299 Log(("PGMR3PhysRelocate Old %VGp New %VGp (%#x bytes)\n", GCPhysOld, GCPhysNew, cb));
1300
1301 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1302 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
1303 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
1304 RTGCPHYS GCPhysLast;
1305 GCPhysLast = GCPhysOld + (cb - 1);
1306 if (GCPhysLast < GCPhysOld)
1307 {
1308 AssertMsgFailed(("The old range wraps! GCPhys=%VGp cb=%#x\n", GCPhysOld, cb));
1309 return VERR_INVALID_PARAMETER;
1310 }
1311 GCPhysLast = GCPhysNew + (cb - 1);
1312 if (GCPhysLast < GCPhysNew)
1313 {
1314 AssertMsgFailed(("The new range wraps! GCPhys=%VGp cb=%#x\n", GCPhysNew, cb));
1315 return VERR_INVALID_PARAMETER;
1316 }
1317
1318 /*
1319 * Find and remove old range location.
1320 */
1321 pgmLock(pVM);
1322 PPGMRAMRANGE pPrev = NULL;
1323 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
1324 while (pCur)
1325 {
1326 if (pCur->GCPhys == GCPhysOld && pCur->cb == cb)
1327 break;
1328
1329 /* next */
1330 pPrev = pCur;
1331 pCur = pCur->pNextR3;
1332 }
1333 if (pPrev)
1334 {
1335 pPrev->pNextR3 = pCur->pNextR3;
1336 pPrev->pNextR0 = pCur->pNextR0;
1337 pPrev->pNextGC = pCur->pNextGC;
1338 }
1339 else
1340 {
1341 pVM->pgm.s.pRamRangesR3 = pCur->pNextR3;
1342 pVM->pgm.s.pRamRangesR0 = pCur->pNextR0;
1343 pVM->pgm.s.pRamRangesGC = pCur->pNextGC;
1344 }
1345
1346 /*
1347 * Update the range.
1348 */
1349 pCur->GCPhys = GCPhysNew;
1350 pCur->GCPhysLast = GCPhysLast;
1351 PPGMRAMRANGE pNew = pCur;
1352
1353 /*
1354 * Find range location and check for conflicts.
1355 */
1356 pPrev = NULL;
1357 pCur = pVM->pgm.s.pRamRangesR3;
1358 while (pCur)
1359 {
1360 if (GCPhysNew <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
1361 {
1362 AssertMsgFailed(("Conflict! This cannot happen!\n"));
1363 pgmUnlock(pVM);
1364 return VERR_PGM_RAM_CONFLICT;
1365 }
1366 if (GCPhysLast < pCur->GCPhys)
1367 break;
1368
1369 /* next */
1370 pPrev = pCur;
1371 pCur = pCur->pNextR3;
1372 }
1373
1374 /*
1375 * Reinsert the RAM range.
1376 */
1377 pNew->pNextR3 = pCur;
1378 pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : 0;
1379 pNew->pNextGC = pCur ? MMHyperCCToGC(pVM, pCur) : 0;
1380 if (pPrev)
1381 {
1382 pPrev->pNextR3 = pNew;
1383 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
1384 pPrev->pNextGC = MMHyperCCToGC(pVM, pNew);
1385 }
1386 else
1387 {
1388 pVM->pgm.s.pRamRangesR3 = pNew;
1389 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
1390 pVM->pgm.s.pRamRangesGC = MMHyperCCToGC(pVM, pNew);
1391 }
1392
1393 pgmUnlock(pVM);
1394 return VINF_SUCCESS;
1395}
1396
1397
1398/**
1399 * Interface that MMR3RomRegister() and MMR3PhysReserve() call to update the
1400 * flags of existing RAM ranges.
1401 *
1402 * @returns VBox status.
1403 * @param pVM The VM handle.
1404 * @param GCPhys GC physical address of the RAM range. (page aligned)
1405 * @param cb Size of the RAM range. (page aligned)
1406 * @param fFlags The OR flags, MM_RAM_* \#defines.
1407 * @param fMask The AND mask for the flags.
1408 */
1409PGMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
1410{
1411 Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));
1412
1413 /*
1414 * Validate input.
1415 * (Not so important because caller is always MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
1416 */
1417 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
1418 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1419 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1420 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1421 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1422
1423 /*
1424 * Lookup the range.
1425 */
1426 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1427 while (pRam && GCPhys > pRam->GCPhysLast)
1428 pRam = CTXALLSUFF(pRam->pNext);
1429 if ( !pRam
1430 || GCPhys > pRam->GCPhysLast
1431 || GCPhysLast < pRam->GCPhys)
1432 {
1433 AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
1434 return VERR_INVALID_PARAMETER;
1435 }
1436
1437 /*
1438 * Update the requested flags.
1439 */
1440 RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
1441 | fMask;
1442 unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
1443 unsigned iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1444 for ( ; iPage < iPageEnd; iPage++)
1445 pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */
1446
1447 return VINF_SUCCESS;
1448}
1449
1450
1451/**
1452 * Sets the Address Gate 20 state.
1453 *
1454 * @param pVM VM handle.
1455 * @param fEnable True if the gate should be enabled.
1456 * False if the gate should be disabled.
1457 */
1458PGMDECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable)
1459{
1460 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVM->pgm.s.fA20Enabled));
1461 if (pVM->pgm.s.fA20Enabled != (RTUINT)fEnable)
1462 {
1463 pVM->pgm.s.fA20Enabled = fEnable;
1464 pVM->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
1465 REMR3A20Set(pVM, fEnable);
1466 }
1467}
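The mask assignment above relies on !fEnable being 0 or 1: with the gate disabled the mask becomes ~(RTGCPHYS)(1 << 20), clearing address bit 20 and reproducing the 8086-style wrap at 1MB; with the gate enabled the mask is all ones and addresses pass through unchanged. A small illustration of applying the mask (GCPhys is a hypothetical guest-physical address; how the rest of PGM consumes GCPhysA20Mask is not shown in this file):

    /* A20 disabled: 0x00100000 & ~(RTGCPHYS)(1 << 20) == 0x00000000 (wraps to zero).
       A20 enabled:  the mask is ~(RTGCPHYS)0, so the address is left untouched.     */
    RTGCPHYS GCPhysMasked = GCPhys & pVM->pgm.s.GCPhysA20Mask;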
1468
1469
1470/**
1471 * Tree enumeration callback for dealing with age rollover.
1472 * It will perform a simple compression of the current age.
1473 */
1474static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
1475{
1476 /* Age compression - ASSUMES iNow == 4. */
1477 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
1478 if (pChunk->iAge >= UINT32_C(0xffffff00))
1479 pChunk->iAge = 3;
1480 else if (pChunk->iAge >= UINT32_C(0xfffff000))
1481 pChunk->iAge = 2;
1482 else if (pChunk->iAge)
1483 pChunk->iAge = 1;
1484 else /* iAge = 0 */
1485 pChunk->iAge = 4;
1486
1487 /* reinsert */
1488 PVM pVM = (PVM)pvUser;
1489 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
1490 pChunk->AgeCore.Key = pChunk->iAge;
1491 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
1492 return 0;
1493}
1494
1495
1496/**
1497 * Tree enumeration callback that updates the chunks that have
1498 * been used since the last ageing pass.
1499 */
1500static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
1501{
1502 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
1503 if (!pChunk->iAge)
1504 {
1505 PVM pVM = (PVM)pvUser;
1506 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
1507 pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
1508 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
1509 }
1510
1511 return 0;
1512}
1513
1514
1515/**
1516 * Performs ageing of the ring-3 chunk mappings.
1517 *
1518 * @param pVM The VM handle.
1519 */
1520PGMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
1521{
1522 pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
1523 pVM->pgm.s.ChunkR3Map.iNow++;
1524 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
1525 {
1526 pVM->pgm.s.ChunkR3Map.iNow = 4;
1527 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
1528 }
1529 else
1530 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
1531}
1532
1533
1534/**
1535 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
1536 */
1537typedef struct PGMR3PHYSCHUNKUNMAPCB
1538{
1539 PVM pVM; /**< The VM handle. */
1540 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
1541} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
1542
1543
1544/**
1545 * Callback used to find the mapping that's been unused for
1546 * the longest time.
1547 */
1548static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
1549{
1550 do
1551 {
1552 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
1553 if ( pChunk->iAge
1554 && !pChunk->cRefs)
1555 {
1556 /*
1557 * Check that it's not in any of the TLBs.
1558 */
1559 PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
1560 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
1561 if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
1562 {
1563 pChunk = NULL;
1564 break;
1565 }
1566 if (pChunk)
1567 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
1568 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
1569 {
1570 pChunk = NULL;
1571 break;
1572 }
1573 if (pChunk)
1574 {
1575 ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
1576 return 1; /* done */
1577 }
1578 }
1579
1580 /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
1581 pNode = pNode->pList;
1582 } while (pNode);
1583 return 0;
1584}
1585
1586
1587/**
1588 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
1589 *
1590 * The candidate will not be part of any TLBs, so no need to flush
1591 * anything afterwards.
1592 *
1593 * @returns Chunk id.
1594 * @param pVM The VM handle.
1595 */
1596static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
1597{
1598 /*
1599 * Do tree ageing first?
1600 */
1601 if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
1602 PGMR3PhysChunkAgeing(pVM);
1603
1604 /*
1605 * Enumerate the age tree starting with the left most node.
1606 */
1607 PGMR3PHYSCHUNKUNMAPCB Args;
1608 Args.pVM = pVM;
1609 Args.pChunk = NULL;
1610 if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
1611 return Args.pChunk->Core.Key;
1612 return INT32_MAX;
1613}
1614
1615
1616/**
1617 * Maps the given chunk into the ring-3 mapping cache.
1618 *
1619 * This will call ring-0.
1620 *
1621 * @returns VBox status code.
1622 * @param pVM The VM handle.
1623 * @param idChunk The chunk in question.
1624 * @param ppChunk Where to store the chunk tracking structure.
1625 *
1626 * @remarks Called from within the PGM critical section.
1627 */
1628int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
1629{
1630 int rc;
1631 /*
1632 * Allocate a new tracking structure first.
1633 */
1634#if 0 /* for later when we've got a separate mapping method for ring-0. */
1635 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
1636 AssertReturn(pChunk, VERR_NO_MEMORY);
1637#else
1638 PPGMCHUNKR3MAP pChunk;
1639 rc = MMHyperAlloc(pVM, sizeof(*pChunk), 0, MM_TAG_PGM_CHUNK_MAPPING, (void **)&pChunk);
1640 AssertRCReturn(rc, rc);
1641#endif
1642 pChunk->Core.Key = idChunk;
1643 pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
1644 pChunk->iAge = 0;
1645 pChunk->cRefs = 0;
1646 pChunk->cPermRefs = 0;
1647 pChunk->pv = NULL;
1648
1649 /*
1650 * Request the ring-0 part to map the chunk in question and if
1651 * necessary unmap another one to make space in the mapping cache.
1652 */
1653 GMMMAPUNMAPCHUNKREQ Req;
1654 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
1655 Req.Hdr.cbReq = sizeof(Req);
1656 Req.pvR3 = NULL;
1657 Req.idChunkMap = idChunk;
1658 Req.idChunkUnmap = INT32_MAX;
1659 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
1660 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
1661 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
1662 if (VBOX_SUCCESS(rc))
1663 {
1664 /*
1665 * Update the tree.
1666 */
1667 /* insert the new one. */
1668 AssertPtr(Req.pvR3);
1669 pChunk->pv = Req.pvR3;
1670 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
1671 AssertRelease(fRc);
1672 pVM->pgm.s.ChunkR3Map.c++;
1673
1674 fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
1675 AssertRelease(fRc);
1676
1677 /* remove the unmapped one. */
1678 if (Req.idChunkUnmap != INT32_MAX)
1679 {
1680 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
1681 AssertRelease(pUnmappedChunk);
1682 pUnmappedChunk->pv = NULL;
1683 pUnmappedChunk->Core.Key = UINT32_MAX;
1684#if 0 /* for later when we've got a separate mapping method for ring-0. */
1685 MMR3HeapFree(pUnmappedChunk);
1686#else
1687 MMHyperFree(pVM, pUnmappedChunk);
1688#endif
1689 pVM->pgm.s.ChunkR3Map.c--;
1690 }
1691 }
1692 else
1693 {
1694 AssertRC(rc);
1695#if 0 /* for later when we've got a separate mapping method for ring-0. */
1696 MMR3HeapFree(pChunk);
1697#else
1698 MMHyperFree(pVM, pChunk);
1699#endif
1700 pChunk = NULL;
1701 }
1702
1703 *ppChunk = pChunk;
1704 return rc;
1705}
1706
1707
1708/**
1709 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
1710 *
1711 * @returns see pgmR3PhysChunkMap.
1712 * @param pVM The VM handle.
1713 * @param idChunk The chunk to map.
1714 */
1715PDMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
1716{
1717 PPGMCHUNKR3MAP pChunk;
1718 return pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
1719}
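Internal PGM code would normally use the pgmR3PhysChunkMap() worker directly so it also gets the mapping it asked for. A sketch, assuming the caller already owns the PGM critical section as required by the remarks above; idChunk and offIntoChunk are illustrative values:

    PPGMCHUNKR3MAP pChunk;
    int rc = pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
    if (RT_SUCCESS(rc))
    {
        /* pChunk->pv is the ring-3 base address of the freshly mapped GMM chunk. */
        uint8_t *pbPage = (uint8_t *)pChunk->pv + offIntoChunk;   /* assumed page offset within the chunk */
        NOREF(pbPage);
    }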
1720
1721
1722/**
1723 * Invalidates the TLB for the ring-3 mapping cache.
1724 *
1725 * @param pVM The VM handle.
1726 */
1727PGMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
1728{
1729 pgmLock(pVM);
1730 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
1731 {
1732 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
1733 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
1734 }
1735 pgmUnlock(pVM);
1736}
1737
1738
1739/**
1740 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
1741 *
1742 * @returns The following VBox status codes.
1743 * @retval VINF_SUCCESS on success. FF cleared.
1744 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
1745 *
1746 * @param pVM The VM handle.
1747 */
1748PDMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
1749{
1750 pgmLock(pVM);
1751 int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
1752 if (rc == VERR_GMM_SEED_ME)
1753 {
1754 void *pvChunk;
1755 rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
1756 if (VBOX_SUCCESS(rc))
1757 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
1758 if (VBOX_FAILURE(rc))
1759 {
1760 LogRel(("PGM: GMM Seeding failed, rc=%Vrc\n", rc));
1761 rc = VINF_EM_NO_MEMORY;
1762 }
1763 }
1764 pgmUnlock(pVM);
1765 Assert(rc == VINF_SUCCESS || rc == VINF_EM_NO_MEMORY);
1766 return rc;
1767}
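The caller is the EMT force-flag processing path: when ring-0 runs low on handy pages it raises VM_FF_PGM_NEED_HANDY_PAGES (or returns VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES) and ring-3 replenishes the pool here. A sketch of that ring-3 side, assuming the usual VM_FF_ISSET() test macro from VBox/vm.h (the real dispatch lives in EM/VMM, not in this file):

    if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM);  /* clears the force flag on success, see above */
        if (rc == VINF_EM_NO_MEMORY)
            return rc;                              /* leave the FF set and let EM handle the OOM state */
    }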
1768