VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp@6854

Last change on this file since 6854 was 6854, checked in by vboxsync, 17 years ago

All the new ROM stuff. Had to change PGMROMPAGE a bit to make it easier to work with wrt. mapping.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 60.4 KB
1/* $Id: PGMPhys.cpp 6854 2008-02-07 19:24:14Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/pgm.h>
24#include <VBox/cpum.h>
25#include <VBox/iom.h>
26#include <VBox/sup.h>
27#include <VBox/mm.h>
28#include <VBox/stam.h>
29#include <VBox/rem.h>
30#include <VBox/csam.h>
31#include "PGMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/dbg.h>
34#include <VBox/param.h>
35#include <VBox/err.h>
36#include <iprt/assert.h>
37#include <iprt/alloc.h>
38#include <iprt/asm.h>
39#include <VBox/log.h>
40#include <iprt/thread.h>
41#include <iprt/string.h>
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47/*static - shut up warning */
48DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
49
50
51
52/*
53 * PGMR3PhysReadByte/Word/Dword
54 * PGMR3PhysWriteByte/Word/Dword
55 */
56/** @todo rename and add U64. */
57
58#define PGMPHYSFN_READNAME PGMR3PhysReadByte
59#define PGMPHYSFN_WRITENAME PGMR3PhysWriteByte
60#define PGMPHYS_DATASIZE 1
61#define PGMPHYS_DATATYPE uint8_t
62#include "PGMPhys.h"
63
64#define PGMPHYSFN_READNAME PGMR3PhysReadWord
65#define PGMPHYSFN_WRITENAME PGMR3PhysWriteWord
66#define PGMPHYS_DATASIZE 2
67#define PGMPHYS_DATATYPE uint16_t
68#include "PGMPhys.h"
69
70#define PGMPHYSFN_READNAME PGMR3PhysReadDword
71#define PGMPHYSFN_WRITENAME PGMR3PhysWriteDword
72#define PGMPHYS_DATASIZE 4
73#define PGMPHYS_DATATYPE uint32_t
74#include "PGMPhys.h"
75
76
77
78/**
79 * Links a new RAM range into the list.
80 *
81 * @param pVM Pointer to the shared VM structure.
82 * @param pNew Pointer to the new list entry.
83 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
84 */
85static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
86{
87 pgmLock(pVM);
88
89 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
90 pNew->pNextR3 = pRam;
91 pNew->pNextR0 = pRam ? MMHyperCCToR0(pVM, pRam) : NIL_RTR0PTR;
92 pNew->pNextGC = pRam ? MMHyperCCToGC(pVM, pRam) : NIL_RTGCPTR;
93
94 if (pPrev)
95 {
96 pPrev->pNextR3 = pNew;
97 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
98 pPrev->pNextGC = MMHyperCCToGC(pVM, pNew);
99 }
100 else
101 {
102 pVM->pgm.s.pRamRangesR3 = pNew;
103 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
104 pVM->pgm.s.pRamRangesGC = MMHyperCCToGC(pVM, pNew);
105 }
106
107 pgmUnlock(pVM);
108}
109
110
111/**
112 * Unlinks a RAM range from the list.
113 *
114 * @param pVM Pointer to the shared VM structure.
115 * @param pRam Pointer to the list entry to unlink.
116 * @param pPrev Pointer to the previous list entry. If NULL, the range is the list head.
117 */
118static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
119{
120 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
121
122 pgmLock(pVM);
123
124 PPGMRAMRANGE pNext = pRam->pNextR3;
125 if (pPrev)
126 {
127 pPrev->pNextR3 = pNext;
128 pPrev->pNextR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
129 pPrev->pNextGC = pNext ? MMHyperCCToGC(pVM, pNext) : NIL_RTGCPTR;
130 }
131 else
132 {
133 pVM->pgm.s.pRamRangesR3 = pNext;
134 pVM->pgm.s.pRamRangesR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
135 pVM->pgm.s.pRamRangesGC = pNext ? MMHyperCCToGC(pVM, pNext) : NIL_RTGCPTR;
136 }
137
138 pgmUnlock(pVM);
139}
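
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the link/unlink helpers above maintain a singly linked list of RAM ranges
 * with parallel ring-3, ring-0 and GC next pointers. A ring-3 lookup over
 * that list might look like this; exampleFindRamRange is a hypothetical helper.
 */
#if 0 /* example only */
static PPGMRAMRANGE exampleFindRamRange(PVM pVM, RTGCPHYS GCPhys)
{
    for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
        if (GCPhys - pRam->GCPhys < pRam->cb)   /* within [GCPhys, GCPhys + cb)? */
            return pRam;
    return NULL;                                /* no range covers the address */
}
#endif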
140
141
142
143/**
144 * Sets up a RAM range.
145 *
146 * This will check for conflicting registrations, make a resource
147 * reservation for the memory (with GMM), and set up the per-page
148 * tracking structures (PGMPAGE).
149 *
150 * @returns VBox status code.
151 * @param pVM Pointer to the shared VM structure.
152 * @param GCPhys The physical address of the RAM.
153 * @param cb The size of the RAM.
154 * @param pszDesc The description - not copied, so don't free or change it.
155 */
156PGMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
157{
158 /*
159 * Validate input.
160 */
161 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
162 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
163 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
164 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
165 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
166 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
167 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
168 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
169
170 /*
171 * Find range location and check for conflicts.
172 * (We don't lock here because the locking by EMT is only required on update.)
173 */
174 PPGMRAMRANGE pPrev = NULL;
175 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
176 while (pRam && GCPhysLast >= pRam->GCPhys)
177 {
178 if ( GCPhys <= pRam->GCPhysLast
179 && GCPhysLast >= pRam->GCPhys)
180 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
181 GCPhys, GCPhysLast, pszDesc,
182 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
183 VERR_PGM_RAM_CONFLICT);
184
185 /* next */
186 pPrev = pRam;
187 pRam = pRam->pNextR3;
188 }
189
190 /*
191 * Register it with GMM (the API bitches).
192 */
193 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
194 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
195 if (RT_FAILURE(rc))
196 return rc;
197
198 /*
199 * Allocate RAM range.
200 */
201 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
202 PPGMRAMRANGE pNew;
203 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
204 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zd\n", cbRamRange), rc);
205
206 /*
207 * Initialize the range.
208 */
209 pNew->GCPhys = GCPhys;
210 pNew->GCPhysLast = GCPhysLast;
211 pNew->pszDesc = pszDesc;
212 pNew->cb = cb;
213 pNew->fFlags = 0;
214 pNew->pvHC = NULL;
215
216 pNew->pavHCChunkHC = NULL;
217 pNew->pavHCChunkGC = 0;
218
219#ifndef VBOX_WITH_NEW_PHYS_CODE
220 /* Allocate memory for chunk to HC ptr lookup array. */
221 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
222 AssertRCReturn(rc, rc);
223 pNew->pavHCChunkGC = MMHyperCCToGC(pVM, pNew->pavHCChunkHC);
224 pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
225
226#endif
227 RTGCPHYS iPage = cPages;
228 while (iPage-- > 0)
229 {
230#ifdef VBOX_WITH_NEW_PHYS_CODE
231 pNew->aPages[iPage].HCPhys = pVM->pgm.s.HCPhysZeroPg;
232#else
233 pNew->aPages[iPage].HCPhys = 0;
234#endif
235 pNew->aPages[iPage].fWrittenTo = 0;
236 pNew->aPages[iPage].fSomethingElse = 0;
237 pNew->aPages[iPage].u29B = 0;
238 PGM_PAGE_SET_TYPE(&pNew->aPages[iPage], PGMPAGETYPE_RAM);
239 PGM_PAGE_SET_STATE(&pNew->aPages[iPage], PGM_PAGE_STATE_ZERO);
240 PGM_PAGE_SET_PAGEID(&pNew->aPages[iPage], NIL_GMM_PAGEID);
241 }
242
243 /*
244 * Insert the new RAM range.
245 */
246 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
247
248 /*
249 * Notify REM.
250 */
251#ifdef VBOX_WITH_NEW_PHYS_CODE
252 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, 0);
253#else
254 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, MM_RAM_FLAGS_DYNAMIC_ALLOC);
255#endif
256
257 return VINF_SUCCESS;
258}
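
/*
 * Illustrative sketch (editor's addition): how VM construction code might
 * register the main guest RAM with PGMR3PhysRegisterRam. The 128 MB size and
 * the description string are made up for the example.
 */
#if 0 /* example only */
static int exampleRegisterBaseRam(PVM pVM)
{
    /* Both the base address and the size must be page aligned. */
    int rc = PGMR3PhysRegisterRam(pVM, 0 /*GCPhys*/, 128 * _1M /*cb*/, "Base RAM");
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
#endif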
259
260
261/**
262 * Registers a ROM image.
263 *
264 * Shadowed ROM images require double the amount of backing memory, so
265 * don't use them unless you have to. Shadowing of ROM images is a process
266 * whereby we can select where the reads go and where the writes go. On real
267 * hardware the chipset provides the means to configure this. We provide
268 * PGMR3PhysRomProtect() for this purpose.
269 *
270 * A read-only copy of the ROM image will always be kept around while we
271 * will allocate RAM pages for the changes on demand (unless all memory
272 * is configured to be preallocated).
273 *
274 * @returns VBox status.
275 * @param pVM VM Handle.
276 * @param pDevIns The device instance owning the ROM.
277 * @param GCPhys First physical address in the range.
278 * Must be page aligned!
279 * @param cb The size of the range (in bytes).
280 * Must be page aligned!
281 * @param pvBinary Pointer to the binary data backing the ROM image.
282 * This must be exactly \a cb in size.
283 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAG_SHADOWED
284 * and/or PGMPHYS_ROM_FLAG_PERMANENT_BINARY.
285 * @param pszDesc Pointer to description string. This must not be freed.
286 *
287 * @remark There is no way to remove the ROM yet, neither automatically on device
288 * cleanup nor manually from the device. This isn't difficult in any way, it's
289 * just not something we expect to be necessary for a while.
290 */
291PGMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
292 const void *pvBinary, uint32_t fFlags, const char *pszDesc)
293{
294 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p fFlags=%#x pszDesc=%s\n",
295 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, fFlags, pszDesc));
296
297 /*
298 * Validate input.
299 */
300 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
301 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
302 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
303 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
304 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
305 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
306 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
307 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAG_SHADOWED | PGMPHYS_ROM_FLAG_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
308 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
309
310 const uint32_t cPages = cb >> PAGE_SHIFT;
311
312 /*
313 * Find the ROM location in the ROM list first.
314 */
315 PPGMROMRANGE pRomPrev = NULL;
316 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
317 while (pRom && GCPhysLast >= pRom->GCPhys)
318 {
319 if ( GCPhys <= pRom->GCPhysLast
320 && GCPhysLast >= pRom->GCPhys)
321 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
322 GCPhys, GCPhysLast, pszDesc,
323 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
324 VERR_PGM_RAM_CONFLICT);
325 /* next */
326 pRomPrev = pRom;
327 pRom = pRom->pNextR3;
328 }
329
330 /*
331 * Find the RAM location and check for conflicts.
332 *
333 * Conflict detection is a bit different from RAM
334 * registration since a ROM can be located within a RAM
335 * range. So, what we have to check for is other memory
336 * types (other than RAM, that is) and that we don't span
337 * more than one RAM range (lazy).
338 */
339 bool fRamExists = false;
340 PPGMRAMRANGE pRamPrev = NULL;
341 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
342 while (pRam && GCPhysLast >= pRam->GCPhys)
343 {
344 if ( GCPhys <= pRam->GCPhysLast
345 && GCPhysLast >= pRam->GCPhys)
346 {
347 /* completely within? */
348 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
349 && GCPhysLast <= pRam->GCPhysLast,
350 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
351 GCPhys, GCPhysLast, pszDesc,
352 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
353 VERR_PGM_RAM_CONFLICT);
354 fRamExists = true;
355 break;
356 }
357
358 /* next */
359 pRamPrev = pRam;
360 pRam = pRam->pNextR3;
361 }
362 if (fRamExists)
363 {
364 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
365 uint32_t cPagesLeft = cPages;
366 while (cPagesLeft-- > 0)
367 {
368 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
369 ("%RGp isn't a RAM page (%d) - registering %RGp-%RGp (%s).\n",
370 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pszDesc),
371 VERR_PGM_RAM_CONFLICT);
372 Assert(PGM_PAGE_IS_ZERO(pPage));
373 }
374 }
375
376 /*
377 * Update the base memory reservation if necessary.
378 */
379 uint32_t cExtraBaseCost = fRamExists ? cPages : 0;
380 if (fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
381 cExtraBaseCost += cPages;
382 if (cExtraBaseCost)
383 {
384 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
385 if (RT_FAILURE(rc))
386 return rc;
387 }
388
389 /*
390 * Allocate memory for the virgin copy of the RAM.
391 */
392 PGMMALLOCATEPAGESREQ pReq;
393 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
394 AssertRCReturn(rc, rc);
395
396 for (uint32_t iPage = 0; iPage < cPages; iPage++)
397 {
398 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
399 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
400 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
401 }
402
403 pgmLock(pVM);
404 rc = GMMR3AllocatePagesPerform(pVM, pReq);
405 pgmUnlock(pVM);
406 if (RT_FAILURE(rc))
407 {
408 GMMR3AllocatePagesCleanup(pReq);
409 return rc;
410 }
411
412 /*
413 * Allocate the new ROM range and RAM range (if necessary).
414 */
415 PPGMROMRANGE pRomNew;
416 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
417 if (RT_SUCCESS(rc))
418 {
419 PPGMRAMRANGE pRamNew = NULL;
420 if (!fRamExists)
421 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
422 if (RT_SUCCESS(rc))
423 {
424 pgmLock(pVM);
425
426 /*
427 * Initialize and insert the RAM range (if required).
428 */
429 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
430 if (!fRamExists)
431 {
432 pRamNew->GCPhys = GCPhys;
433 pRamNew->GCPhysLast = GCPhysLast;
434 pRamNew->pszDesc = pszDesc;
435 pRamNew->cb = cb;
436 pRamNew->fFlags = 0;
437 pRamNew->pvHC = NULL;
438
439 PPGMPAGE pPage = &pRamNew->aPages[0];
440 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
441 {
442 pPage->fWrittenTo = 0;
443 pPage->fSomethingElse = 0;
444 pPage->u29B = 0;
445 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_ROM);
446 PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
447 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
448 PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);
449
450 pRomPage->Virgin = *pPage;
451 }
452
453 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
454 }
455 else
456 {
457 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
458 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
459 {
460 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_ROM);
461 PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
462 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
463 PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);
464
465 pRomPage->Virgin = *pPage;
466 }
467
468 pRamNew = pRam;
469 }
470 pgmUnlock(pVM);
471
472
473 /*
474 * Register the write access handler for the range (PGMROMPROT_READ_ROM_WRITE_IGNORE).
475 */
476 rc = PGMR3HandlerPhysicalRegister(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhys, GCPhysLast,
477#if 0 /** @todo we actually need a ring-3 write handler here for shadowed ROMs, so hack REM! */
478 pgmR3PhysRomWriteHandler, pRomNew,
479#else
480 NULL, NULL,
481#endif
482 NULL, "pgmGuestROMWriteHandler", MMHyperCCToR0(pVM, pRomNew),
483 NULL, "pgmGuestROMWriteHandler", MMHyperCCToGC(pVM, pRomNew), pszDesc);
484 if (RT_SUCCESS(rc))
485 {
486 pgmLock(pVM);
487
488 /*
489 * Copy the image over to the virgin pages.
490 * This must be done after linking in the RAM range.
491 */
492 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
493 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
494 {
495 void *pvDstPage;
496 PPGMPAGEMAP pMapIgnored;
497 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pMapIgnored, &pvDstPage);
498 if (RT_FAILURE(rc))
499 {
500 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
501 break;
502 }
503 memcpy(pvDstPage, (const uint8_t *)pvBinary + (iPage << PAGE_SHIFT), PAGE_SIZE);
504 }
505 if (RT_SUCCESS(rc))
506 {
507 /*
508 * Initialize the ROM range.
509 * Note that the Virgin member of the pages has already been initialized above.
510 */
511 pRomNew->GCPhys = GCPhys;
512 pRomNew->GCPhysLast = GCPhysLast;
513 pRomNew->cb = cb;
514 pRomNew->fFlags = fFlags;
515 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAG_PERMANENT_BINARY ? pvBinary : NULL;
516 pRomNew->pszDesc = pszDesc;
517
518 for (unsigned iPage = 0; iPage < cPages; iPage++)
519 {
520 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
521
522 pPage->Shadow.HCPhys = 0;
523 pPage->Shadow.fWrittenTo = 0;
524 pPage->Shadow.fSomethingElse = 0;
525 pPage->Shadow.u29B = 0;
526 PGM_PAGE_SET_TYPE( &pPage->Shadow, PGMPAGETYPE_ROM_SHADOW);
527 PGM_PAGE_SET_STATE( &pPage->Shadow, PGM_PAGE_STATE_ZERO);
528 PGM_PAGE_SET_PAGEID(&pPage->Shadow, pReq->aPages[iPage].idPage);
529
530 pRomNew->aPages[iPage].enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
531 }
532
533 /*
534 * Insert the ROM range, tell REM and return successfully.
535 */
536 pRomNew->pNextR3 = pRom;
537 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
538 pRomNew->pNextGC = pRom ? MMHyperCCToGC(pVM, pRom) : NIL_RTGCPTR;
539
540 if (pRomPrev)
541 {
542 pRomPrev->pNextR3 = pRomNew;
543 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
544 pRomPrev->pNextGC = MMHyperCCToGC(pVM, pRomNew);
545 }
546 else
547 {
548 pVM->pgm.s.pRomRangesR3 = pRomNew;
549 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
550 pVM->pgm.s.pRomRangesGC = MMHyperCCToGC(pVM, pRomNew);
551 }
552
553 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false); /** @todo fix shadowing and REM. */
554
555 GMMR3AllocatePagesCleanup(pReq);
556 pgmUnlock(pVM);
557 return VINF_SUCCESS;
558 }
559
560 /* bail out */
561
562 pgmUnlock(pVM);
563 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
564 AssertRC(rc2);
565 pgmLock(pVM);
566 }
567
568 pgmR3PhysUnlinkRamRange(pVM, pRamNew, pRamPrev);
569 if (pRamNew)
570 MMHyperFree(pVM, pRamNew);
571 }
572 MMHyperFree(pVM, pRomNew);
573 }
574
575 /** @todo Purge the mapping cache or something... */
576 GMMR3FreeAllocatedPages(pVM, pReq);
577 GMMR3AllocatePagesCleanup(pReq);
578 pgmUnlock(pVM);
579 return rc;
580}
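
/*
 * Illustrative sketch (editor's addition): how a hypothetical device could
 * register a shadowed ROM image with PGMR3PhysRomRegister. The address, size,
 * buffer and flag choice are illustrative; a real device would pass its own
 * image and decide whether it needs PGMPHYS_ROM_FLAG_SHADOWED.
 */
#if 0 /* example only */
static int exampleRegisterSystemRom(PVM pVM, PPDMDEVINS pDevIns, const void *pvRomImage, size_t cbRomImage)
{
    /* cbRomImage must be page aligned; pvRomImage must stay valid when
       PGMPHYS_ROM_FLAG_PERMANENT_BINARY is given. */
    return PGMR3PhysRomRegister(pVM, pDevIns, UINT32_C(0xfffe0000), cbRomImage, pvRomImage,
                                PGMPHYS_ROM_FLAG_SHADOWED, "Example System ROM");
}
#endif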
581
582
583/**
584 * \#PF Handler callback for ROM write accesses.
585 *
586 * @returns VINF_SUCCESS if the handler has carried out the operation.
587 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
588 * @param pVM VM Handle.
589 * @param GCPhys The physical address the guest is writing to.
590 * @param pvPhys The HC mapping of that address.
591 * @param pvBuf What the guest is reading/writing.
592 * @param cbBuf How much it's reading/writing.
593 * @param enmAccessType The access type.
594 * @param pvUser User argument.
595 */
596/*static - shut up warning */
597 DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
598{
599 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
600 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
601 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
602 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
603 switch (pRomPage->enmProt)
604 {
605 /*
606 * Ignore.
607 */
608 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
609 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
610 return VINF_SUCCESS;
611
612 /*
613 * Write to the ram page.
614 */
615 case PGMROMPROT_READ_ROM_WRITE_RAM:
616 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
617 {
618 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
619 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
620
621 /*
622 * Take the lock, do lazy allocation, map the page and copy the data.
623 *
624 * Note that we have to bypass the mapping TLB since it works on
625 * guest physical addresses and entering the shadow page would
626 * kind of screw things up...
627 */
628 int rc = pgmLock(pVM);
629 AssertRC(rc);
630
631 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pRomPage->Shadow) != PGM_PAGE_STATE_ALLOCATED))
632 {
633 rc = pgmPhysPageMakeWritable(pVM, &pRomPage->Shadow, GCPhys);
634 if (RT_FAILURE(rc))
635 {
636 pgmUnlock(pVM);
637 return rc;
638 }
639 }
640
641 void *pvDstPage;
642 PPGMPAGEMAP pMapIgnored;
643 rc = pgmPhysPageMap(pVM, &pRomPage->Shadow, GCPhys & X86_PTE_PG_MASK, &pMapIgnored, &pvDstPage);
644 if (RT_SUCCESS(rc))
645 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
646
647 pgmUnlock(pVM);
648 return rc;
649 }
650
651 default:
652 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
653 pRom->aPages[iPage].enmProt, iPage, GCPhys),
654 VERR_INTERNAL_ERROR);
655 }
656}
657
658
659
660/**
661 * Called by PGMR3Reset to reset the shadow, switch to the virgin,
662 * and verify that the virgin part is untouched.
663 *
664 * This is done after the normal memory has been cleared.
665 *
666 * @param pVM The VM handle.
667 */
668int pgmR3PhysRomReset(PVM pVM)
669{
670 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
671 {
672 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
673
674 if (pRom->fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
675 {
676 /*
677 * Reset the physical handler.
678 */
679 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
680 AssertRCReturn(rc, rc);
681
682 /*
683 * What we do with the shadow pages depends on the memory
684 * preallocation option. If not enabled, we'll just throw
685 * out all the dirty pages and replace them by the zero page.
686 */
687 if (1)///@todo !pVM->pgm.f.fRamPreAlloc)
688 {
689 /* Count dirty shadow pages. */
690 uint32_t cDirty = 0;
691 uint32_t iPage = cPages;
692 while (iPage-- > 0)
693 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
694 cDirty++;
695 if (cDirty)
696 {
697 /* Free the dirty pages. */
698 PGMMFREEPAGESREQ pReq;
699 rc = GMMR3FreePagesPrepare(pVM, &pReq, cDirty, GMMACCOUNT_BASE);
700 AssertRCReturn(rc, rc);
701
702 uint32_t iReqPage = 0;
703 for (iPage = 0; iPage < cPages; iPage++)
704 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
705 {
706 pReq->aPages[iReqPage].idPage = PGM_PAGE_GET_PAGEID(&pRom->aPages[iPage].Shadow);
707 iReqPage++;
708 }
709
710 rc = GMMR3FreePagesPerform(pVM, pReq);
711 GMMR3FreePagesCleanup(pReq);
712 AssertRCReturn(rc, rc);
713
714 /* setup the zero page. */
715 for (iPage = 0; iPage < cPages; iPage++)
716 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
717 {
718 PGM_PAGE_SET_STATE( &pRom->aPages[iPage].Shadow, PGM_PAGE_STATE_ZERO);
719 PGM_PAGE_SET_HCPHYS(&pRom->aPages[iPage].Shadow, pVM->pgm.s.HCPhysZeroPg);
720 PGM_PAGE_SET_PAGEID(&pRom->aPages[iPage].Shadow, NIL_GMM_PAGEID);
721 pRom->aPages[iPage].Shadow.fWrittenTo = false;
722 iReqPage++;
723 }
724 }
725 }
726 else
727 {
728 /* clear all the pages. */
729 pgmLock(pVM);
730 for (uint32_t iPage = 0; iPage < cPages; iPage++)
731 {
732 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
733 rc = pgmPhysPageMakeWritable(pVM, &pRom->aPages[iPage].Shadow, GCPhys);
734 if (RT_FAILURE(rc))
735 break;
736
737 void *pvDstPage;
738 PPGMPAGEMAP pMapIgnored;
739 rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pMapIgnored, &pvDstPage);
740 if (RT_FAILURE(rc))
741 break;
742 memset(pvDstPage, 0, PAGE_SIZE);
743 }
744 pgmUnlock(pVM);
745 AssertRCReturn(rc, rc);
746 }
747 }
748
749#ifdef VBOX_STRICT
750 /*
751 * Verify that the virgin page is unchanged if possible.
752 */
753 if (pRom->pvOriginal)
754 {
755 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
756 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbSrcPage += PAGE_SIZE)
757 {
758 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
759 PPGMPAGEMAP pMapIgnored;
760 void *pvDstPage;
761 int rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pMapIgnored, &pvDstPage);
762 if (RT_FAILURE(rc))
763 break;
764 if (memcmp(pvDstPage, pbSrcPage, PAGE_SIZE))
765 LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
766 GCPhys, pRom->pszDesc));
767 }
768 }
769#endif
770 }
771
772 return VINF_SUCCESS;
773}
774
775
776/**
777 * Change the shadowing of a range of ROM pages.
778 *
779 * This is intended for implementing chipset-specific memory registers
780 * and will not be very strict about the input. It will silently ignore
781 * any pages that are not part of a shadowed ROM.
782 *
783 * @returns VBox status code.
784 * @param pVM Pointer to the shared VM structure.
785 * @param GCPhys Where to start. Page aligned.
786 * @param cb How much to change. Page aligned.
787 * @param enmProt The new ROM protection.
788 */
789PGMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
790{
791 /*
792 * Check input
793 */
794 if (!cb)
795 return VINF_SUCCESS;
796 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
797 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
798 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
799 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
800 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
801
802 /*
803 * Process the request.
804 */
805 bool fFlushedPool = false;
806 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
807 if ( GCPhys <= pRom->GCPhysLast
808 && GCPhysLast >= pRom->GCPhys)
809 {
810 /*
811 * Iterate the relevant pages and make the necessary changes.
812 */
813 bool fChanges = false;
814 uint32_t const cPages = pRom->GCPhysLast > GCPhysLast
815 ? (GCPhysLast - pRom->GCPhys + 1) >> PAGE_SHIFT
816 : pRom->cb >> PAGE_SHIFT;
817 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
818 iPage < cPages;
819 iPage++)
820 {
821 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
822 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
823 {
824 fChanges = true;
825
826 /* flush the page pool first so we don't leave any usage references dangling. */
827 if (!fFlushedPool)
828 {
829 pgmPoolFlushAll(pVM);
830 fFlushedPool = true;
831 }
832
833 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
834 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
835 PPGMPAGE pRamPage = pgmPhysGetPage(&pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));
836
837 *pOld = *pRamPage;
838 *pRamPage = *pNew;
839 /** @todo sync the volatile flags (handlers) when these have been moved out of HCPhys. */
840 }
841 }
842
843 /*
844 * Reset the access handler if we made changes, no need
845 * to optimize this.
846 */
847 if (fChanges)
848 {
849 int rc = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
850 AssertRCReturn(rc, rc);
851 }
852
853 /* Advance - cb isn't updated. */
854 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
855 }
856
857 return VINF_SUCCESS;
858}
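
/*
 * Illustrative sketch (editor's addition): a chipset emulation could use
 * PGMR3PhysRomProtect to implement PAM-style shadowing registers. The
 * register decoding below is hypothetical; only the PGM call and the
 * PGMROMPROT values come from the code above.
 */
#if 0 /* example only */
static int exampleSetPamShadowing(PVM pVM, bool fWriteEnable)
{
    /* Route writes to the shadow RAM copy of the 0xF0000-0xFFFFF BIOS area,
       or back to ignoring them, depending on the (made up) register value. */
    PGMROMPROT enmProt = fWriteEnable
                       ? PGMROMPROT_READ_ROM_WRITE_RAM
                       : PGMROMPROT_READ_ROM_WRITE_IGNORE;
    return PGMR3PhysRomProtect(pVM, UINT32_C(0x000f0000), 0x10000, enmProt);
}
#endif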
859
860
861/**
862 * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
863 * registration APIs call to inform PGM about memory registrations.
864 *
865 * It registers the physical memory range with PGM. MM is responsible
866 * for the top-level things - allocation and locking - while PGM takes
867 * care of all the details and implements the physical address space virtualization.
868 *
869 * @returns VBox status.
870 * @param pVM The VM handle.
871 * @param pvRam HC virtual address of the RAM range. (page aligned)
872 * @param GCPhys GC physical address of the RAM range. (page aligned)
873 * @param cb Size of the RAM range. (page aligned)
874 * @param fFlags Flags, MM_RAM_*.
875 * @param paPages Pointer to an array of physical page descriptors.
876 * @param pszDesc Description string.
877 */
878PGMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
879{
880 /*
881 * Validate input.
882 * (Not so important because callers are only MMR3PhysRegister()
883 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
884 */
885 Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
886
887 Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
888 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
889 Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
890 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
891 Assert(!(fFlags & ~0xfff));
892 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
893 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
894 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
895 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
896 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
897 if (GCPhysLast < GCPhys)
898 {
899 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
900 return VERR_INVALID_PARAMETER;
901 }
902
903 /*
904 * Find range location and check for conflicts.
905 */
906 PPGMRAMRANGE pPrev = NULL;
907 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
908 while (pCur)
909 {
910 if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
911 {
912 AssertMsgFailed(("Conflict! This cannot happen!\n"));
913 return VERR_PGM_RAM_CONFLICT;
914 }
915 if (GCPhysLast < pCur->GCPhys)
916 break;
917
918 /* next */
919 pPrev = pCur;
920 pCur = pCur->pNextR3;
921 }
922
923 /*
924 * Allocate RAM range.
925 * Small ranges are allocated from the heap, big ones have separate mappings.
926 */
927 size_t cbRam = RT_OFFSETOF(PGMRAMRANGE, aPages[cb >> PAGE_SHIFT]);
928 PPGMRAMRANGE pNew;
929 RTGCPTR GCPtrNew;
930 int rc = VERR_NO_MEMORY;
931 if (cbRam > PAGE_SIZE / 2)
932 { /* large */
933 cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
934 rc = SUPPageAlloc(cbRam >> PAGE_SHIFT, (void **)&pNew);
935 if (VBOX_SUCCESS(rc))
936 {
937 rc = MMR3HyperMapHCRam(pVM, pNew, cbRam, true, pszDesc, &GCPtrNew);
938 if (VBOX_SUCCESS(rc))
939 {
940 Assert(MMHyperHC2GC(pVM, pNew) == GCPtrNew);
941 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
942 }
943 else
944 {
945 AssertMsgFailed(("MMR3HyperMapHCRam(,,%#x,,,) -> %Vrc\n", cbRam, rc));
946 SUPPageFree(pNew, cbRam >> PAGE_SHIFT);
947 }
948 }
949 else
950 AssertMsgFailed(("SUPPageAlloc(%#x,,) -> %Vrc\n", cbRam >> PAGE_SHIFT, rc));
951
952 }
953/** @todo Make VGA and VMMDev register their memory at init time before the hma size is fixated. */
954 if (RT_FAILURE(rc))
955 { /* small + fallback (vga) */
956 rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
957 if (VBOX_SUCCESS(rc))
958 GCPtrNew = MMHyperHC2GC(pVM, pNew);
959 else
960 AssertMsgFailed(("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc));
961 }
962 if (VBOX_SUCCESS(rc))
963 {
964 /*
965 * Initialize the range.
966 */
967 pNew->pvHC = pvRam;
968 pNew->GCPhys = GCPhys;
969 pNew->GCPhysLast = GCPhysLast;
970 pNew->cb = cb;
971 pNew->fFlags = fFlags;
972 pNew->pavHCChunkHC = NULL;
973 pNew->pavHCChunkGC = 0;
974
975 unsigned iPage = cb >> PAGE_SHIFT;
976 if (paPages)
977 {
978 while (iPage-- > 0)
979 {
980 pNew->aPages[iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
981 pNew->aPages[iPage].fWrittenTo = 0;
982 pNew->aPages[iPage].fSomethingElse = 0;
983 pNew->aPages[iPage].u29B = 0;
984 PGM_PAGE_SET_PAGEID(&pNew->aPages[iPage], NIL_GMM_PAGEID);
985 PGM_PAGE_SET_TYPE(&pNew->aPages[iPage], fFlags & MM_RAM_FLAGS_MMIO2 ? PGMPAGETYPE_MMIO2 : PGMPAGETYPE_RAM);
986 PGM_PAGE_SET_STATE(&pNew->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
987 }
988 }
989 else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
990 {
991 /* Allocate memory for chunk to HC ptr lookup array. */
992 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
993 AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc), rc);
994
995 pNew->pavHCChunkGC = MMHyperHC2GC(pVM, pNew->pavHCChunkHC);
996 Assert(pNew->pavHCChunkGC);
997
998 /* Physical memory will be allocated on demand. */
999 while (iPage-- > 0)
1000 {
1001 pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
1002 pNew->aPages[iPage].fWrittenTo = 0;
1003 pNew->aPages[iPage].fSomethingElse = 0;
1004 pNew->aPages[iPage].u29B = 0;
1005 PGM_PAGE_SET_PAGEID(&pNew->aPages[iPage], NIL_GMM_PAGEID);
1006 PGM_PAGE_SET_TYPE(&pNew->aPages[iPage], PGMPAGETYPE_RAM);
1007 PGM_PAGE_SET_STATE(&pNew->aPages[iPage], PGM_PAGE_STATE_ZERO);
1008 }
1009 }
1010 else
1011 {
1012 Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
1013 RTHCPHYS HCPhysDummyPage = (MMR3PageDummyHCPhys(pVM) & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
1014 while (iPage-- > 0)
1015 {
1016 pNew->aPages[iPage].HCPhys = HCPhysDummyPage; /** @todo PAGE FLAGS */
1017 pNew->aPages[iPage].fWrittenTo = 0;
1018 pNew->aPages[iPage].fSomethingElse = 0;
1019 pNew->aPages[iPage].u29B = 0;
1020 PGM_PAGE_SET_PAGEID(&pNew->aPages[iPage], NIL_GMM_PAGEID);
1021 PGM_PAGE_SET_TYPE(&pNew->aPages[iPage], PGMPAGETYPE_MMIO);
1022 PGM_PAGE_SET_STATE(&pNew->aPages[iPage], PGM_PAGE_STATE_ZERO);
1023 }
1024 }
1025
1026 /*
1027 * Insert the new RAM range.
1028 */
1029 pgmLock(pVM);
1030 pNew->pNextR3 = pCur;
1031 pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : NIL_RTR0PTR;
1032 pNew->pNextGC = pCur ? MMHyperCCToGC(pVM, pCur) : NIL_RTGCPTR;
1033 if (pPrev)
1034 {
1035 pPrev->pNextR3 = pNew;
1036 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
1037 pPrev->pNextGC = GCPtrNew;
1038 }
1039 else
1040 {
1041 pVM->pgm.s.pRamRangesR3 = pNew;
1042 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
1043 pVM->pgm.s.pRamRangesGC = GCPtrNew;
1044 }
1045 pgmUnlock(pVM);
1046 }
1047 return rc;
1048}
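
/*
 * Illustrative sketch (editor's addition): the older PGMR3PhysRegister API
 * above is driven by MM. A reserved MMIO registration passes no backing
 * memory and no page array, matching the asserts at the top of the function;
 * the range used below is made up.
 */
#if 0 /* example only */
static int exampleRegisterMmioHole(PVM pVM)
{
    return PGMR3PhysRegister(pVM, NULL /*pvRam*/, UINT32_C(0xe0000000), 16 * _1M,
                             MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO,
                             NULL /*paPages*/, "Example MMIO hole");
}
#endif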
1049
1050#ifndef VBOX_WITH_NEW_PHYS_CODE
1051
1052/**
1053 * Registers a chunk of the physical memory range with PGM. MM is responsible
1054 * for the top-level things - allocation and locking - while PGM takes
1055 * care of all the details and implements the physical address space virtualization.
1056 *
1057 *
1058 * @returns VBox status.
1059 * @param pVM The VM handle.
1060 * @param pvRam HC virtual address of the RAM range. (page aligned)
1061 * @param GCPhys GC physical address of the RAM range. (page aligned)
1062 * @param cb Size of the RAM range. (page aligned)
1063 * @param fFlags Flags, MM_RAM_*.
1064 * @param paPages Pointer to an array of physical page descriptors.
1065 * @param pszDesc Description string.
1066 */
1067PGMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
1068{
1069 NOREF(pszDesc);
1070
1071 /*
1072 * Validate input.
1073 * (Not so important because callers are only MMR3PhysRegister()
1074 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
1075 */
1076 Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
1077
1078 Assert(paPages);
1079 Assert(pvRam);
1080 Assert(!(fFlags & ~0xfff));
1081 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1082 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
1083 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
1084 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1085 Assert(VM_IS_EMT(pVM));
1086 Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
1087 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
1088
1089 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1090 if (GCPhysLast < GCPhys)
1091 {
1092 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
1093 return VERR_INVALID_PARAMETER;
1094 }
1095
1096 /*
1097 * Find existing range location.
1098 */
1099 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1100 while (pRam)
1101 {
1102 RTGCPHYS off = GCPhys - pRam->GCPhys;
1103 if ( off < pRam->cb
1104 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1105 break;
1106
1107 pRam = CTXALLSUFF(pRam->pNext);
1108 }
1109 AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
1110
1111 unsigned off = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1112 unsigned iPage = cb >> PAGE_SHIFT;
1113 if (paPages)
1114 {
1115 while (iPage-- > 0)
1116 pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
1117 }
1118 off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
1119 pRam->pavHCChunkHC[off] = pvRam;
1120
1121 /* Notify the recompiler. */
1122 REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);
1123
1124 return VINF_SUCCESS;
1125}
1126
1127
1128/**
1129 * Allocate missing physical pages for an existing guest RAM range.
1130 *
1131 * @returns VBox status.
1132 * @param pVM The VM handle.
1133 * @param GCPhys GC physical address of the RAM range. (page aligned)
1134 */
1135PGMR3DECL(int) PGM3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
1136{
1137 /*
1138 * Walk range list.
1139 */
1140 pgmLock(pVM);
1141
1142 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1143 while (pRam)
1144 {
1145 RTGCPHYS off = GCPhys - pRam->GCPhys;
1146 if ( off < pRam->cb
1147 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1148 {
1149 bool fRangeExists = false;
1150 unsigned off = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
1151
1152 /** @note A request made from another thread may end up in EMT after somebody else has already allocated the range. */
1153 if (pRam->pavHCChunkHC[off])
1154 fRangeExists = true;
1155
1156 pgmUnlock(pVM);
1157 if (fRangeExists)
1158 return VINF_SUCCESS;
1159 return pgmr3PhysGrowRange(pVM, GCPhys);
1160 }
1161
1162 pRam = CTXALLSUFF(pRam->pNext);
1163 }
1164 pgmUnlock(pVM);
1165 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1166}
1167
1168
1169/**
1170 * Allocate missing physical pages for an existing guest RAM range.
1171 *
1172 * @returns VBox status.
1173 * @param pVM The VM handle.
1175 * @param GCPhys GC physical address of the RAM range. (page aligned)
1176 */
1177int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
1178{
1179 void *pvRam;
1180 int rc;
1181
1182 /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
1183 if (!VM_IS_EMT(pVM))
1184 {
1185 PVMREQ pReq;
1186
1187 AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));
1188
1189 rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, GCPhys);
1190 if (VBOX_SUCCESS(rc))
1191 {
1192 rc = pReq->iStatus;
1193 VMR3ReqFree(pReq);
1194 }
1195 return rc;
1196 }
1197
1198 /* Round down to chunk boundary */
1199 GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;
1200
1201 STAM_COUNTER_INC(&pVM->pgm.s.StatDynRamGrow);
1202 STAM_COUNTER_ADD(&pVM->pgm.s.StatDynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));
1203
1204 Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %VGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));
1205
1206 unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;
1207
1208 for (;;)
1209 {
1210 rc = SUPPageAlloc(cPages, &pvRam);
1211 if (VBOX_SUCCESS(rc))
1212 {
1213
1214 rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
1215 if (VBOX_SUCCESS(rc))
1216 return rc;
1217
1218 SUPPageFree(pvRam, cPages);
1219 }
1220
1221 VMSTATE enmVMState = VMR3GetState(pVM);
1222 if (enmVMState != VMSTATE_RUNNING)
1223 {
1224 AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %VGp!\n", GCPhys));
1225 LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %VGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
1226 return rc;
1227 }
1228
1229 LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));
1230
1231 /* Pause first, then inform Main. */
1232 rc = VMR3SuspendNoSave(pVM);
1233 AssertRC(rc);
1234
1235 VMSetRuntimeError(pVM, false, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM.");
1236
1237 /* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
1238 rc = VMR3WaitForResume(pVM);
1239
1240 /* Retry */
1241 LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
1242 }
1243}
1244
1245#endif /* !VBOX_WITH_NEW_PHYS_CODE */
1246
1247/**
1248 * Interface for MMIO handler relocation calls.
1249 *
1250 * It relocates an existing physical memory range with PGM.
1251 *
1252 * @returns VBox status.
1253 * @param pVM The VM handle.
1254 * @param GCPhysOld Previous GC physical address of the RAM range. (page aligned)
1255 * @param GCPhysNew New GC physical address of the RAM range. (page aligned)
1256 * @param cb Size of the RAM range. (page aligned)
1257 */
1258PGMR3DECL(int) PGMR3PhysRelocate(PVM pVM, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, size_t cb)
1259{
1260 /*
1261 * Validate input.
1262 * (Not so important because callers are only MMR3PhysRelocate(),
1263 * but anyway...)
1264 */
1265 Log(("PGMR3PhysRelocate Old %VGp New %VGp (%#x bytes)\n", GCPhysOld, GCPhysNew, cb));
1266
1267 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1268 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
1269 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
1270 RTGCPHYS GCPhysLast;
1271 GCPhysLast = GCPhysOld + (cb - 1);
1272 if (GCPhysLast < GCPhysOld)
1273 {
1274 AssertMsgFailed(("The old range wraps! GCPhys=%VGp cb=%#x\n", GCPhysOld, cb));
1275 return VERR_INVALID_PARAMETER;
1276 }
1277 GCPhysLast = GCPhysNew + (cb - 1);
1278 if (GCPhysLast < GCPhysNew)
1279 {
1280 AssertMsgFailed(("The new range wraps! GCPhys=%VGp cb=%#x\n", GCPhysNew, cb));
1281 return VERR_INVALID_PARAMETER;
1282 }
1283
1284 /*
1285 * Find and remove old range location.
1286 */
1287 pgmLock(pVM);
1288 PPGMRAMRANGE pPrev = NULL;
1289 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
1290 while (pCur)
1291 {
1292 if (pCur->GCPhys == GCPhysOld && pCur->cb == cb)
1293 break;
1294
1295 /* next */
1296 pPrev = pCur;
1297 pCur = pCur->pNextR3;
1298 }
1299 if (pPrev)
1300 {
1301 pPrev->pNextR3 = pCur->pNextR3;
1302 pPrev->pNextR0 = pCur->pNextR0;
1303 pPrev->pNextGC = pCur->pNextGC;
1304 }
1305 else
1306 {
1307 pVM->pgm.s.pRamRangesR3 = pCur->pNextR3;
1308 pVM->pgm.s.pRamRangesR0 = pCur->pNextR0;
1309 pVM->pgm.s.pRamRangesGC = pCur->pNextGC;
1310 }
1311
1312 /*
1313 * Update the range.
1314 */
1315 pCur->GCPhys = GCPhysNew;
1316 pCur->GCPhysLast = GCPhysLast;
1317 PPGMRAMRANGE pNew = pCur;
1318
1319 /*
1320 * Find range location and check for conflicts.
1321 */
1322 pPrev = NULL;
1323 pCur = pVM->pgm.s.pRamRangesR3;
1324 while (pCur)
1325 {
1326 if (GCPhysNew <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
1327 {
1328 AssertMsgFailed(("Conflict! This cannot happen!\n"));
1329 pgmUnlock(pVM);
1330 return VERR_PGM_RAM_CONFLICT;
1331 }
1332 if (GCPhysLast < pCur->GCPhys)
1333 break;
1334
1335 /* next */
1336 pPrev = pCur;
1337 pCur = pCur->pNextR3;
1338 }
1339
1340 /*
1341 * Reinsert the RAM range.
1342 */
1343 pNew->pNextR3 = pCur;
1344 pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : 0;
1345 pNew->pNextGC = pCur ? MMHyperCCToGC(pVM, pCur) : 0;
1346 if (pPrev)
1347 {
1348 pPrev->pNextR3 = pNew;
1349 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
1350 pPrev->pNextGC = MMHyperCCToGC(pVM, pNew);
1351 }
1352 else
1353 {
1354 pVM->pgm.s.pRamRangesR3 = pNew;
1355 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
1356 pVM->pgm.s.pRamRangesGC = MMHyperCCToGC(pVM, pNew);
1357 }
1358
1359 pgmUnlock(pVM);
1360 return VINF_SUCCESS;
1361}
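
/*
 * Illustrative sketch (editor's addition): PGMR3PhysRelocate is meant for
 * MMIO ranges that a device moves at runtime (e.g. a guest reprogramming a
 * BAR). The addresses are made up; the size must match the registered range.
 */
#if 0 /* example only */
static int exampleMoveMmioRange(PVM pVM)
{
    return PGMR3PhysRelocate(pVM, UINT32_C(0xe0000000) /*old*/, UINT32_C(0xd0000000) /*new*/, 16 * _1M);
}
#endif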
1362
1363
1364/**
1365 * Interface that MMR3RomRegister() and MMR3PhysReserve() call to update the
1366 * flags of existing RAM ranges.
1367 *
1368 * @returns VBox status.
1369 * @param pVM The VM handle.
1370 * @param GCPhys GC physical address of the RAM range. (page aligned)
1371 * @param cb Size of the RAM range. (page aligned)
1372 * @param fFlags The OR flags, MM_RAM_* \#defines.
1373 * @param fMask The AND mask for the flags.
1374 */
1375PGMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
1376{
1377 Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));
1378
1379 /*
1380 * Validate input.
1381 * (Not so important because caller is always MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
1382 */
1383 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
1384 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1385 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1386 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1387 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1388
1389 /*
1390 * Lookup the range.
1391 */
1392 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1393 while (pRam && GCPhys > pRam->GCPhysLast)
1394 pRam = CTXALLSUFF(pRam->pNext);
1395 if ( !pRam
1396 || GCPhys > pRam->GCPhysLast
1397 || GCPhysLast < pRam->GCPhys)
1398 {
1399 AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
1400 return VERR_INVALID_PARAMETER;
1401 }
1402
1403 /*
1404 * Update the requested flags.
1405 */
1406 RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
1407 | fMask;
1408 unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
1409 unsigned iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1410 for ( ; iPage < iPageEnd; iPage++)
1411 pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */
1412
1413 return VINF_SUCCESS;
1414}
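
/*
 * Illustrative sketch (editor's addition): callers such as MMR3RomRegister()
 * use PGMR3PhysSetFlags to OR new MM_RAM_* flags into an existing range.
 * Reading the code above, fMask selects which of the four MM_RAM_FLAGS_* bits
 * survive before fFlags is ORed in; the range and flag choice below are made up.
 */
#if 0 /* example only */
static int exampleMarkRangeAsRom(PVM pVM, RTGCPHYS GCPhys, size_t cb)
{
    /* Clear the four special flag bits for the range, then set the ROM bit. */
    return PGMR3PhysSetFlags(pVM, GCPhys, cb, MM_RAM_FLAGS_ROM, 0 /*fMask*/);
}
#endif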
1415
1416
1417/**
1418 * Sets the Address Gate 20 state.
1419 *
1420 * @param pVM VM handle.
1421 * @param fEnable True if the gate should be enabled.
1422 * False if the gate should be disabled.
1423 */
1424PGMDECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable)
1425{
1426 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVM->pgm.s.fA20Enabled));
1427 if (pVM->pgm.s.fA20Enabled != (RTUINT)fEnable)
1428 {
1429 pVM->pgm.s.fA20Enabled = fEnable;
1430 pVM->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
1431 REMR3A20Set(pVM, fEnable);
1432 }
1433}
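
/*
 * Worked example (editor's addition): with fEnable = false the statement above
 * yields GCPhysA20Mask = ~(RTGCPHYS)(1 << 20), i.e. bit 20 is cleared from
 * guest physical addresses. Thus 0x00100000 aliases 0x00000000 and 0x0010ffef
 * aliases 0x0000ffef, reproducing the real-mode A20 wrap-around. With
 * fEnable = true the mask is all ones and addresses pass through unchanged.
 */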
1434
1435
1436/**
1437 * Tree enumeration callback for dealing with age rollover.
1438 * It will perform a simple compression of the current age.
1439 */
1440static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
1441{
1442 /* Age compression - ASSUMES iNow == 4. */
1443 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
1444 if (pChunk->iAge >= UINT32_C(0xffffff00))
1445 pChunk->iAge = 3;
1446 else if (pChunk->iAge >= UINT32_C(0xfffff000))
1447 pChunk->iAge = 2;
1448 else if (pChunk->iAge)
1449 pChunk->iAge = 1;
1450 else /* iAge = 0 */
1451 pChunk->iAge = 4;
1452
1453 /* reinsert */
1454 PVM pVM = (PVM)pvUser;
1455 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
1456 pChunk->AgeCore.Key = pChunk->iAge;
1457 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
1458 return 0;
1459}
1460
1461
1462/**
1463 * Tree enumeration callback that updates the age of the chunks that have
1464 * been used since the last ageing pass.
1465 */
1466static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
1467{
1468 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
1469 if (!pChunk->iAge)
1470 {
1471 PVM pVM = (PVM)pvUser;
1472 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
1473 pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
1474 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
1475 }
1476
1477 return 0;
1478}
1479
1480
1481/**
1482 * Performs ageing of the ring-3 chunk mappings.
1483 *
1484 * @param pVM The VM handle.
1485 */
1486PGMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
1487{
1488 pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
1489 pVM->pgm.s.ChunkR3Map.iNow++;
1490 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
1491 {
1492 pVM->pgm.s.ChunkR3Map.iNow = 4;
1493 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
1494 }
1495 else
1496 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
1497}
1498
1499
1500/**
1501 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
1502 */
1503typedef struct PGMR3PHYSCHUNKUNMAPCB
1504{
1505 PVM pVM; /**< The VM handle. */
1506 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
1507} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
1508
1509
1510/**
1511 * Callback used to find the mapping that's been unused for
1512 * the longest time.
1513 */
1514static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
1515{
1516 do
1517 {
1518 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
1519 if ( pChunk->iAge
1520 && !pChunk->cRefs)
1521 {
1522 /*
1523 * Check that it's not in any of the TLBs.
1524 */
1525 PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
1526 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
1527 if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
1528 {
1529 pChunk = NULL;
1530 break;
1531 }
1532 if (pChunk)
1533 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
1534 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
1535 {
1536 pChunk = NULL;
1537 break;
1538 }
1539 if (pChunk)
1540 {
1541 ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
1542 return 1; /* done */
1543 }
1544 }
1545
1546 /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
1547 pNode = pNode->pList;
1548 } while (pNode);
1549 return 0;
1550}
1551
1552
1553/**
1554 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
1555 *
1556 * The candidate will not be part of any TLBs, so no need to flush
1557 * anything afterwards.
1558 *
1559 * @returns Chunk id.
1560 * @param pVM The VM handle.
1561 */
1562static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
1563{
1564 /*
1565 * Do tree ageing first?
1566 */
1567 if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
1568 PGMR3PhysChunkAgeing(pVM);
1569
1570 /*
1571 * Enumerate the age tree starting with the left most node.
1572 */
1573 PGMR3PHYSCHUNKUNMAPCB Args;
1574 Args.pVM = pVM;
1575 Args.pChunk = NULL;
1576 if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
1577 return Args.pChunk->Core.Key;
1578 return INT32_MAX;
1579}
1580
1581
1582/**
1583 * Maps the given chunk into the ring-3 mapping cache.
1584 *
1585 * This will call ring-0.
1586 *
1587 * @returns VBox status code.
1588 * @param pVM The VM handle.
1589 * @param idChunk The chunk in question.
1590 * @param ppChunk Where to store the chunk tracking structure.
1591 *
1592 * @remarks Called from within the PGM critical section.
1593 */
1594int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
1595{
1596 int rc;
1597 /*
1598 * Allocate a new tracking structure first.
1599 */
1600#if 0 /* for later when we've got a separate mapping method for ring-0. */
1601 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
1602 AssertReturn(pChunk, VERR_NO_MEMORY);
1603#else
1604 PPGMCHUNKR3MAP pChunk;
1605 rc = MMHyperAlloc(pVM, sizeof(*pChunk), 0, MM_TAG_PGM_CHUNK_MAPPING, (void **)&pChunk);
1606 AssertRCReturn(rc, rc);
1607#endif
1608 pChunk->Core.Key = idChunk;
1609 pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
1610 pChunk->iAge = 0;
1611 pChunk->cRefs = 0;
1612 pChunk->cPermRefs = 0;
1613 pChunk->pv = NULL;
1614
1615 /*
1616 * Request the ring-0 part to map the chunk in question and if
1617 * necessary unmap another one to make space in the mapping cache.
1618 */
1619 GMMMAPUNMAPCHUNKREQ Req;
1620 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
1621 Req.Hdr.cbReq = sizeof(Req);
1622 Req.pvR3 = NULL;
1623 Req.idChunkMap = idChunk;
1624 Req.idChunkUnmap = INT32_MAX;
1625 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
1626 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
1627 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
1628 if (VBOX_SUCCESS(rc))
1629 {
1630 /*
1631 * Update the tree.
1632 */
1633 /* insert the new one. */
1634 AssertPtr(Req.pvR3);
1635 pChunk->pv = Req.pvR3;
1636 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
1637 AssertRelease(fRc);
1638 pVM->pgm.s.ChunkR3Map.c++;
1639
1640 fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
1641 AssertRelease(fRc);
1642
1643 /* remove the unmapped one. */
1644 if (Req.idChunkUnmap != INT32_MAX)
1645 {
1646 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
1647 AssertRelease(pUnmappedChunk);
1648 pUnmappedChunk->pv = NULL;
1649 pUnmappedChunk->Core.Key = UINT32_MAX;
1650#if 0 /* for later when we've got a separate mapping method for ring-0. */
1651 MMR3HeapFree(pUnmappedChunk);
1652#else
1653 MMHyperFree(pVM, pUnmappedChunk);
1654#endif
1655 pVM->pgm.s.ChunkR3Map.c--;
1656 }
1657 }
1658 else
1659 {
1660 AssertRC(rc);
1661#if 0 /* for later when we've got a separate mapping method for ring-0. */
1662 MMR3HeapFree(pChunk);
1663#else
1664 MMHyperFree(pVM, pChunk);
1665#endif
1666 pChunk = NULL;
1667 }
1668
1669 *ppChunk = pChunk;
1670 return rc;
1671}
1672
1673
1674/**
1675 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
1676 *
1677 * @returns see pgmR3PhysChunkMap.
1678 * @param pVM The VM handle.
1679 * @param idChunk The chunk to map.
1680 */
1681PDMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
1682{
1683 PPGMCHUNKR3MAP pChunk;
1684 return pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
1685}
1686
1687
1688/**
1689 * Invalidates the TLB for the ring-3 mapping cache.
1690 *
1691 * @param pVM The VM handle.
1692 */
1693PGMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
1694{
1695 pgmLock(pVM);
1696 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
1697 {
1698 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
1699 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
1700 }
1701 pgmUnlock(pVM);
1702}
1703
1704
1705/**
1706 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
1707 *
1708 * @returns The following VBox status codes.
1709 * @retval VINF_SUCCESS on success. FF cleared.
1710 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
1711 *
1712 * @param pVM The VM handle.
1713 */
1714PDMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
1715{
1716 pgmLock(pVM);
1717 int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
1718 if (rc == VERR_GMM_SEED_ME)
1719 {
1720 void *pvChunk;
1721 rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
1722 if (VBOX_SUCCESS(rc))
1723 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
1724 if (VBOX_FAILURE(rc))
1725 {
1726 LogRel(("PGM: GMM Seeding failed, rc=%Vrc\n", rc));
1727 rc = VINF_EM_NO_MEMORY;
1728 }
1729 }
1730 pgmUnlock(pVM);
1731 Assert(rc == VINF_SUCCESS || rc == VINF_EM_NO_MEMORY);
1732 return rc;
1733}
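
/*
 * Illustrative sketch (editor's addition): ring-3 code that sees
 * VM_FF_PGM_NEED_HANDY_PAGES set would respond roughly like this. The
 * surrounding loop and force-flag handling are simplified; only
 * PGMR3PhysAllocateHandyPages and its status codes come from the code above.
 */
#if 0 /* example only */
static int exampleServiceHandyPagesFF(PVM pVM)
{
    if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM);
        if (rc == VINF_EM_NO_MEMORY)
            return rc;          /* out of memory - let EM deal with it */
        AssertRC(rc);
    }
    return VINF_SUCCESS;
}
#endif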
1734