source: vbox/trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp@ 61566

Last change on this file since 61566 was 60401, checked in by vboxsync, 9 years ago

PGM: Converted NO_RAM_RESET into a CFGM option (PGM/ZeroRamPagesOnReset).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 167.0 KB
1/* $Id: PGMPhys.cpp 60401 2016-04-09 23:10:40Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM_PHYS
23#include <VBox/vmm/pgm.h>
24#include <VBox/vmm/iom.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/stam.h>
27#ifdef VBOX_WITH_REM
28# include <VBox/vmm/rem.h>
29#endif
30#include <VBox/vmm/pdmdev.h>
31#include "PGMInternal.h"
32#include <VBox/vmm/vm.h>
33#include <VBox/vmm/uvm.h>
34#include "PGMInline.h"
35#include <VBox/sup.h>
36#include <VBox/param.h>
37#include <VBox/err.h>
38#include <VBox/log.h>
39#include <iprt/assert.h>
40#include <iprt/alloc.h>
41#include <iprt/asm.h>
42#ifdef VBOX_STRICT
43# include <iprt/crc.h>
44#endif
45#include <iprt/thread.h>
46#include <iprt/string.h>
47#include <iprt/system.h>
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** The number of pages to free in one batch. */
54#define PGMPHYS_FREE_PAGE_BATCH_SIZE 128
55
56
57/*
58 * PGMR3PhysReadU8-64
59 * PGMR3PhysWriteU8-64
60 */
61#define PGMPHYSFN_READNAME PGMR3PhysReadU8
62#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU8
63#define PGMPHYS_DATASIZE 1
64#define PGMPHYS_DATATYPE uint8_t
65#include "PGMPhysRWTmpl.h"
66
67#define PGMPHYSFN_READNAME PGMR3PhysReadU16
68#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU16
69#define PGMPHYS_DATASIZE 2
70#define PGMPHYS_DATATYPE uint16_t
71#include "PGMPhysRWTmpl.h"
72
73#define PGMPHYSFN_READNAME PGMR3PhysReadU32
74#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU32
75#define PGMPHYS_DATASIZE 4
76#define PGMPHYS_DATATYPE uint32_t
77#include "PGMPhysRWTmpl.h"
78
79#define PGMPHYSFN_READNAME PGMR3PhysReadU64
80#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU64
81#define PGMPHYS_DATASIZE 8
82#define PGMPHYS_DATATYPE uint64_t
83#include "PGMPhysRWTmpl.h"
84
85
86/**
87 * EMT worker for PGMR3PhysReadExternal.
88 */
89static DECLCALLBACK(int) pgmR3PhysReadExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, void *pvBuf, size_t cbRead,
90 PGMACCESSORIGIN enmOrigin)
91{
92 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, *pGCPhys, pvBuf, cbRead, enmOrigin);
93 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
94 return VINF_SUCCESS;
95}
96
97
98/**
99 * Read from physical memory, external users.
100 *
101 * @returns VBox status code.
102 * @retval VINF_SUCCESS.
103 *
104 * @param pVM The cross context VM structure.
105 * @param GCPhys Physical address to read from.
106 * @param pvBuf Where to read into.
107 * @param cbRead How many bytes to read.
108 * @param enmOrigin Who is calling.
109 *
110 * @thread Any but EMTs.
111 */
112VMMR3DECL(int) PGMR3PhysReadExternal(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
113{
114 VM_ASSERT_OTHER_THREAD(pVM);
115
116 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
117 LogFlow(("PGMR3PhysReadExternal: %RGp %d\n", GCPhys, cbRead));
118
119 pgmLock(pVM);
120
121 /*
122 * Copy loop on ram ranges.
123 */
124 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
125 for (;;)
126 {
127 /* Inside range or not? */
128 if (pRam && GCPhys >= pRam->GCPhys)
129 {
130 /*
 131 * Must work our way through this, page by page.
132 */
133 RTGCPHYS off = GCPhys - pRam->GCPhys;
134 while (off < pRam->cb)
135 {
136 unsigned iPage = off >> PAGE_SHIFT;
137 PPGMPAGE pPage = &pRam->aPages[iPage];
138
139 /*
140 * If the page has an ALL access handler, we'll have to
141 * delegate the job to EMT.
142 */
143 if ( PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
144 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
145 {
146 pgmUnlock(pVM);
147
148 return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysReadExternalEMT, 5,
149 pVM, &GCPhys, pvBuf, cbRead, enmOrigin);
150 }
151 Assert(!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage));
152
153 /*
154 * Simple stuff, go ahead.
155 */
156 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
157 if (cb > cbRead)
158 cb = cbRead;
159 PGMPAGEMAPLOCK PgMpLck;
160 const void *pvSrc;
161 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
162 if (RT_SUCCESS(rc))
163 {
164 memcpy(pvBuf, pvSrc, cb);
165 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
166 }
167 else
168 {
169 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
170 pRam->GCPhys + off, pPage, rc));
171 memset(pvBuf, 0xff, cb);
172 }
173
174 /* next page */
175 if (cb >= cbRead)
176 {
177 pgmUnlock(pVM);
178 return VINF_SUCCESS;
179 }
180 cbRead -= cb;
181 off += cb;
182 GCPhys += cb;
183 pvBuf = (char *)pvBuf + cb;
184 } /* walk pages in ram range. */
185 }
186 else
187 {
188 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
189
190 /*
191 * Unassigned address space.
192 */
193 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
194 if (cb >= cbRead)
195 {
196 memset(pvBuf, 0xff, cbRead);
197 break;
198 }
199 memset(pvBuf, 0xff, cb);
200
201 cbRead -= cb;
202 pvBuf = (char *)pvBuf + cb;
203 GCPhys += cb;
204 }
205
206 /* Advance range if necessary. */
207 while (pRam && GCPhys > pRam->GCPhysLast)
208 pRam = pRam->CTX_SUFF(pNext);
209 } /* Ram range walk */
210
211 pgmUnlock(pVM);
212
213 return VINF_SUCCESS;
214}
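/* Usage sketch (editorial addition, not part of the original source): a device
 * worker thread reading guest RAM during an asynchronous DMA transfer. The
 * buffer, address and PGMACCESSORIGIN_DEVICE origin are illustrative only.
 *
 *     uint8_t abBuf[512];
 *     int rc = PGMR3PhysReadExternal(pVM, GCPhysDmaSrc, abBuf, sizeof(abBuf),
 *                                    PGMACCESSORIGIN_DEVICE);
 *     AssertRCReturn(rc, rc); // pages with ALL access handlers were delegated to an EMT above
 */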
215
216
217/**
218 * EMT worker for PGMR3PhysWriteExternal.
219 */
220static DECLCALLBACK(int) pgmR3PhysWriteExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, const void *pvBuf, size_t cbWrite,
221 PGMACCESSORIGIN enmOrigin)
222{
223 /** @todo VERR_EM_NO_MEMORY */
224 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, *pGCPhys, pvBuf, cbWrite, enmOrigin);
225 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
226 return VINF_SUCCESS;
227}
228
229
230/**
231 * Write to physical memory, external users.
232 *
233 * @returns VBox status code.
234 * @retval VINF_SUCCESS.
235 * @retval VERR_EM_NO_MEMORY.
236 *
237 * @param pVM The cross context VM structure.
238 * @param GCPhys Physical address to write to.
239 * @param pvBuf What to write.
240 * @param cbWrite How many bytes to write.
241 * @param enmOrigin Who is calling.
242 *
243 * @thread Any but EMTs.
244 */
245VMMDECL(int) PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
246{
247 VM_ASSERT_OTHER_THREAD(pVM);
248
249 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites,
250 ("Calling PGMR3PhysWriteExternal after pgmR3Save()! GCPhys=%RGp cbWrite=%#x enmOrigin=%d\n",
251 GCPhys, cbWrite, enmOrigin));
252 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
253 LogFlow(("PGMR3PhysWriteExternal: %RGp %d\n", GCPhys, cbWrite));
254
255 pgmLock(pVM);
256
257 /*
258 * Copy loop on ram ranges, stop when we hit something difficult.
259 */
260 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
261 for (;;)
262 {
263 /* Inside range or not? */
264 if (pRam && GCPhys >= pRam->GCPhys)
265 {
266 /*
 267 * Must work our way through this, page by page.
268 */
269 RTGCPTR off = GCPhys - pRam->GCPhys;
270 while (off < pRam->cb)
271 {
272 RTGCPTR iPage = off >> PAGE_SHIFT;
273 PPGMPAGE pPage = &pRam->aPages[iPage];
274
275 /*
 276 * If the page is problematic, we have to do the work on the EMT.
277 *
278 * Allocating writable pages and access handlers are
 279 * problematic; write monitored pages are simple and can be
280 * dealt with here.
281 */
282 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
283 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
284 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
285 {
286 if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
287 && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
288 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
289 else
290 {
291 pgmUnlock(pVM);
292
293 return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysWriteExternalEMT, 5,
294 pVM, &GCPhys, pvBuf, cbWrite, enmOrigin);
295 }
296 }
297 Assert(!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage));
298
299 /*
300 * Simple stuff, go ahead.
301 */
302 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
303 if (cb > cbWrite)
304 cb = cbWrite;
305 PGMPAGEMAPLOCK PgMpLck;
306 void *pvDst;
307 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
308 if (RT_SUCCESS(rc))
309 {
310 memcpy(pvDst, pvBuf, cb);
311 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
312 }
313 else
314 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
315 pRam->GCPhys + off, pPage, rc));
316
317 /* next page */
318 if (cb >= cbWrite)
319 {
320 pgmUnlock(pVM);
321 return VINF_SUCCESS;
322 }
323
324 cbWrite -= cb;
325 off += cb;
326 GCPhys += cb;
327 pvBuf = (const char *)pvBuf + cb;
328 } /* walk pages in ram range */
329 }
330 else
331 {
332 /*
333 * Unassigned address space, skip it.
334 */
335 if (!pRam)
336 break;
337 size_t cb = pRam->GCPhys - GCPhys;
338 if (cb >= cbWrite)
339 break;
340 cbWrite -= cb;
341 pvBuf = (const char *)pvBuf + cb;
342 GCPhys += cb;
343 }
344
345 /* Advance range if necessary. */
346 while (pRam && GCPhys > pRam->GCPhysLast)
347 pRam = pRam->CTX_SUFF(pNext);
348 } /* Ram range walk */
349
350 pgmUnlock(pVM);
351 return VINF_SUCCESS;
352}
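/* Usage sketch (editorial addition): the write-side counterpart, again from a
 * non-EMT thread; the names are illustrative.
 *
 *     int rc = PGMR3PhysWriteExternal(pVM, GCPhysDmaDst, abBuf, sizeof(abBuf),
 *                                     PGMACCESSORIGIN_DEVICE);
 *     AssertRC(rc); // handler, zero and shared pages were delegated to an EMT above
 */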
353
354
355/**
356 * VMR3ReqCall worker for PGMR3PhysGCPhys2CCPtrExternal to make pages writable.
357 *
358 * @returns see PGMR3PhysGCPhys2CCPtrExternal
359 * @param pVM The cross context VM structure.
360 * @param pGCPhys Pointer to the guest physical address.
361 * @param ppv Where to store the mapping address.
362 * @param pLock Where to store the lock.
363 */
364static DECLCALLBACK(int) pgmR3PhysGCPhys2CCPtrDelegated(PVM pVM, PRTGCPHYS pGCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
365{
366 /*
367 * Just hand it to PGMPhysGCPhys2CCPtr and check that it's not a page with
368 * an access handler after it succeeds.
369 */
370 int rc = pgmLock(pVM);
371 AssertRCReturn(rc, rc);
372
373 rc = PGMPhysGCPhys2CCPtr(pVM, *pGCPhys, ppv, pLock);
374 if (RT_SUCCESS(rc))
375 {
376 PPGMPAGEMAPTLBE pTlbe;
377 int rc2 = pgmPhysPageQueryTlbe(pVM, *pGCPhys, &pTlbe);
378 AssertFatalRC(rc2);
379 PPGMPAGE pPage = pTlbe->pPage;
380 if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
381 {
382 PGMPhysReleasePageMappingLock(pVM, pLock);
383 rc = VERR_PGM_PHYS_PAGE_RESERVED;
384 }
385 else if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
386#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
387 || pgmPoolIsDirtyPage(pVM, *pGCPhys)
388#endif
389 )
390 {
391 /* We *must* flush any corresponding pgm pool page here, otherwise we'll
392 * not be informed about writes and keep bogus gst->shw mappings around.
393 */
394 pgmPoolFlushPageByGCPhys(pVM, *pGCPhys);
395 Assert(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage));
396 /** @todo r=bird: return VERR_PGM_PHYS_PAGE_RESERVED here if it still has
397 * active handlers, see the PGMR3PhysGCPhys2CCPtrExternal docs. */
398 }
399 }
400
401 pgmUnlock(pVM);
402 return rc;
403}
404
405
406/**
407 * Requests the mapping of a guest page into ring-3, external threads.
408 *
409 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
410 * release it.
411 *
412 * This API will assume your intention is to write to the page, and will
413 * therefore replace shared and zero pages. If you do not intend to modify the
414 * page, use the PGMR3PhysGCPhys2CCPtrReadOnlyExternal() API.
415 *
416 * @returns VBox status code.
417 * @retval VINF_SUCCESS on success.
 418 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
419 * backing or if the page has any active access handlers. The caller
420 * must fall back on using PGMR3PhysWriteExternal.
421 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
422 *
423 * @param pVM The cross context VM structure.
424 * @param GCPhys The guest physical address of the page that should be mapped.
425 * @param ppv Where to store the address corresponding to GCPhys.
426 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
427 *
428 * @remark Avoid calling this API from within critical sections (other than the
 429 * PGM one) because of the deadlock risk when we have to delegate the
430 * task to an EMT.
431 * @thread Any.
432 */
433VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrExternal(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
434{
435 AssertPtr(ppv);
436 AssertPtr(pLock);
437
438 Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
439
440 int rc = pgmLock(pVM);
441 AssertRCReturn(rc, rc);
442
443 /*
444 * Query the Physical TLB entry for the page (may fail).
445 */
446 PPGMPAGEMAPTLBE pTlbe;
447 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
448 if (RT_SUCCESS(rc))
449 {
450 PPGMPAGE pPage = pTlbe->pPage;
451 if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
452 rc = VERR_PGM_PHYS_PAGE_RESERVED;
453 else
454 {
455 /*
 456 * If the page is shared, the zero page, or being write monitored,
 457 * it must be converted to a page that's writable if possible.
 458 * We can only deal with write monitored pages here; the rest have
 459 * to be done on an EMT.
460 */
461 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
462 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
463#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
464 || pgmPoolIsDirtyPage(pVM, GCPhys)
465#endif
466 )
467 {
468 if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
469 && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
470#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
471 && !pgmPoolIsDirtyPage(pVM, GCPhys)
472#endif
473 )
474 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
475 else
476 {
477 pgmUnlock(pVM);
478
479 return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
480 pVM, &GCPhys, ppv, pLock);
481 }
482 }
483
484 /*
485 * Now, just perform the locking and calculate the return address.
486 */
487 PPGMPAGEMAP pMap = pTlbe->pMap;
488 if (pMap)
489 pMap->cRefs++;
490
491 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
492 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
493 {
494 if (cLocks == 0)
495 pVM->pgm.s.cWriteLockedPages++;
496 PGM_PAGE_INC_WRITE_LOCKS(pPage);
497 }
498 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
499 {
500 PGM_PAGE_INC_WRITE_LOCKS(pPage);
501 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
502 if (pMap)
503 pMap->cRefs++; /* Extra ref to prevent it from going away. */
504 }
505
506 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
507 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
508 pLock->pvMap = pMap;
509 }
510 }
511
512 pgmUnlock(pVM);
513 return rc;
514}
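/* Usage sketch (editorial addition): mapping a guest page for direct writes and
 * falling back on PGMR3PhysWriteExternal when the page is reserved, as the
 * documentation above prescribes; names and sizes are illustrative.
 *
 *     void           *pv;
 *     PGMPAGEMAPLOCK  Lock;
 *     int rc = PGMR3PhysGCPhys2CCPtrExternal(pVM, GCPhys, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         memcpy(pv, pvData, cbData);                // cbData must not cross the page boundary
 *         PGMPhysReleasePageMappingLock(pVM, &Lock); // release ASAP
 *     }
 *     else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
 *         rc = PGMR3PhysWriteExternal(pVM, GCPhys, pvData, cbData, PGMACCESSORIGIN_DEVICE);
 */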
515
516
517/**
518 * Requests the mapping of a guest page into ring-3, external threads.
519 *
520 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
521 * release it.
522 *
523 * @returns VBox status code.
524 * @retval VINF_SUCCESS on success.
 525 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
 526 * backing or if the page has an active ALL access handler. The caller
527 * must fall back on using PGMPhysRead.
528 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
529 *
530 * @param pVM The cross context VM structure.
531 * @param GCPhys The guest physical address of the page that should be mapped.
532 * @param ppv Where to store the address corresponding to GCPhys.
533 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
534 *
535 * @remark Avoid calling this API from within critical sections (other than
536 * the PGM one) because of the deadlock risk.
537 * @thread Any.
538 */
539VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
540{
541 int rc = pgmLock(pVM);
542 AssertRCReturn(rc, rc);
543
544 /*
545 * Query the Physical TLB entry for the page (may fail).
546 */
547 PPGMPAGEMAPTLBE pTlbe;
548 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
549 if (RT_SUCCESS(rc))
550 {
551 PPGMPAGE pPage = pTlbe->pPage;
552#if 1
 553 /* MMIO pages don't have any readable backing. */
554 if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
555 rc = VERR_PGM_PHYS_PAGE_RESERVED;
556#else
557 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
558 rc = VERR_PGM_PHYS_PAGE_RESERVED;
559#endif
560 else
561 {
562 /*
563 * Now, just perform the locking and calculate the return address.
564 */
565 PPGMPAGEMAP pMap = pTlbe->pMap;
566 if (pMap)
567 pMap->cRefs++;
568
569 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
570 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
571 {
572 if (cLocks == 0)
573 pVM->pgm.s.cReadLockedPages++;
574 PGM_PAGE_INC_READ_LOCKS(pPage);
575 }
576 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
577 {
578 PGM_PAGE_INC_READ_LOCKS(pPage);
579 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
580 if (pMap)
581 pMap->cRefs++; /* Extra ref to prevent it from going away. */
582 }
583
584 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
585 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
586 pLock->pvMap = pMap;
587 }
588 }
589
590 pgmUnlock(pVM);
591 return rc;
592}
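/* Usage sketch (editorial addition): the read-only variant with a bulk-read
 * fallback when the page is reserved (the documentation above mentions
 * PGMPhysRead; from a non-EMT thread the PGMR3PhysReadExternal wrapper earlier
 * in this file is the analogous path). Names are illustrative.
 *
 *     void const     *pv;
 *     PGMPAGEMAPLOCK  Lock;
 *     int rc = PGMR3PhysGCPhys2CCPtrReadOnlyExternal(pVM, GCPhys, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         memcpy(pvDst, pv, cbData);
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);
 *     }
 *     else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
 *         rc = PGMR3PhysReadExternal(pVM, GCPhys, pvDst, cbData, PGMACCESSORIGIN_DEVICE);
 */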
593
594
595#define MAKE_LEAF(a_pNode) \
596 do { \
597 (a_pNode)->pLeftR3 = NIL_RTR3PTR; \
598 (a_pNode)->pRightR3 = NIL_RTR3PTR; \
599 (a_pNode)->pLeftR0 = NIL_RTR0PTR; \
600 (a_pNode)->pRightR0 = NIL_RTR0PTR; \
601 (a_pNode)->pLeftRC = NIL_RTRCPTR; \
602 (a_pNode)->pRightRC = NIL_RTRCPTR; \
603 } while (0)
604
605#define INSERT_LEFT(a_pParent, a_pNode) \
606 do { \
607 (a_pParent)->pLeftR3 = (a_pNode); \
608 (a_pParent)->pLeftR0 = (a_pNode)->pSelfR0; \
609 (a_pParent)->pLeftRC = (a_pNode)->pSelfRC; \
610 } while (0)
611#define INSERT_RIGHT(a_pParent, a_pNode) \
612 do { \
613 (a_pParent)->pRightR3 = (a_pNode); \
614 (a_pParent)->pRightR0 = (a_pNode)->pSelfR0; \
615 (a_pParent)->pRightRC = (a_pNode)->pSelfRC; \
616 } while (0)
617
618
619/**
620 * Recursive tree builder.
621 *
622 * @param ppRam Pointer to the iterator variable.
623 * @param iDepth The current depth. Inserts a leaf node if 0.
624 */
625static PPGMRAMRANGE pgmR3PhysRebuildRamRangeSearchTreesRecursively(PPGMRAMRANGE *ppRam, int iDepth)
626{
627 PPGMRAMRANGE pRam;
628 if (iDepth <= 0)
629 {
630 /*
631 * Leaf node.
632 */
633 pRam = *ppRam;
634 if (pRam)
635 {
636 *ppRam = pRam->pNextR3;
637 MAKE_LEAF(pRam);
638 }
639 }
640 else
641 {
642
643 /*
644 * Intermediate node.
645 */
646 PPGMRAMRANGE pLeft = pgmR3PhysRebuildRamRangeSearchTreesRecursively(ppRam, iDepth - 1);
647
648 pRam = *ppRam;
649 if (!pRam)
650 return pLeft;
651 *ppRam = pRam->pNextR3;
652 MAKE_LEAF(pRam);
653 INSERT_LEFT(pRam, pLeft);
654
655 PPGMRAMRANGE pRight = pgmR3PhysRebuildRamRangeSearchTreesRecursively(ppRam, iDepth - 1);
656 if (pRight)
657 INSERT_RIGHT(pRam, pRight);
658 }
659 return pRam;
660}
661
662
663/**
664 * Rebuilds the RAM range search trees.
665 *
666 * @param pVM The cross context VM structure.
667 */
668static void pgmR3PhysRebuildRamRangeSearchTrees(PVM pVM)
669{
670
671 /*
672 * Create the reasonably balanced tree in a sequential fashion.
673 * For simplicity (laziness) we use standard recursion here.
674 */
675 int iDepth = 0;
676 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
677 PPGMRAMRANGE pRoot = pgmR3PhysRebuildRamRangeSearchTreesRecursively(&pRam, 0);
678 while (pRam)
679 {
680 PPGMRAMRANGE pLeft = pRoot;
681
682 pRoot = pRam;
683 pRam = pRam->pNextR3;
684 MAKE_LEAF(pRoot);
685 INSERT_LEFT(pRoot, pLeft);
686
687 PPGMRAMRANGE pRight = pgmR3PhysRebuildRamRangeSearchTreesRecursively(&pRam, iDepth);
688 if (pRight)
689 INSERT_RIGHT(pRoot, pRight);
690 /** @todo else: rotate the tree. */
691
692 iDepth++;
693 }
694
695 pVM->pgm.s.pRamRangeTreeR3 = pRoot;
696 pVM->pgm.s.pRamRangeTreeR0 = pRoot ? pRoot->pSelfR0 : NIL_RTR0PTR;
697 pVM->pgm.s.pRamRangeTreeRC = pRoot ? pRoot->pSelfRC : NIL_RTRCPTR;
698
699#ifdef VBOX_STRICT
700 /*
701 * Verify that the above code works.
702 */
703 unsigned cRanges = 0;
704 for (pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
705 cRanges++;
706 Assert(cRanges > 0);
707
708 unsigned cMaxDepth = ASMBitLastSetU32(cRanges);
709 if ((1U << cMaxDepth) < cRanges)
710 cMaxDepth++;
711
712 for (pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
713 {
714 unsigned cDepth = 0;
715 PPGMRAMRANGE pRam2 = pVM->pgm.s.pRamRangeTreeR3;
716 for (;;)
717 {
718 if (pRam == pRam2)
719 break;
720 Assert(pRam2);
721 if (pRam->GCPhys < pRam2->GCPhys)
722 pRam2 = pRam2->pLeftR3;
723 else
724 pRam2 = pRam2->pRightR3;
725 }
726 AssertMsg(cDepth <= cMaxDepth, ("cDepth=%d cMaxDepth=%d\n", cDepth, cMaxDepth));
727 }
728#endif /* VBOX_STRICT */
729}
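/* Worked example (editorial addition): with seven RAM ranges r1..r7 linked in
 * ascending GCPhys order, the loop above proceeds as follows:
 *   - the initial recurse(depth 0) makes r1 a leaf and the first root;
 *   - iteration 1: r2 becomes the root, r1 its left child, and a depth-0
 *     subtree (leaf r3) its right child;
 *   - iteration 2: r4 becomes the root, the r2 subtree its left child, and a
 *     depth-1 subtree rooted at r6 (children r5 and r7) its right child.
 * The result is the perfectly balanced binary search tree
 *
 *              r4
 *            /    \
 *          r2      r6
 *         /  \    /  \
 *        r1  r3  r5  r7
 *
 * which the range lookup helpers (e.g. pgmPhysGetRangeAtOrAbove) then walk by
 * GCPhys instead of scanning the linked list.
 */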
730
731#undef MAKE_LEAF
732#undef INSERT_LEFT
733#undef INSERT_RIGHT
734
735/**
736 * Relinks the RAM ranges using the pSelfRC and pSelfR0 pointers.
737 *
738 * Called when anything was relocated.
739 *
740 * @param pVM The cross context VM structure.
741 */
742void pgmR3PhysRelinkRamRanges(PVM pVM)
743{
744 PPGMRAMRANGE pCur;
745
746#ifdef VBOX_STRICT
747 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
748 {
749 Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfR0 == MMHyperCCToR0(pVM, pCur));
750 Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfRC == MMHyperCCToRC(pVM, pCur));
751 Assert((pCur->GCPhys & PAGE_OFFSET_MASK) == 0);
752 Assert((pCur->GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
753 Assert((pCur->cb & PAGE_OFFSET_MASK) == 0);
754 Assert(pCur->cb == pCur->GCPhysLast - pCur->GCPhys + 1);
755 for (PPGMRAMRANGE pCur2 = pVM->pgm.s.pRamRangesXR3; pCur2; pCur2 = pCur2->pNextR3)
756 Assert( pCur2 == pCur
757 || strcmp(pCur2->pszDesc, pCur->pszDesc)); /** @todo fix MMIO ranges!! */
758 }
759#endif
760
761 pCur = pVM->pgm.s.pRamRangesXR3;
762 if (pCur)
763 {
764 pVM->pgm.s.pRamRangesXR0 = pCur->pSelfR0;
765 pVM->pgm.s.pRamRangesXRC = pCur->pSelfRC;
766
767 for (; pCur->pNextR3; pCur = pCur->pNextR3)
768 {
769 pCur->pNextR0 = pCur->pNextR3->pSelfR0;
770 pCur->pNextRC = pCur->pNextR3->pSelfRC;
771 }
772
773 Assert(pCur->pNextR0 == NIL_RTR0PTR);
774 Assert(pCur->pNextRC == NIL_RTRCPTR);
775 }
776 else
777 {
778 Assert(pVM->pgm.s.pRamRangesXR0 == NIL_RTR0PTR);
779 Assert(pVM->pgm.s.pRamRangesXRC == NIL_RTRCPTR);
780 }
781 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
782
783 pgmR3PhysRebuildRamRangeSearchTrees(pVM);
784}
785
786
787/**
788 * Links a new RAM range into the list.
789 *
790 * @param pVM The cross context VM structure.
791 * @param pNew Pointer to the new list entry.
792 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
793 */
794static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
795{
796 AssertMsg(pNew->pszDesc, ("%RGp-%RGp\n", pNew->GCPhys, pNew->GCPhysLast));
797 Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfR0 == MMHyperCCToR0(pVM, pNew));
798 Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfRC == MMHyperCCToRC(pVM, pNew));
799
800 pgmLock(pVM);
801
802 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesXR3;
803 pNew->pNextR3 = pRam;
804 pNew->pNextR0 = pRam ? pRam->pSelfR0 : NIL_RTR0PTR;
805 pNew->pNextRC = pRam ? pRam->pSelfRC : NIL_RTRCPTR;
806
807 if (pPrev)
808 {
809 pPrev->pNextR3 = pNew;
810 pPrev->pNextR0 = pNew->pSelfR0;
811 pPrev->pNextRC = pNew->pSelfRC;
812 }
813 else
814 {
815 pVM->pgm.s.pRamRangesXR3 = pNew;
816 pVM->pgm.s.pRamRangesXR0 = pNew->pSelfR0;
817 pVM->pgm.s.pRamRangesXRC = pNew->pSelfRC;
818 }
819 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
820
821 pgmR3PhysRebuildRamRangeSearchTrees(pVM);
822 pgmUnlock(pVM);
823}
824
825
826/**
827 * Unlink an existing RAM range from the list.
828 *
829 * @param pVM The cross context VM structure.
 830 * @param pRam Pointer to the existing list entry to unlink.
 831 * @param pPrev Pointer to the previous list entry. If NULL, the range is the list head.
832 */
833static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
834{
835 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesXR3 == pRam);
836 Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfR0 == MMHyperCCToR0(pVM, pRam));
837 Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfRC == MMHyperCCToRC(pVM, pRam));
838
839 pgmLock(pVM);
840
841 PPGMRAMRANGE pNext = pRam->pNextR3;
842 if (pPrev)
843 {
844 pPrev->pNextR3 = pNext;
845 pPrev->pNextR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
846 pPrev->pNextRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
847 }
848 else
849 {
850 Assert(pVM->pgm.s.pRamRangesXR3 == pRam);
851 pVM->pgm.s.pRamRangesXR3 = pNext;
852 pVM->pgm.s.pRamRangesXR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
853 pVM->pgm.s.pRamRangesXRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
854 }
855 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
856
857 pgmR3PhysRebuildRamRangeSearchTrees(pVM);
858 pgmUnlock(pVM);
859}
860
861
862/**
863 * Unlink an existing RAM range from the list.
864 *
865 * @param pVM The cross context VM structure.
 866 * @param pRam Pointer to the existing list entry to unlink.
867 */
868static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
869{
870 pgmLock(pVM);
871
872 /* find prev. */
873 PPGMRAMRANGE pPrev = NULL;
874 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesXR3;
875 while (pCur != pRam)
876 {
877 pPrev = pCur;
878 pCur = pCur->pNextR3;
879 }
880 AssertFatal(pCur);
881
882 pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
883 pgmUnlock(pVM);
884}
885
886
887/**
888 * Frees a range of pages, replacing them with ZERO pages of the specified type.
889 *
890 * @returns VBox status code.
891 * @param pVM The cross context VM structure.
 892 * @param pRam The RAM range in which the pages reside.
893 * @param GCPhys The address of the first page.
894 * @param GCPhysLast The address of the last page.
 895 * @param uType The page type to replace them with.
896 */
897static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, uint8_t uType)
898{
899 PGM_LOCK_ASSERT_OWNER(pVM);
900 uint32_t cPendingPages = 0;
901 PGMMFREEPAGESREQ pReq;
902 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
903 AssertLogRelRCReturn(rc, rc);
904
905 /* Iterate the pages. */
906 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
907 uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> PAGE_SHIFT) + 1;
908 while (cPagesLeft-- > 0)
909 {
910 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
911 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
912
913 PGM_PAGE_SET_TYPE(pVM, pPageDst, uType);
914
915 GCPhys += PAGE_SIZE;
916 pPageDst++;
917 }
918
919 if (cPendingPages)
920 {
921 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
922 AssertLogRelRCReturn(rc, rc);
923 }
924 GMMR3FreePagesCleanup(pReq);
925
926 return rc;
927}
928
929#if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))
930
931/**
932 * Rendezvous callback used by PGMR3ChangeMemBalloon that changes the memory balloon size
933 *
934 * This is only called on one of the EMTs while the other ones are waiting for
935 * it to complete this function.
936 *
937 * @returns VINF_SUCCESS (VBox strict status code).
938 * @param pVM The cross context VM structure.
939 * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
940 * @param pvUser User parameter
941 */
942static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysChangeMemBalloonRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
943{
944 uintptr_t *paUser = (uintptr_t *)pvUser;
945 bool fInflate = !!paUser[0];
946 unsigned cPages = paUser[1];
947 RTGCPHYS *paPhysPage = (RTGCPHYS *)paUser[2];
948 uint32_t cPendingPages = 0;
949 PGMMFREEPAGESREQ pReq;
950 int rc;
951
952 Log(("pgmR3PhysChangeMemBalloonRendezvous: %s %x pages\n", (fInflate) ? "inflate" : "deflate", cPages));
953 pgmLock(pVM);
954
955 if (fInflate)
956 {
957 /* Flush the PGM pool cache as we might have stale references to pages that we just freed. */
958 pgmR3PoolClearAllRendezvous(pVM, pVCpu, NULL);
959
960 /* Replace pages with ZERO pages. */
961 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
962 if (RT_FAILURE(rc))
963 {
964 pgmUnlock(pVM);
965 AssertLogRelRC(rc);
966 return rc;
967 }
968
969 /* Iterate the pages. */
970 for (unsigned i = 0; i < cPages; i++)
971 {
972 PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
973 if ( pPage == NULL
974 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM)
975 {
976 Log(("pgmR3PhysChangeMemBalloonRendezvous: invalid physical page %RGp pPage->u3Type=%d\n", paPhysPage[i], pPage ? PGM_PAGE_GET_TYPE(pPage) : 0));
977 break;
978 }
979
980 LogFlow(("balloon page: %RGp\n", paPhysPage[i]));
981
982 /* Flush the shadow PT if this page was previously used as a guest page table. */
983 pgmPoolFlushPageByGCPhys(pVM, paPhysPage[i]);
984
985 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, paPhysPage[i]);
986 if (RT_FAILURE(rc))
987 {
988 pgmUnlock(pVM);
989 AssertLogRelRC(rc);
990 return rc;
991 }
992 Assert(PGM_PAGE_IS_ZERO(pPage));
993 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
994 }
995
996 if (cPendingPages)
997 {
998 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
999 if (RT_FAILURE(rc))
1000 {
1001 pgmUnlock(pVM);
1002 AssertLogRelRC(rc);
1003 return rc;
1004 }
1005 }
1006 GMMR3FreePagesCleanup(pReq);
1007 }
1008 else
1009 {
1010 /* Iterate the pages. */
1011 for (unsigned i = 0; i < cPages; i++)
1012 {
1013 PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
1014 AssertBreak(pPage && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
1015
1016 LogFlow(("Free ballooned page: %RGp\n", paPhysPage[i]));
1017
1018 Assert(PGM_PAGE_IS_BALLOONED(pPage));
1019
1020 /* Change back to zero page. */
1021 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
1022 }
1023
1024 /* Note that we currently do not map any ballooned pages in our shadow page tables, so no need to flush the pgm pool. */
1025 }
1026
1027 /* Notify GMM about the balloon change. */
1028 rc = GMMR3BalloonedPages(pVM, (fInflate) ? GMMBALLOONACTION_INFLATE : GMMBALLOONACTION_DEFLATE, cPages);
1029 if (RT_SUCCESS(rc))
1030 {
1031 if (!fInflate)
1032 {
1033 Assert(pVM->pgm.s.cBalloonedPages >= cPages);
1034 pVM->pgm.s.cBalloonedPages -= cPages;
1035 }
1036 else
1037 pVM->pgm.s.cBalloonedPages += cPages;
1038 }
1039
1040 pgmUnlock(pVM);
1041
1042 /* Flush the recompiler's TLB as well. */
1043 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1044 CPUMSetChangedFlags(&pVM->aCpus[i], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
1045
1046 AssertLogRelRC(rc);
1047 return rc;
1048}
1049
1050
1051/**
 1052 * Worker for PGMR3PhysChangeMemBalloon; performs the balloon change via an EMT rendezvous and frees the page array copy.
1053 *
1054 * @returns VBox status code.
1055 * @param pVM The cross context VM structure.
1056 * @param fInflate Inflate or deflate memory balloon
1057 * @param cPages Number of pages to free
1058 * @param paPhysPage Array of guest physical addresses
1059 */
1060static DECLCALLBACK(void) pgmR3PhysChangeMemBalloonHelper(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
1061{
1062 uintptr_t paUser[3];
1063
1064 paUser[0] = fInflate;
1065 paUser[1] = cPages;
1066 paUser[2] = (uintptr_t)paPhysPage;
1067 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
1068 AssertRC(rc);
1069
 1070 /* Made a copy in PGMR3PhysChangeMemBalloon; free it here. */
1071 RTMemFree(paPhysPage);
1072}
1073
1074#endif /* 64-bit host && (Windows || Solaris || Linux || FreeBSD) */
1075
1076/**
1077 * Inflate or deflate a memory balloon
1078 *
1079 * @returns VBox status code.
1080 * @param pVM The cross context VM structure.
1081 * @param fInflate Inflate or deflate memory balloon
1082 * @param cPages Number of pages to free
1083 * @param paPhysPage Array of guest physical addresses
1084 */
1085VMMR3DECL(int) PGMR3PhysChangeMemBalloon(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
1086{
1087 /* This must match GMMR0Init; currently we only support memory ballooning on all 64-bit hosts except Mac OS X */
1088#if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))
1089 int rc;
1090
1091 /* Older additions (ancient non-functioning balloon code) pass wrong physical addresses. */
1092 AssertReturn(!(paPhysPage[0] & 0xfff), VERR_INVALID_PARAMETER);
1093
1094 /* We own the IOM lock here and could cause a deadlock by waiting for another VCPU that is blocking on the IOM lock.
1095 * In the SMP case we post a request packet to postpone the job.
1096 */
1097 if (pVM->cCpus > 1)
1098 {
1099 unsigned cbPhysPage = cPages * sizeof(paPhysPage[0]);
1100 RTGCPHYS *paPhysPageCopy = (RTGCPHYS *)RTMemAlloc(cbPhysPage);
1101 AssertReturn(paPhysPageCopy, VERR_NO_MEMORY);
1102
1103 memcpy(paPhysPageCopy, paPhysPage, cbPhysPage);
1104
1105 rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysChangeMemBalloonHelper, 4, pVM, fInflate, cPages, paPhysPageCopy);
1106 AssertRC(rc);
1107 }
1108 else
1109 {
1110 uintptr_t paUser[3];
1111
1112 paUser[0] = fInflate;
1113 paUser[1] = cPages;
1114 paUser[2] = (uintptr_t)paPhysPage;
1115 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
1116 AssertRC(rc);
1117 }
1118 return rc;
1119
1120#else
1121 NOREF(pVM); NOREF(fInflate); NOREF(cPages); NOREF(paPhysPage);
1122 return VERR_NOT_IMPLEMENTED;
1123#endif
1124}
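/* Usage sketch (editorial addition): inflating the balloon with a batch of
 * guest physical page addresses, e.g. as reported by the balloon service in
 * the guest. The array and its size are illustrative.
 *
 *     RTGCPHYS aPages[32];
 *     // ... fill aPages[] with page aligned guest physical addresses ...
 *     int rc = PGMR3PhysChangeMemBalloon(pVM, true, RT_ELEMENTS(aPages), aPages); // true = inflate
 *     if (rc == VERR_NOT_IMPLEMENTED)
 *         LogRel(("Ballooning is not supported on this host\n"));
 */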
1125
1126
1127/**
 1128 * Rendezvous callback used by PGMR3PhysWriteProtectRAM that write protects all
1129 * physical RAM.
1130 *
1131 * This is only called on one of the EMTs while the other ones are waiting for
1132 * it to complete this function.
1133 *
1134 * @returns VINF_SUCCESS (VBox strict status code).
1135 * @param pVM The cross context VM structure.
1136 * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
1137 * @param pvUser User parameter, unused.
1138 */
1139static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysWriteProtectRAMRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
1140{
1141 int rc = VINF_SUCCESS;
1142 NOREF(pvUser); NOREF(pVCpu);
1143
1144 pgmLock(pVM);
1145#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1146 pgmPoolResetDirtyPages(pVM);
1147#endif
1148
1149 /** @todo pointless to write protect the physical page pointed to by RSP. */
1150
1151 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
1152 pRam;
1153 pRam = pRam->CTX_SUFF(pNext))
1154 {
1155 uint32_t cPages = pRam->cb >> PAGE_SHIFT;
1156 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1157 {
1158 PPGMPAGE pPage = &pRam->aPages[iPage];
1159 PGMPAGETYPE enmPageType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1160
1161 if ( RT_LIKELY(enmPageType == PGMPAGETYPE_RAM)
1162 || enmPageType == PGMPAGETYPE_MMIO2)
1163 {
1164 /*
1165 * A RAM page.
1166 */
1167 switch (PGM_PAGE_GET_STATE(pPage))
1168 {
1169 case PGM_PAGE_STATE_ALLOCATED:
1170 /** @todo Optimize this: Don't always re-enable write
1171 * monitoring if the page is known to be very busy. */
1172 if (PGM_PAGE_IS_WRITTEN_TO(pPage))
1173 {
1174 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
1175 /* Remember this dirty page for the next (memory) sync. */
1176 PGM_PAGE_SET_FT_DIRTY(pPage);
1177 }
1178
1179 pgmPhysPageWriteMonitor(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1180 break;
1181
1182 case PGM_PAGE_STATE_SHARED:
1183 AssertFailed();
1184 break;
1185
1186 case PGM_PAGE_STATE_WRITE_MONITORED: /* nothing to change. */
1187 default:
1188 break;
1189 }
1190 }
1191 }
1192 }
1193 pgmR3PoolWriteProtectPages(pVM);
1194 PGM_INVL_ALL_VCPU_TLBS(pVM);
1195 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1196 CPUMSetChangedFlags(&pVM->aCpus[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
1197
1198 pgmUnlock(pVM);
1199 return rc;
1200}
1201
1202/**
1203 * Protect all physical RAM to monitor writes
1204 *
1205 * @returns VBox status code.
1206 * @param pVM The cross context VM structure.
1207 */
1208VMMR3DECL(int) PGMR3PhysWriteProtectRAM(PVM pVM)
1209{
1210 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1211
1212 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysWriteProtectRAMRendezvous, NULL);
1213 AssertRC(rc);
1214 return rc;
1215}
1216
1217/**
1218 * Enumerate all dirty FT pages.
1219 *
1220 * @returns VBox status code.
1221 * @param pVM The cross context VM structure.
1222 * @param pfnEnum Enumerate callback handler.
1223 * @param pvUser Enumerate callback handler parameter.
1224 */
1225VMMR3DECL(int) PGMR3PhysEnumDirtyFTPages(PVM pVM, PFNPGMENUMDIRTYFTPAGES pfnEnum, void *pvUser)
1226{
1227 int rc = VINF_SUCCESS;
1228
1229 pgmLock(pVM);
1230 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
1231 pRam;
1232 pRam = pRam->CTX_SUFF(pNext))
1233 {
1234 uint32_t cPages = pRam->cb >> PAGE_SHIFT;
1235 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1236 {
1237 PPGMPAGE pPage = &pRam->aPages[iPage];
1238 PGMPAGETYPE enmPageType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1239
1240 if ( RT_LIKELY(enmPageType == PGMPAGETYPE_RAM)
1241 || enmPageType == PGMPAGETYPE_MMIO2)
1242 {
1243 /*
1244 * A RAM page.
1245 */
1246 switch (PGM_PAGE_GET_STATE(pPage))
1247 {
1248 case PGM_PAGE_STATE_ALLOCATED:
1249 case PGM_PAGE_STATE_WRITE_MONITORED:
1250 if ( !PGM_PAGE_IS_WRITTEN_TO(pPage) /* not very recently updated? */
1251 && PGM_PAGE_IS_FT_DIRTY(pPage))
1252 {
1253 unsigned cbPageRange = PAGE_SIZE;
1254 unsigned iPageClean = iPage + 1;
1255 RTGCPHYS GCPhysPage = pRam->GCPhys + iPage * PAGE_SIZE;
1256 uint8_t *pu8Page = NULL;
1257 PGMPAGEMAPLOCK Lock;
1258
1259 /* Find the next clean page, so we can merge adjacent dirty pages. */
1260 for (; iPageClean < cPages; iPageClean++)
1261 {
1262 PPGMPAGE pPageNext = &pRam->aPages[iPageClean];
1263 if ( RT_UNLIKELY(PGM_PAGE_GET_TYPE(pPageNext) != PGMPAGETYPE_RAM)
1264 || PGM_PAGE_GET_STATE(pPageNext) != PGM_PAGE_STATE_ALLOCATED
1265 || PGM_PAGE_IS_WRITTEN_TO(pPageNext)
1266 || !PGM_PAGE_IS_FT_DIRTY(pPageNext)
1267 /* Crossing a chunk boundary? */
1268 || (GCPhysPage & GMM_PAGEID_IDX_MASK) != ((GCPhysPage + cbPageRange) & GMM_PAGEID_IDX_MASK)
1269 )
1270 break;
1271
1272 cbPageRange += PAGE_SIZE;
1273 }
1274
1275 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysPage, (const void **)&pu8Page, &Lock);
1276 if (RT_SUCCESS(rc))
1277 {
1278 /** @todo this is risky; the range might be changed, but little choice as the sync
1279 * costs a lot of time. */
1280 pgmUnlock(pVM);
1281 pfnEnum(pVM, GCPhysPage, pu8Page, cbPageRange, pvUser);
1282 pgmLock(pVM);
1283 PGMPhysReleasePageMappingLock(pVM, &Lock);
1284 }
1285
 1286 for (; iPage < iPageClean; iPage++)
1287 PGM_PAGE_CLEAR_FT_DIRTY(&pRam->aPages[iPage]);
1288
1289 iPage = iPageClean - 1;
1290 }
1291 break;
1292 }
1293 }
1294 }
1295 }
1296 pgmUnlock(pVM);
1297 return rc;
1298}
1299
1300
1301/**
1302 * Gets the number of ram ranges.
1303 *
1304 * @returns Number of ram ranges. Returns UINT32_MAX if @a pVM is invalid.
1305 * @param pVM The cross context VM structure.
1306 */
1307VMMR3DECL(uint32_t) PGMR3PhysGetRamRangeCount(PVM pVM)
1308{
1309 VM_ASSERT_VALID_EXT_RETURN(pVM, UINT32_MAX);
1310
1311 pgmLock(pVM);
1312 uint32_t cRamRanges = 0;
1313 for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRangesX); pCur; pCur = pCur->CTX_SUFF(pNext))
1314 cRamRanges++;
1315 pgmUnlock(pVM);
1316 return cRamRanges;
1317}
1318
1319
1320/**
1321 * Get information about a range.
1322 *
1323 * @returns VINF_SUCCESS or VERR_OUT_OF_RANGE.
1324 * @param pVM The cross context VM structure.
1325 * @param iRange The ordinal of the range.
1326 * @param pGCPhysStart Where to return the start of the range. Optional.
1327 * @param pGCPhysLast Where to return the address of the last byte in the
1328 * range. Optional.
1329 * @param ppszDesc Where to return the range description. Optional.
1330 * @param pfIsMmio Where to indicate that this is a pure MMIO range.
1331 * Optional.
1332 */
1333VMMR3DECL(int) PGMR3PhysGetRange(PVM pVM, uint32_t iRange, PRTGCPHYS pGCPhysStart, PRTGCPHYS pGCPhysLast,
1334 const char **ppszDesc, bool *pfIsMmio)
1335{
1336 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1337
1338 pgmLock(pVM);
1339 uint32_t iCurRange = 0;
1340 for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRangesX); pCur; pCur = pCur->CTX_SUFF(pNext), iCurRange++)
1341 if (iCurRange == iRange)
1342 {
1343 if (pGCPhysStart)
1344 *pGCPhysStart = pCur->GCPhys;
1345 if (pGCPhysLast)
1346 *pGCPhysLast = pCur->GCPhysLast;
1347 if (ppszDesc)
1348 *ppszDesc = pCur->pszDesc;
1349 if (pfIsMmio)
1350 *pfIsMmio = !!(pCur->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO);
1351
1352 pgmUnlock(pVM);
1353 return VINF_SUCCESS;
1354 }
1355 pgmUnlock(pVM);
1356 return VERR_OUT_OF_RANGE;
1357}
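/* Usage sketch (editorial addition): enumerating all RAM ranges, e.g. for an
 * info handler or a debugger command; variable names are illustrative.
 *
 *     uint32_t const cRanges = PGMR3PhysGetRamRangeCount(pVM);
 *     for (uint32_t iRange = 0; iRange < cRanges; iRange++)
 *     {
 *         RTGCPHYS    GCPhysStart, GCPhysLast;
 *         const char *pszDesc;
 *         bool        fIsMmio;
 *         int rc = PGMR3PhysGetRange(pVM, iRange, &GCPhysStart, &GCPhysLast, &pszDesc, &fIsMmio);
 *         if (RT_SUCCESS(rc))
 *             LogRel(("%RGp-%RGp %s%s\n", GCPhysStart, GCPhysLast, pszDesc, fIsMmio ? " (MMIO)" : ""));
 *     }
 */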
1358
1359
1360/**
1361 * Query the amount of free memory inside VMMR0
1362 *
1363 * @returns VBox status code.
1364 * @param pUVM The user mode VM handle.
1365 * @param pcbAllocMem Where to return the amount of memory allocated
1366 * by VMs.
1367 * @param pcbFreeMem Where to return the amount of memory that is
1368 * allocated from the host but not currently used
1369 * by any VMs.
1370 * @param pcbBallonedMem Where to return the sum of memory that is
1371 * currently ballooned by the VMs.
1372 * @param pcbSharedMem Where to return the amount of memory that is
1373 * currently shared.
1374 */
1375VMMR3DECL(int) PGMR3QueryGlobalMemoryStats(PUVM pUVM, uint64_t *pcbAllocMem, uint64_t *pcbFreeMem,
1376 uint64_t *pcbBallonedMem, uint64_t *pcbSharedMem)
1377{
1378 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1379 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
1380
1381 uint64_t cAllocPages = 0;
1382 uint64_t cFreePages = 0;
1383 uint64_t cBalloonPages = 0;
1384 uint64_t cSharedPages = 0;
1385 int rc = GMMR3QueryHypervisorMemoryStats(pUVM->pVM, &cAllocPages, &cFreePages, &cBalloonPages, &cSharedPages);
1386 AssertRCReturn(rc, rc);
1387
1388 if (pcbAllocMem)
1389 *pcbAllocMem = cAllocPages * _4K;
1390
1391 if (pcbFreeMem)
1392 *pcbFreeMem = cFreePages * _4K;
1393
1394 if (pcbBallonedMem)
1395 *pcbBallonedMem = cBalloonPages * _4K;
1396
1397 if (pcbSharedMem)
1398 *pcbSharedMem = cSharedPages * _4K;
1399
1400 Log(("PGMR3QueryVMMMemoryStats: all=%llx free=%llx ballooned=%llx shared=%llx\n",
1401 cAllocPages, cFreePages, cBalloonPages, cSharedPages));
1402 return VINF_SUCCESS;
1403}
1404
1405
1406/**
1407 * Query memory stats for the VM.
1408 *
1409 * @returns VBox status code.
1410 * @param pUVM The user mode VM handle.
 1411 * @param pcbTotalMem Where to return the total amount of memory the VM may
1412 * possibly use.
1413 * @param pcbPrivateMem Where to return the amount of private memory
1414 * currently allocated.
1415 * @param pcbSharedMem Where to return the amount of actually shared
1416 * memory currently used by the VM.
1417 * @param pcbZeroMem Where to return the amount of memory backed by
1418 * zero pages.
1419 *
1420 * @remarks The total mem is normally larger than the sum of the three
 1421 * components. There are two reasons for this: first, the amount of
 1422 * shared memory is what we're sure is shared instead of what could
 1423 * possibly be shared with someone. Secondly, the total may
 1424 * include some pure MMIO pages that don't go into any of the three
1425 * sub-counts.
1426 *
1427 * @todo Why do we return reused shared pages instead of anything that could
1428 * potentially be shared? Doesn't this mean the first VM gets a much
1429 * lower number of shared pages?
1430 */
1431VMMR3DECL(int) PGMR3QueryMemoryStats(PUVM pUVM, uint64_t *pcbTotalMem, uint64_t *pcbPrivateMem,
1432 uint64_t *pcbSharedMem, uint64_t *pcbZeroMem)
1433{
1434 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1435 PVM pVM = pUVM->pVM;
1436 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1437
1438 if (pcbTotalMem)
1439 *pcbTotalMem = (uint64_t)pVM->pgm.s.cAllPages * PAGE_SIZE;
1440
1441 if (pcbPrivateMem)
1442 *pcbPrivateMem = (uint64_t)pVM->pgm.s.cPrivatePages * PAGE_SIZE;
1443
1444 if (pcbSharedMem)
1445 *pcbSharedMem = (uint64_t)pVM->pgm.s.cReusedSharedPages * PAGE_SIZE;
1446
1447 if (pcbZeroMem)
1448 *pcbZeroMem = (uint64_t)pVM->pgm.s.cZeroPages * PAGE_SIZE;
1449
1450 Log(("PGMR3QueryMemoryStats: all=%x private=%x reused=%x zero=%x\n", pVM->pgm.s.cAllPages, pVM->pgm.s.cPrivatePages, pVM->pgm.s.cReusedSharedPages, pVM->pgm.s.cZeroPages));
1451 return VINF_SUCCESS;
1452}
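/* Usage sketch (editorial addition): querying the per-VM and global statistics
 * from code that holds a user mode VM handle (PUVM); names are illustrative.
 *
 *     uint64_t cbTotal, cbPrivate, cbShared, cbZero;
 *     int rc = PGMR3QueryMemoryStats(pUVM, &cbTotal, &cbPrivate, &cbShared, &cbZero);
 *     if (RT_SUCCESS(rc))
 *         LogRel(("Total %RU64, private %RU64, shared %RU64, zero %RU64 (bytes)\n",
 *                 cbTotal, cbPrivate, cbShared, cbZero));
 *
 *     uint64_t cbAlloc, cbFree, cbBallooned, cbSharedGlobal;
 *     rc = PGMR3QueryGlobalMemoryStats(pUVM, &cbAlloc, &cbFree, &cbBallooned, &cbSharedGlobal);
 */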
1453
1454
1455/**
1456 * PGMR3PhysRegisterRam worker that initializes and links a RAM range.
1457 *
1458 * @param pVM The cross context VM structure.
1459 * @param pNew The new RAM range.
1460 * @param GCPhys The address of the RAM range.
1461 * @param GCPhysLast The last address of the RAM range.
1462 * @param RCPtrNew The RC address if the range is floating. NIL_RTRCPTR
1463 * if in HMA.
1464 * @param R0PtrNew Ditto for R0.
1465 * @param pszDesc The description.
1466 * @param pPrev The previous RAM range (for linking).
1467 */
1468static void pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
1469 RTRCPTR RCPtrNew, RTR0PTR R0PtrNew, const char *pszDesc, PPGMRAMRANGE pPrev)
1470{
1471 /*
1472 * Initialize the range.
1473 */
1474 pNew->pSelfR0 = R0PtrNew != NIL_RTR0PTR ? R0PtrNew : MMHyperCCToR0(pVM, pNew);
1475 pNew->pSelfRC = RCPtrNew != NIL_RTRCPTR ? RCPtrNew : MMHyperCCToRC(pVM, pNew);
1476 pNew->GCPhys = GCPhys;
1477 pNew->GCPhysLast = GCPhysLast;
1478 pNew->cb = GCPhysLast - GCPhys + 1;
1479 pNew->pszDesc = pszDesc;
1480 pNew->fFlags = RCPtrNew != NIL_RTRCPTR ? PGM_RAM_RANGE_FLAGS_FLOATING : 0;
1481 pNew->pvR3 = NULL;
1482 pNew->paLSPages = NULL;
1483
1484 uint32_t const cPages = pNew->cb >> PAGE_SHIFT;
1485 RTGCPHYS iPage = cPages;
1486 while (iPage-- > 0)
1487 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
1488
1489 /* Update the page count stats. */
1490 pVM->pgm.s.cZeroPages += cPages;
1491 pVM->pgm.s.cAllPages += cPages;
1492
1493 /*
1494 * Link it.
1495 */
1496 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
1497}
1498
1499
1500/**
1501 * Relocate a floating RAM range.
1502 *
1503 * @copydoc FNPGMRELOCATE
1504 */
1505static DECLCALLBACK(bool) pgmR3PhysRamRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
1506 PGMRELOCATECALL enmMode, void *pvUser)
1507{
1508 PPGMRAMRANGE pRam = (PPGMRAMRANGE)pvUser;
1509 Assert(pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
1510 Assert(pRam->pSelfRC == GCPtrOld + PAGE_SIZE);
1511
1512 switch (enmMode)
1513 {
1514 case PGMRELOCATECALL_SUGGEST:
1515 return true;
1516
1517 case PGMRELOCATECALL_RELOCATE:
1518 {
1519 /*
1520 * Update myself, then relink all the ranges and flush the RC TLB.
1521 */
1522 pgmLock(pVM);
1523
1524 pRam->pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE);
1525
1526 pgmR3PhysRelinkRamRanges(pVM);
1527 for (unsigned i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
1528 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
1529
1530 pgmUnlock(pVM);
1531 return true;
1532 }
1533
1534 default:
1535 AssertFailedReturn(false);
1536 }
1537}
1538
1539
1540/**
1541 * PGMR3PhysRegisterRam worker that registers a high chunk.
1542 *
1543 * @returns VBox status code.
1544 * @param pVM The cross context VM structure.
1545 * @param GCPhys The address of the RAM.
1546 * @param cRamPages The number of RAM pages to register.
1547 * @param cbChunk The size of the PGMRAMRANGE guest mapping.
1548 * @param iChunk The chunk number.
1549 * @param pszDesc The RAM range description.
1550 * @param ppPrev Previous RAM range pointer. In/Out.
1551 */
1552static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages,
1553 uint32_t cbChunk, uint32_t iChunk, const char *pszDesc,
1554 PPGMRAMRANGE *ppPrev)
1555{
1556 const char *pszDescChunk = iChunk == 0
1557 ? pszDesc
1558 : MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s (#%u)", pszDesc, iChunk + 1);
1559 AssertReturn(pszDescChunk, VERR_NO_MEMORY);
1560
1561 /*
1562 * Allocate memory for the new chunk.
1563 */
1564 size_t const cChunkPages = RT_ALIGN_Z(RT_UOFFSETOF(PGMRAMRANGE, aPages[cRamPages]), PAGE_SIZE) >> PAGE_SHIFT;
1565 PSUPPAGE paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages);
1566 AssertReturn(paChunkPages, VERR_NO_TMP_MEMORY);
1567 RTR0PTR R0PtrChunk = NIL_RTR0PTR;
1568 void *pvChunk = NULL;
1569 int rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk,
1570#if defined(VBOX_WITH_MORE_RING0_MEM_MAPPINGS)
1571 &R0PtrChunk,
1572#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
1573 HMIsEnabled(pVM) ? &R0PtrChunk : NULL,
1574#else
1575 NULL,
1576#endif
1577 paChunkPages);
1578 if (RT_SUCCESS(rc))
1579 {
1580#if defined(VBOX_WITH_MORE_RING0_MEM_MAPPINGS)
1581 Assert(R0PtrChunk != NIL_RTR0PTR);
1582#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
1583 if (!HMIsEnabled(pVM))
1584 R0PtrChunk = NIL_RTR0PTR;
1585#else
1586 R0PtrChunk = (uintptr_t)pvChunk;
1587#endif
1588 memset(pvChunk, 0, cChunkPages << PAGE_SHIFT);
1589
1590 PPGMRAMRANGE pNew = (PPGMRAMRANGE)pvChunk;
1591
1592 /*
1593 * Create a mapping and map the pages into it.
1594 * We push these in below the HMA.
1595 */
1596 RTGCPTR GCPtrChunkMap = pVM->pgm.s.GCPtrPrevRamRangeMapping - cbChunk;
1597 rc = PGMR3MapPT(pVM, GCPtrChunkMap, cbChunk, 0 /*fFlags*/, pgmR3PhysRamRangeRelocate, pNew, pszDescChunk);
1598 if (RT_SUCCESS(rc))
1599 {
1600 pVM->pgm.s.GCPtrPrevRamRangeMapping = GCPtrChunkMap;
1601
1602 RTGCPTR const GCPtrChunk = GCPtrChunkMap + PAGE_SIZE;
1603 RTGCPTR GCPtrPage = GCPtrChunk;
1604 for (uint32_t iPage = 0; iPage < cChunkPages && RT_SUCCESS(rc); iPage++, GCPtrPage += PAGE_SIZE)
1605 rc = PGMMap(pVM, GCPtrPage, paChunkPages[iPage].Phys, PAGE_SIZE, 0);
1606 if (RT_SUCCESS(rc))
1607 {
1608 /*
1609 * Ok, init and link the range.
1610 */
1611 pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << PAGE_SHIFT) - 1,
1612 (RTRCPTR)GCPtrChunk, R0PtrChunk, pszDescChunk, *ppPrev);
1613 *ppPrev = pNew;
1614 }
1615 }
1616
1617 if (RT_FAILURE(rc))
1618 SUPR3PageFreeEx(pvChunk, cChunkPages);
1619 }
1620
1621 RTMemTmpFree(paChunkPages);
1622 return rc;
1623}
1624
1625
1626/**
 1627 * Sets up a RAM range.
1628 *
1629 * This will check for conflicting registrations, make a resource
 1630 * reservation for the memory (with GMM), and set up the per-page
1631 * tracking structures (PGMPAGE).
1632 *
1633 * @returns VBox status code.
1634 * @param pVM The cross context VM structure.
1635 * @param GCPhys The physical address of the RAM.
1636 * @param cb The size of the RAM.
1637 * @param pszDesc The description - not copied, so, don't free or change it.
1638 */
1639VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
1640{
1641 /*
1642 * Validate input.
1643 */
1644 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
1645 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
1646 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
1647 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
1648 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1649 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
1650 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1651 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1652
1653 pgmLock(pVM);
1654
1655 /*
1656 * Find range location and check for conflicts.
1657 * (We don't lock here because the locking by EMT is only required on update.)
1658 */
1659 PPGMRAMRANGE pPrev = NULL;
1660 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
1661 while (pRam && GCPhysLast >= pRam->GCPhys)
1662 {
1663 if ( GCPhysLast >= pRam->GCPhys
1664 && GCPhys <= pRam->GCPhysLast)
1665 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
1666 GCPhys, GCPhysLast, pszDesc,
1667 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1668 VERR_PGM_RAM_CONFLICT);
1669
1670 /* next */
1671 pPrev = pRam;
1672 pRam = pRam->pNextR3;
1673 }
1674
1675 /*
 1676 * Register it with GMM (the API is strict about this).
1677 */
1678 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
1679 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
1680 if (RT_FAILURE(rc))
1681 {
1682 pgmUnlock(pVM);
1683 return rc;
1684 }
1685
1686 if ( GCPhys >= _4G
1687 && cPages > 256)
1688 {
1689 /*
1690 * The PGMRAMRANGE structures for the high memory can get very big.
1691 * In order to avoid SUPR3PageAllocEx allocation failures due to the
1692 * allocation size limit there and also to avoid being unable to find
 1693 * guest mapping space for them, we split this memory up into 4MB chunks
 1694 * in (potential) raw-mode configs and 16MB chunks in forced AMD-V/VT-x
1695 * mode.
1696 *
1697 * The first and last page of each mapping are guard pages and marked
1698 * not-present. So, we've got 4186112 and 16769024 bytes available for
1699 * the PGMRAMRANGE structure.
1700 *
1701 * Note! The sizes used here will influence the saved state.
1702 */
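 /* Editorial note: the byte counts quoted above are just the chunk size minus
  * the two guard pages: 16*_1M - 2*PAGE_SIZE = 16777216 - 8192 = 16769024 and
  * 4*_1M - 2*PAGE_SIZE = 4194304 - 8192 = 4186112. The cPagesPerChunk values
  * chosen below must keep sizeof(PGMRAMRANGE) plus cPagesPerChunk PGMPAGE
  * entries within that space, which the AssertCompile/AssertRelease statements
  * verify. */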
1703 uint32_t cbChunk;
1704 uint32_t cPagesPerChunk;
1705 if (HMIsEnabled(pVM))
1706 {
1707 cbChunk = 16U*_1M;
1708 cPagesPerChunk = 1048048; /* max ~1048059 */
1709 AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 1048048 < 16U*_1M - PAGE_SIZE * 2);
1710 }
1711 else
1712 {
1713 cbChunk = 4U*_1M;
1714 cPagesPerChunk = 261616; /* max ~261627 */
1715 AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 261616 < 4U*_1M - PAGE_SIZE * 2);
1716 }
1717 AssertRelease(RT_UOFFSETOF(PGMRAMRANGE, aPages[cPagesPerChunk]) + PAGE_SIZE * 2 <= cbChunk);
1718
1719 RTGCPHYS cPagesLeft = cPages;
1720 RTGCPHYS GCPhysChunk = GCPhys;
1721 uint32_t iChunk = 0;
1722 while (cPagesLeft > 0)
1723 {
1724 uint32_t cPagesInChunk = cPagesLeft;
1725 if (cPagesInChunk > cPagesPerChunk)
1726 cPagesInChunk = cPagesPerChunk;
1727
1728 rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, cbChunk, iChunk, pszDesc, &pPrev);
1729 AssertRCReturn(rc, rc);
1730
1731 /* advance */
1732 GCPhysChunk += (RTGCPHYS)cPagesInChunk << PAGE_SHIFT;
1733 cPagesLeft -= cPagesInChunk;
1734 iChunk++;
1735 }
1736 }
1737 else
1738 {
1739 /*
1740 * Allocate, initialize and link the new RAM range.
1741 */
1742 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
1743 PPGMRAMRANGE pNew;
1744 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
1745 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
1746
1747 pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, NIL_RTRCPTR, NIL_RTR0PTR, pszDesc, pPrev);
1748 }
1749 pgmPhysInvalidatePageMapTLB(pVM);
1750 pgmUnlock(pVM);
1751
1752#ifdef VBOX_WITH_REM
1753 /*
1754 * Notify REM.
1755 */
1756 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_RAM);
1757#endif
1758
1759 return VINF_SUCCESS;
1760}
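/* Usage sketch (editorial addition): registering the low and high RAM ranges
 * during VM construction; the 4GB split and the description strings are
 * illustrative, the real layout is decided by the memory setup code.
 *
 *     int rc = PGMR3PhysRegisterRam(pVM, 0, cbRamBelow4G, "Base RAM");
 *     if (RT_SUCCESS(rc) && cbRamAbove4G)
 *         rc = PGMR3PhysRegisterRam(pVM, _4G, cbRamAbove4G, "Above 4GB Base RAM");
 */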
1761
1762
1763/**
1764 * Worker called by PGMR3InitFinalize if we're configured to pre-allocate RAM.
1765 *
1766 * We do this late in the init process so that all the ROM and MMIO ranges have
1767 * been registered already and we don't go wasting memory on them.
1768 *
1769 * @returns VBox status code.
1770 *
1771 * @param pVM The cross context VM structure.
1772 */
1773int pgmR3PhysRamPreAllocate(PVM pVM)
1774{
1775 Assert(pVM->pgm.s.fRamPreAlloc);
1776 Log(("pgmR3PhysRamPreAllocate: enter\n"));
1777
1778 /*
1779 * Walk the RAM ranges and allocate all RAM pages, halt at
1780 * the first allocation error.
1781 */
1782 uint64_t cPages = 0;
1783 uint64_t NanoTS = RTTimeNanoTS();
1784 pgmLock(pVM);
1785 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
1786 {
1787 PPGMPAGE pPage = &pRam->aPages[0];
1788 RTGCPHYS GCPhys = pRam->GCPhys;
1789 uint32_t cLeft = pRam->cb >> PAGE_SHIFT;
1790 while (cLeft-- > 0)
1791 {
1792 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
1793 {
1794 switch (PGM_PAGE_GET_STATE(pPage))
1795 {
1796 case PGM_PAGE_STATE_ZERO:
1797 {
1798 int rc = pgmPhysAllocPage(pVM, pPage, GCPhys);
1799 if (RT_FAILURE(rc))
1800 {
1801 LogRel(("PGM: RAM Pre-allocation failed at %RGp (in %s) with rc=%Rrc\n", GCPhys, pRam->pszDesc, rc));
1802 pgmUnlock(pVM);
1803 return rc;
1804 }
1805 cPages++;
1806 break;
1807 }
1808
1809 case PGM_PAGE_STATE_BALLOONED:
1810 case PGM_PAGE_STATE_ALLOCATED:
1811 case PGM_PAGE_STATE_WRITE_MONITORED:
1812 case PGM_PAGE_STATE_SHARED:
1813 /* nothing to do here. */
1814 break;
1815 }
1816 }
1817
1818 /* next */
1819 pPage++;
1820 GCPhys += PAGE_SIZE;
1821 }
1822 }
1823 pgmUnlock(pVM);
1824 NanoTS = RTTimeNanoTS() - NanoTS;
1825
1826 LogRel(("PGM: Pre-allocated %llu pages in %llu ms\n", cPages, NanoTS / 1000000));
1827 Log(("pgmR3PhysRamPreAllocate: returns VINF_SUCCESS\n"));
1828 return VINF_SUCCESS;
1829}
1830
1831
1832/**
1833 * Checks shared page checksums.
1834 *
1835 * @param pVM The cross context VM structure.
1836 */
1837void pgmR3PhysAssertSharedPageChecksums(PVM pVM)
1838{
1839#ifdef VBOX_STRICT
1840 pgmLock(pVM);
1841
1842 if (pVM->pgm.s.cSharedPages > 0)
1843 {
1844 /*
1845 * Walk the ram ranges.
1846 */
1847 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
1848 {
1849 uint32_t iPage = pRam->cb >> PAGE_SHIFT;
1850 AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
1851
1852 while (iPage-- > 0)
1853 {
1854 PPGMPAGE pPage = &pRam->aPages[iPage];
1855 if (PGM_PAGE_IS_SHARED(pPage))
1856 {
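/* The shared-page checksum is stashed in two spare 2-bit PGMPAGE fields:
   u2Unused0 supplies bits 0:1 and u2Unused1 bits 8:9 of the value, which is
   why the full CRC-32 computed below is masked with 0x00000303 before the
   comparison. */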
1857 uint32_t u32Checksum = pPage->s.u2Unused0 | ((uint32_t)pPage->s.u2Unused1 << 8);
1858 if (!u32Checksum)
1859 {
1860 RTGCPHYS GCPhysPage = pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1861 void const *pvPage;
1862 int rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhysPage, &pvPage);
1863 if (RT_SUCCESS(rc))
1864 {
1865 uint32_t u32Checksum2 = RTCrc32(pvPage, PAGE_SIZE);
1866# if 0
1867 AssertMsg((u32Checksum2 & UINT32_C(0x00000303)) == u32Checksum, ("GCPhysPage=%RGp\n", GCPhysPage));
1868# else
1869 if ((u32Checksum2 & UINT32_C(0x00000303)) == u32Checksum)
1870 LogFlow(("shpg %#x @ %RGp %#x [OK]\n", PGM_PAGE_GET_PAGEID(pPage), GCPhysPage, u32Checksum2));
1871 else
1872 AssertMsgFailed(("shpg %#x @ %RGp %#x\n", PGM_PAGE_GET_PAGEID(pPage), GCPhysPage, u32Checksum2));
1873# endif
1874 }
1875 else
1876 AssertRC(rc);
1877 }
1878 }
1879
1880 } /* for each page */
1881
1882 } /* for each ram range */
1883 }
1884
1885 pgmUnlock(pVM);
1886#endif /* VBOX_STRICT */
1887 NOREF(pVM);
1888}
1889
1890
1891/**
1892 * Resets the physical memory state.
1893 *
1894 * ASSUMES that the caller owns the PGM lock.
1895 *
1896 * @returns VBox status code.
1897 * @param pVM The cross context VM structure.
1898 */
1899int pgmR3PhysRamReset(PVM pVM)
1900{
1901 PGM_LOCK_ASSERT_OWNER(pVM);
1902
1903 /* Reset the memory balloon. */
1904 int rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_RESET, 0);
1905 AssertRC(rc);
1906
1907#ifdef VBOX_WITH_PAGE_SHARING
1908 /* Clear all registered shared modules. */
1909 pgmR3PhysAssertSharedPageChecksums(pVM);
1910 rc = GMMR3ResetSharedModules(pVM);
1911 AssertRC(rc);
1912#endif
1913 /* Reset counters. */
1914 pVM->pgm.s.cReusedSharedPages = 0;
1915 pVM->pgm.s.cBalloonedPages = 0;
1916
1917 return VINF_SUCCESS;
1918}
1919
1920
1921/**
1922 * Resets (zeros) the RAM after all devices and components have been reset.
1923 *
1924 * ASSUMES that the caller owns the PGM lock.
1925 *
1926 * @returns VBox status code.
1927 * @param pVM The cross context VM structure.
1928 */
1929int pgmR3PhysRamZeroAll(PVM pVM)
1930{
1931 PGM_LOCK_ASSERT_OWNER(pVM);
1932
1933 /*
1934 * We batch up pages that should be freed instead of calling GMM for
1935 * each and every one of them.
1936 */
1937 uint32_t cPendingPages = 0;
1938 PGMMFREEPAGESREQ pReq;
1939 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1940 AssertLogRelRCReturn(rc, rc);
1941
1942 /*
1943 * Walk the ram ranges.
1944 */
1945 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
1946 {
1947 uint32_t iPage = pRam->cb >> PAGE_SHIFT;
1948 AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
1949
1950 if ( !pVM->pgm.s.fRamPreAlloc
1951 && pVM->pgm.s.fZeroRamPagesOnReset)
1952 {
1953 /* Replace all RAM pages by ZERO pages. */
1954 while (iPage-- > 0)
1955 {
1956 PPGMPAGE pPage = &pRam->aPages[iPage];
1957 switch (PGM_PAGE_GET_TYPE(pPage))
1958 {
1959 case PGMPAGETYPE_RAM:
1960 /* Do not replace pages that are part of a 2 MB contiguous range
1961 with zero pages, but zero them in place instead. */
1962 if ( PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE
1963 || PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED)
1964 {
1965 void *pvPage;
1966 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pvPage);
1967 AssertLogRelRCReturn(rc, rc);
1968 ASMMemZeroPage(pvPage);
1969 }
1970 else if (PGM_PAGE_IS_BALLOONED(pPage))
1971 {
1972 /* Turn into a zero page; the balloon status is lost when the VM reboots. */
1973 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
1974 }
1975 else if (!PGM_PAGE_IS_ZERO(pPage))
1976 {
1977 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1978 AssertLogRelRCReturn(rc, rc);
1979 }
1980 break;
1981
1982 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
1983 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: /** @todo perhaps leave the special page alone? I don't think VT-x copes with this code. */
1984 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT),
1985 true /*fDoAccounting*/);
1986 break;
1987
1988 case PGMPAGETYPE_MMIO2:
1989 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
1990 case PGMPAGETYPE_ROM:
1991 case PGMPAGETYPE_MMIO:
1992 break;
1993 default:
1994 AssertFailed();
1995 }
1996 } /* for each page */
1997 }
1998 else
1999 {
2000 /* Zero the memory. */
2001 while (iPage-- > 0)
2002 {
2003 PPGMPAGE pPage = &pRam->aPages[iPage];
2004 switch (PGM_PAGE_GET_TYPE(pPage))
2005 {
2006 case PGMPAGETYPE_RAM:
2007 switch (PGM_PAGE_GET_STATE(pPage))
2008 {
2009 case PGM_PAGE_STATE_ZERO:
2010 break;
2011
2012 case PGM_PAGE_STATE_BALLOONED:
2013 /* Turn into a zero page; the balloon status is lost when the VM reboots. */
2014 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
2015 break;
2016
2017 case PGM_PAGE_STATE_SHARED:
2018 case PGM_PAGE_STATE_WRITE_MONITORED:
2019 rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
2020 AssertLogRelRCReturn(rc, rc);
2021 /* no break */
2022
2023 case PGM_PAGE_STATE_ALLOCATED:
2024 if (pVM->pgm.s.fZeroRamPagesOnReset)
2025 {
2026 void *pvPage;
2027 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pvPage);
2028 AssertLogRelRCReturn(rc, rc);
2029 ASMMemZeroPage(pvPage);
2030 }
2031 break;
2032 }
2033 break;
2034
2035 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
2036 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: /** @todo perhaps leave the special page alone? I don't think VT-x copes with this code. */
2037 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT),
2038 true /*fDoAccounting*/);
2039 break;
2040
2041 case PGMPAGETYPE_MMIO2:
2042 case PGMPAGETYPE_ROM_SHADOW:
2043 case PGMPAGETYPE_ROM:
2044 case PGMPAGETYPE_MMIO:
2045 break;
2046 default:
2047 AssertFailed();
2048
2049 }
2050 } /* for each page */
2051 }
2052
2053 }
2054
2055 /*
2056 * Finish off any pages pending freeing.
2057 */
2058 if (cPendingPages)
2059 {
2060 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2061 AssertLogRelRCReturn(rc, rc);
2062 }
2063 GMMR3FreePagesCleanup(pReq);
2064 return VINF_SUCCESS;
2065}
2066
2067
2068/**
2069 * Frees all RAM during VM termination.
2070 *
2071 * ASSUMES that the caller owns the PGM lock.
2072 *
2073 * @returns VBox status code.
2074 * @param pVM The cross context VM structure.
2075 */
2076int pgmR3PhysRamTerm(PVM pVM)
2077{
2078 PGM_LOCK_ASSERT_OWNER(pVM);
2079
2080 /* Reset the memory balloon. */
2081 int rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_RESET, 0);
2082 AssertRC(rc);
2083
2084#ifdef VBOX_WITH_PAGE_SHARING
2085 /*
2086 * Clear all registered shared modules.
2087 */
2088 pgmR3PhysAssertSharedPageChecksums(pVM);
2089 rc = GMMR3ResetSharedModules(pVM);
2090 AssertRC(rc);
2091
2092 /*
2093 * Flush the handy page updates to make sure no shared pages are hiding
2094 * in there. (Not unlikely if the VM shuts down, apparently.)
2095 */
2096 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_FLUSH_HANDY_PAGES, 0, NULL);
2097#endif
2098
2099 /*
2100 * We batch up pages that should be freed instead of calling GMM for
2101 * each and every one of them.
2102 */
2103 uint32_t cPendingPages = 0;
2104 PGMMFREEPAGESREQ pReq;
2105 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2106 AssertLogRelRCReturn(rc, rc);
2107
2108 /*
2109 * Walk the ram ranges.
2110 */
2111 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
2112 {
2113 uint32_t iPage = pRam->cb >> PAGE_SHIFT;
2114 AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
2115
2116 while (iPage-- > 0)
2117 {
2118 PPGMPAGE pPage = &pRam->aPages[iPage];
2119 switch (PGM_PAGE_GET_TYPE(pPage))
2120 {
2121 case PGMPAGETYPE_RAM:
2122 /* Free all shared pages. Private pages are automatically freed during GMM VM cleanup. */
2123 /** @todo change this to explicitly free private pages here. */
2124 if (PGM_PAGE_IS_SHARED(pPage))
2125 {
2126 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
2127 AssertLogRelRCReturn(rc, rc);
2128 }
2129 break;
2130
2131 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
2132 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
2133 case PGMPAGETYPE_MMIO2:
2134 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
2135 case PGMPAGETYPE_ROM:
2136 case PGMPAGETYPE_MMIO:
2137 break;
2138 default:
2139 AssertFailed();
2140 }
2141 } /* for each page */
2142 }
2143
2144 /*
2145 * Finish off any pages pending freeing.
2146 */
2147 if (cPendingPages)
2148 {
2149 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2150 AssertLogRelRCReturn(rc, rc);
2151 }
2152 GMMR3FreePagesCleanup(pReq);
2153 return VINF_SUCCESS;
2154}
2155
2156
2157/**
2158 * This is the interface IOM is using to register an MMIO region.
2159 *
2160 * It will check for conflicts and ensure that a RAM range structure
2161 * is present before calling the PGMHandlerPhysicalRegister API to
2162 * register the callbacks.
2163 *
2164 * @returns VBox status code.
2165 *
2166 * @param pVM The cross context VM structure.
2167 * @param GCPhys The start of the MMIO region.
2168 * @param cb The size of the MMIO region.
2169 * @param hType The physical access handler type registration.
2170 * @param pvUserR3 The user argument for R3.
2171 * @param pvUserR0 The user argument for R0.
2172 * @param pvUserRC The user argument for RC.
2173 * @param pszDesc The description of the MMIO region.
2174 */
2175VMMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMPHYSHANDLERTYPE hType,
2176 RTR3PTR pvUserR3, RTR0PTR pvUserR0, RTRCPTR pvUserRC, const char *pszDesc)
2177{
2178 /*
2179 * Assert on some assumptions.
2180 */
2181 VM_ASSERT_EMT(pVM);
2182 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2183 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2184 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
2185 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
2186 Assert(((PPGMPHYSHANDLERTYPEINT)MMHyperHeapOffsetToPtr(pVM, hType))->enmKind == PGMPHYSHANDLERKIND_MMIO);
2187
2188 int rc = pgmLock(pVM);
2189 AssertRCReturn(rc, rc);
2190
2191 /*
2192 * Make sure there's a RAM range structure for the region.
2193 */
2194 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2195 bool fRamExists = false;
2196 PPGMRAMRANGE pRamPrev = NULL;
2197 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
2198 while (pRam && GCPhysLast >= pRam->GCPhys)
2199 {
2200 if ( GCPhysLast >= pRam->GCPhys
2201 && GCPhys <= pRam->GCPhysLast)
2202 {
2203 /* Simplification: all within the same range. */
2204 AssertLogRelMsgReturnStmt( GCPhys >= pRam->GCPhys
2205 && GCPhysLast <= pRam->GCPhysLast,
2206 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
2207 GCPhys, GCPhysLast, pszDesc,
2208 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
2209 pgmUnlock(pVM),
2210 VERR_PGM_RAM_CONFLICT);
2211
2212 /* Check that it's all RAM or MMIO pages. */
2213 PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2214 uint32_t cLeft = cb >> PAGE_SHIFT;
2215 while (cLeft-- > 0)
2216 {
2217 AssertLogRelMsgReturnStmt( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
2218 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
2219 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
2220 GCPhys, GCPhysLast, pszDesc, pRam->GCPhys, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
2221 pgmUnlock(pVM),
2222 VERR_PGM_RAM_CONFLICT);
2223 pPage++;
2224 }
2225
2226 /* Looks good. */
2227 fRamExists = true;
2228 break;
2229 }
2230
2231 /* next */
2232 pRamPrev = pRam;
2233 pRam = pRam->pNextR3;
2234 }
2235 PPGMRAMRANGE pNew;
2236 if (fRamExists)
2237 {
2238 pNew = NULL;
2239
2240 /*
2241 * Make all the pages in the range MMIO/ZERO pages, freeing any
2242 * RAM pages currently mapped here. This might not be 100% correct
2243 * for PCI memory, but we're doing the same thing for MMIO2 pages.
2244 */
2245 rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, PGMPAGETYPE_MMIO);
2246 AssertRCReturnStmt(rc, pgmUnlock(pVM), rc);
2247
2248 /* Force a PGM pool flush as guest ram references have been changed. */
2249 /** @todo not entirely SMP safe; assuming for now the guest takes
2250 * care of this internally (not touch mapped mmio while changing the
2251 * mapping). */
2252 PVMCPU pVCpu = VMMGetCpu(pVM);
2253 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2254 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2255 }
2256 else
2257 {
2258
2259 /*
2260 * No RAM range, insert an ad hoc one.
2261 *
2262 * Note that we don't have to tell REM about this range because
2263 * PGMHandlerPhysicalRegister will do that for us.
2264 */
2265 Log(("PGMR3PhysMMIORegister: Adding ad hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));
2266
2267 const uint32_t cPages = cb >> PAGE_SHIFT;
2268 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
2269 rc = MMHyperAlloc(pVM, cbRamRange, 16, MM_TAG_PGM_PHYS, (void **)&pNew);
2270 AssertLogRelMsgRCReturnStmt(rc, ("cbRamRange=%zu\n", cbRamRange), pgmUnlock(pVM), rc);
2271
2272 /* Initialize the range. */
2273 pNew->pSelfR0 = MMHyperCCToR0(pVM, pNew);
2274 pNew->pSelfRC = MMHyperCCToRC(pVM, pNew);
2275 pNew->GCPhys = GCPhys;
2276 pNew->GCPhysLast = GCPhysLast;
2277 pNew->cb = cb;
2278 pNew->pszDesc = pszDesc;
2279 pNew->fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO;
2280 pNew->pvR3 = NULL;
2281 pNew->paLSPages = NULL;
2282
2283 uint32_t iPage = cPages;
2284 while (iPage-- > 0)
2285 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
2286 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);
2287
2288 /* update the page count stats. */
2289 pVM->pgm.s.cPureMmioPages += cPages;
2290 pVM->pgm.s.cAllPages += cPages;
2291
2292 /* link it */
2293 pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
2294 }
2295
2296 /*
2297 * Register the access handler.
2298 */
2299 rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, hType, pvUserR3, pvUserR0, pvUserRC, pszDesc);
2300 if ( RT_FAILURE(rc)
2301 && !fRamExists)
2302 {
2303 pVM->pgm.s.cPureMmioPages -= cb >> PAGE_SHIFT;
2304 pVM->pgm.s.cAllPages -= cb >> PAGE_SHIFT;
2305
2306 /* remove the ad hoc range. */
2307 pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
2308 pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
2309 MMHyperFree(pVM, pNew); /* the ad hoc range we just unlinked above */
2310 }
2311 pgmPhysInvalidatePageMapTLB(pVM);
2312
2313 pgmUnlock(pVM);
2314 return rc;
2315}
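/*
 * Usage sketch for PGMR3PhysMMIORegister above (illustrative only): a device
 * construction callback could register a page sized MMIO region roughly like
 * this, assuming 'hType' was created earlier (e.g. via
 * PGMR3HandlerPhysicalTypeRegister) and that the base address is merely an
 * example value:
 *
 *     RTGCPHYS const GCPhysMmio = 0xe0000000;
 *     rc = PGMR3PhysMMIORegister(pVM, GCPhysMmio, PAGE_SIZE, hType,
 *                                pDevIns, NIL_RTR0PTR, NIL_RTRCPTR,
 *                                "Example device MMIO");
 *     AssertRCReturn(rc, rc);
 */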
2316
2317
2318/**
2319 * This is the interface IOM is using to deregister an MMIO region.
2320 *
2321 * It will take care of calling PGMHandlerPhysicalDeregister and clean up
2322 * any ad hoc PGMRAMRANGE left behind.
2323 *
2324 * @returns VBox status code.
2325 * @param pVM The cross context VM structure.
2326 * @param GCPhys The start of the MMIO region.
2327 * @param cb The size of the MMIO region.
2328 */
2329VMMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
2330{
2331 VM_ASSERT_EMT(pVM);
2332
2333 int rc = pgmLock(pVM);
2334 AssertRCReturn(rc, rc);
2335
2336 /*
2337 * First deregister the handler, then check if we should remove the ram range.
2338 */
2339 rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
2340 if (RT_SUCCESS(rc))
2341 {
2342 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2343 PPGMRAMRANGE pRamPrev = NULL;
2344 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
2345 while (pRam && GCPhysLast >= pRam->GCPhys)
2346 {
2347 /** @todo We're being a bit too careful here. rewrite. */
2348 if ( GCPhysLast == pRam->GCPhysLast
2349 && GCPhys == pRam->GCPhys)
2350 {
2351 Assert(pRam->cb == cb);
2352
2353 /*
2354 * See if all the pages are dead MMIO pages.
2355 */
2356 uint32_t const cPages = cb >> PAGE_SHIFT;
2357 bool fAllMMIO = true;
2358 uint32_t iPage = 0;
2359 uint32_t cLeft = cPages;
2360 while (cLeft-- > 0)
2361 {
2362 PPGMPAGE pPage = &pRam->aPages[iPage];
2363 if ( !PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)
2364 /*|| not-out-of-action later */)
2365 {
2366 fAllMMIO = false;
2367 AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
2368 break;
2369 }
2370 Assert( PGM_PAGE_IS_ZERO(pPage)
2371 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
2372 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
2373 pPage++;
2374 }
2375 if (fAllMMIO)
2376 {
2377 /*
2378 * Ad-hoc range, unlink and free it.
2379 */
2380 Log(("PGMR3PhysMMIODeregister: Freeing ad hoc MMIO range for %RGp-%RGp %s\n",
2381 GCPhys, GCPhysLast, pRam->pszDesc));
2382
2383 pVM->pgm.s.cAllPages -= cPages;
2384 pVM->pgm.s.cPureMmioPages -= cPages;
2385
2386 pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
2387 pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
2388 MMHyperFree(pVM, pRam);
2389 break;
2390 }
2391 }
2392
2393 /*
2394 * Range match? It will all be within one range (see PGMAllHandler.cpp).
2395 */
2396 if ( GCPhysLast >= pRam->GCPhys
2397 && GCPhys <= pRam->GCPhysLast)
2398 {
2399 Assert(GCPhys >= pRam->GCPhys);
2400 Assert(GCPhysLast <= pRam->GCPhysLast);
2401
2402 /*
2403 * Turn the pages back into RAM pages.
2404 */
2405 uint32_t iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2406 uint32_t cLeft = cb >> PAGE_SHIFT;
2407 while (cLeft--)
2408 {
2409 PPGMPAGE pPage = &pRam->aPages[iPage];
2410 AssertMsg( (PGM_PAGE_IS_MMIO(pPage) && PGM_PAGE_IS_ZERO(pPage))
2411 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
2412 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
2413 ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
2414 if (PGM_PAGE_IS_MMIO_OR_ALIAS(pPage))
2415 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_RAM);
2416 }
2417 break;
2418 }
2419
2420 /* next */
2421 pRamPrev = pRam;
2422 pRam = pRam->pNextR3;
2423 }
2424 }
2425
2426 /* Force a PGM pool flush as guest ram references have been changed. */
2427 /** @todo Not entirely SMP safe; assuming for now the guest takes care of
2428 * this internally (not touch mapped mmio while changing the mapping). */
2429 PVMCPU pVCpu = VMMGetCpu(pVM);
2430 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2431 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2432
2433 pgmPhysInvalidatePageMapTLB(pVM);
2434 pgmPhysInvalidRamRangeTlbs(pVM);
2435 pgmUnlock(pVM);
2436 return rc;
2437}
2438
2439
2440/**
2441 * Locate a MMIO2 range.
2442 *
2443 * @returns Pointer to the MMIO2 range.
2444 * @param pVM The cross context VM structure.
2445 * @param pDevIns The device instance owning the region.
2446 * @param iRegion The region.
2447 */
2448DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
2449{
2450 /*
2451 * Search the list.
2452 */
2453 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
2454 if ( pCur->pDevInsR3 == pDevIns
2455 && pCur->iRegion == iRegion)
2456 return pCur;
2457 return NULL;
2458}
2459
2460
2461/**
2462 * Allocate and register an MMIO2 region.
2463 *
2464 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's RAM
2465 * associated with a device. It is also non-shared memory with a permanent
2466 * ring-3 mapping and page backing (presently).
2467 *
2468 * A MMIO2 range may overlap with base memory if a lot of RAM is configured for
2469 * the VM, in which case we'll drop the base memory pages. Presently we will
2470 * make no attempt to preserve anything that happens to be present in the base
2471 * memory that is replaced, this is of course incorrect but it's too much
2472 * effort.
2473 *
2474 * @returns VBox status code.
2475 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the
2476 * memory.
2477 * @retval VERR_ALREADY_EXISTS if the region already exists.
2478 *
2479 * @param pVM The cross context VM structure.
2480 * @param pDevIns The device instance owning the region.
2481 * @param iRegion The region number. If the MMIO2 memory is a PCI
2482 * I/O region this number has to be the number of that
2483 * region. Otherwise it can be any number not
2484 * exceeding UINT8_MAX.
2485 * @param cb The size of the region. Must be page aligned.
2486 * @param fFlags Reserved for future use, must be zero.
2487 * @param ppv Where to store the pointer to the ring-3 mapping of
2488 * the memory.
2489 * @param pszDesc The description.
2490 */
2491VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags,
2492 void **ppv, const char *pszDesc)
2493{
2494 /*
2495 * Validate input.
2496 */
2497 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2498 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2499 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2500 AssertPtrReturn(ppv, VERR_INVALID_POINTER);
2501 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
2502 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
2503 AssertReturn(pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
2504 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2505 AssertReturn(cb, VERR_INVALID_PARAMETER);
2506 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2507
2508 const uint32_t cPages = cb >> PAGE_SHIFT;
2509 AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
2510 AssertLogRelReturn(cPages <= PGM_MMIO2_MAX_PAGE_COUNT, VERR_NO_MEMORY);
2511
2512 /*
2513 * For the 2nd+ instance, mangle the description string so it's unique.
2514 */
2515 if (pDevIns->iInstance > 0) /** @todo Move to PDMDevHlp.cpp and use a real string cache. */
2516 {
2517 pszDesc = MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s [%u]", pszDesc, pDevIns->iInstance);
2518 if (!pszDesc)
2519 return VERR_NO_MEMORY;
2520 }
2521
2522 /*
2523 * Allocate an MMIO2 range ID (not freed on failure).
2524 * The zero ID is not used as it could be confused with NIL_GMM_PAGEID.
2525 */
2526 pgmLock(pVM);
2527 uint8_t idMmio2 = pVM->pgm.s.cMmio2Regions + 1;
2528 if (idMmio2 > PGM_MMIO2_MAX_RANGES)
2529 {
2530 pgmUnlock(pVM);
2531 AssertLogRelFailedReturn(VERR_PGM_TOO_MANY_MMIO2_RANGES);
2532 }
2533 pVM->pgm.s.cMmio2Regions = idMmio2;
2534 pgmUnlock(pVM);
2535
2536 /*
2537 * Try reserve and allocate the backing memory first as this is what is
2538 * most likely to fail.
2539 */
2540 int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc);
2541 if (RT_SUCCESS(rc))
2542 {
2543 void *pvPages;
2544 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE));
if (!paPages)
    rc = VERR_NO_TMP_MEMORY; /* don't hand SUPR3PageAllocEx a NULL array that gets dereferenced below */
2545 if (RT_SUCCESS(rc))
2546 rc = SUPR3PageAllocEx(cPages, 0 /*fFlags*/, &pvPages, NULL /*pR0Ptr*/, paPages);
2547 if (RT_SUCCESS(rc))
2548 {
2549 memset(pvPages, 0, cPages * PAGE_SIZE);
2550
2551 /*
2552 * Create the MMIO2 range record for it.
2553 */
2554 const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]);
2555 PPGMMMIO2RANGE pNew;
2556 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
2557 AssertLogRelMsgRC(rc, ("cbRamRange=%zu\n", cbRange));
2558 if (RT_SUCCESS(rc))
2559 {
2560 pNew->pDevInsR3 = pDevIns;
2561 pNew->pvR3 = pvPages;
2562 //pNew->pNext = NULL;
2563 //pNew->fMapped = false;
2564 //pNew->fOverlapping = false;
2565 pNew->iRegion = iRegion;
2566 pNew->idSavedState = UINT8_MAX;
2567 pNew->idMmio2 = idMmio2;
2568 pNew->RamRange.pSelfR0 = MMHyperCCToR0(pVM, &pNew->RamRange);
2569 pNew->RamRange.pSelfRC = MMHyperCCToRC(pVM, &pNew->RamRange);
2570 pNew->RamRange.GCPhys = NIL_RTGCPHYS;
2571 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
2572 pNew->RamRange.pszDesc = pszDesc;
2573 pNew->RamRange.cb = cb;
2574 pNew->RamRange.fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2;
2575 pNew->RamRange.pvR3 = pvPages;
2576 //pNew->RamRange.paLSPages = NULL;
2577
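/* Each page gets a page ID composed from the MMIO2 range ID and its index
   within the range (PGM_MMIO2_PAGEID_MAKE), so the owning range and offset
   can later be derived from the page ID alone. */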
2578 uint32_t iPage = cPages;
2579 while (iPage-- > 0)
2580 {
2581 PGM_PAGE_INIT(&pNew->RamRange.aPages[iPage],
2582 paPages[iPage].Phys,
2583 PGM_MMIO2_PAGEID_MAKE(idMmio2, iPage),
2584 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
2585 }
2586
2587 /* update page count stats */
2588 pVM->pgm.s.cAllPages += cPages;
2589 pVM->pgm.s.cPrivatePages += cPages;
2590
2591 /*
2592 * Link it into the list.
2593 * Since there is no particular order, just push it.
2594 */
2595 /** @todo we can save us the linked list now, just search the lookup table... */
2596 pgmLock(pVM);
2597 Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == NULL);
2598 Assert(pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] == NIL_RTR0PTR);
2599 pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3;
2600 pVM->pgm.s.pMmio2RangesR3 = pNew;
2601 pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = pNew;
2602 pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = MMHyperCCToR0(pVM, pNew);
2603 pgmUnlock(pVM);
2604
2605 *ppv = pvPages;
2606 RTMemTmpFree(paPages);
2607 pgmPhysInvalidatePageMapTLB(pVM);
2608 return VINF_SUCCESS;
2609 }
2610
2611 SUPR3PageFreeEx(pvPages, cPages);
2612 }
2613 RTMemTmpFree(paPages);
2614 MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pszDesc);
2615 }
2616 if (pDevIns->iInstance > 0)
2617 MMR3HeapFree((void *)pszDesc);
2618 return rc;
2619}
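/*
 * Usage sketch for PGMR3PhysMMIO2Register above (illustrative only): a device
 * would typically allocate its MMIO2 backing during construction and keep the
 * returned ring-3 pointer in its instance data; the size and description here
 * are made-up example values:
 *
 *     void *pvMmio2;
 *     rc = PGMR3PhysMMIO2Register(pVM, pDevIns, 0, 4U*_1M, 0, &pvMmio2,
 *                                 "Example MMIO2");
 *     AssertRCReturn(rc, rc);
 */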
2620
2621
2622/**
2623 * Deregisters and frees an MMIO2 region.
2624 *
2625 * Any physical (and virtual) access handlers registered for the region must
2626 * be deregistered before calling this function.
2627 *
2628 * @returns VBox status code.
2629 * @param pVM The cross context VM structure.
2630 * @param pDevIns The device instance owning the region.
2631 * @param iRegion The region. If it's UINT32_MAX it'll be a wildcard match.
2632 */
2633VMMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
2634{
2635 /*
2636 * Validate input.
2637 */
2638 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2639 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2640 AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
2641
2642 pgmLock(pVM);
2643 int rc = VINF_SUCCESS;
2644 unsigned cFound = 0;
2645 PPGMMMIO2RANGE pPrev = NULL;
2646 PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3;
2647 while (pCur)
2648 {
2649 if ( pCur->pDevInsR3 == pDevIns
2650 && ( iRegion == UINT32_MAX
2651 || pCur->iRegion == iRegion))
2652 {
2653 cFound++;
2654
2655 /*
2656 * Unmap it if it's mapped.
2657 */
2658 if (pCur->fMapped)
2659 {
2660 int rc2 = PGMR3PhysMMIO2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);
2661 AssertRC(rc2);
2662 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
2663 rc = rc2;
2664 }
2665
2666 /*
2667 * Unlink it
2668 */
2669 PPGMMMIO2RANGE pNext = pCur->pNextR3;
2670 if (pPrev)
2671 pPrev->pNextR3 = pNext;
2672 else
2673 pVM->pgm.s.pMmio2RangesR3 = pNext;
2674 pCur->pNextR3 = NULL;
2675
2676 uint8_t idMmio2 = pCur->idMmio2;
2677 Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == pCur);
2678 pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = NULL;
2679 pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = NIL_RTR0PTR;
2680
2681 /*
2682 * Free the memory.
2683 */
2684 int rc2 = SUPR3PageFreeEx(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT);
2685 AssertRC(rc2);
2686 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
2687 rc = rc2;
2688
2689 uint32_t const cPages = pCur->RamRange.cb >> PAGE_SHIFT;
2690 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pCur->RamRange.pszDesc);
2691 AssertRC(rc2);
2692 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
2693 rc = rc2;
2694
2695 /* we're leaking hyper memory here if done at runtime. */
2696#ifdef VBOX_STRICT
2697 VMSTATE const enmState = VMR3GetState(pVM);
2698 AssertMsg( enmState == VMSTATE_POWERING_OFF
2699 || enmState == VMSTATE_POWERING_OFF_LS
2700 || enmState == VMSTATE_OFF
2701 || enmState == VMSTATE_OFF_LS
2702 || enmState == VMSTATE_DESTROYING
2703 || enmState == VMSTATE_TERMINATED
2704 || enmState == VMSTATE_CREATING
2705 , ("%s\n", VMR3GetStateName(enmState)));
2706#endif
2707 /*rc = MMHyperFree(pVM, pCur);
2708 AssertRCReturn(rc, rc); - not safe, see the alloc call. */
2709
2710
2711 /* update page count stats */
2712 pVM->pgm.s.cAllPages -= cPages;
2713 pVM->pgm.s.cPrivatePages -= cPages;
2714
2715 /* next */
2716 pCur = pNext;
2717 }
2718 else
2719 {
2720 pPrev = pCur;
2721 pCur = pCur->pNextR3;
2722 }
2723 }
2724 pgmPhysInvalidatePageMapTLB(pVM);
2725 pgmUnlock(pVM);
2726 return !cFound && iRegion != UINT32_MAX ? VERR_NOT_FOUND : rc;
2727}
2728
2729
2730/**
2731 * Maps a MMIO2 region.
2732 *
2733 * This is done when a guest / the BIOS / state loading changes the
2734 * PCI config. The replacing of base memory has the same restrictions
2735 * as during registration, of course.
2736 *
2737 * @returns VBox status code.
2738 *
2739 * @param pVM The cross context VM structure.
2740 * @param pDevIns The device instance owning the region.
2741 * @param iRegion The index of the registered region.
2742 * @param GCPhys The guest-physical address to be remapped.
2743 */
2744VMMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
2745{
2746 /*
2747 * Validate input
2748 */
2749 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2750 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2751 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2752 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
2753 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
2754 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2755
2756 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2757 AssertReturn(pCur, VERR_NOT_FOUND);
2758 AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER);
2759 Assert(pCur->RamRange.GCPhys == NIL_RTGCPHYS);
2760 Assert(pCur->RamRange.GCPhysLast == NIL_RTGCPHYS);
2761
2762 const RTGCPHYS GCPhysLast = GCPhys + pCur->RamRange.cb - 1;
2763 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2764
2765 /*
2766 * Find our location in the ram range list, checking for
2767 * restriction we don't bother implementing yet (partially overlapping).
2768 */
2769 bool fRamExists = false;
2770 PPGMRAMRANGE pRamPrev = NULL;
2771 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
2772 while (pRam && GCPhysLast >= pRam->GCPhys)
2773 {
2774 if ( GCPhys <= pRam->GCPhysLast
2775 && GCPhysLast >= pRam->GCPhys)
2776 {
2777 /* completely within? */
2778 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
2779 && GCPhysLast <= pRam->GCPhysLast,
2780 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
2781 GCPhys, GCPhysLast, pCur->RamRange.pszDesc,
2782 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
2783 VERR_PGM_RAM_CONFLICT);
2784 fRamExists = true;
2785 break;
2786 }
2787
2788 /* next */
2789 pRamPrev = pRam;
2790 pRam = pRam->pNextR3;
2791 }
2792 if (fRamExists)
2793 {
2794 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2795 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
2796 while (cPagesLeft-- > 0)
2797 {
2798 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
2799 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
2800 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc),
2801 VERR_PGM_RAM_CONFLICT);
2802 pPage++;
2803 }
2804 }
2805 Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n",
2806 GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc));
2807
2808 /*
2809 * Make the changes.
2810 */
2811 pgmLock(pVM);
2812
2813 pCur->RamRange.GCPhys = GCPhys;
2814 pCur->RamRange.GCPhysLast = GCPhysLast;
2815 pCur->fMapped = true;
2816 pCur->fOverlapping = fRamExists;
2817
2818 if (fRamExists)
2819 {
2820/** @todo use pgmR3PhysFreePageRange here. */
2821 uint32_t cPendingPages = 0;
2822 PGMMFREEPAGESREQ pReq;
2823 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2824 AssertLogRelRCReturn(rc, rc);
2825
2826 /* replace the pages, freeing all present RAM pages. */
2827 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
2828 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2829 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
2830 while (cPagesLeft-- > 0)
2831 {
2832 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
2833 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
2834
2835 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
2836 uint32_t const idPage = PGM_PAGE_GET_PAGEID(pPageSrc);
2837 PGM_PAGE_SET_PAGEID(pVM, pPageDst, idPage);
2838 PGM_PAGE_SET_HCPHYS(pVM, pPageDst, HCPhys);
2839 PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO2);
2840 PGM_PAGE_SET_STATE(pVM, pPageDst, PGM_PAGE_STATE_ALLOCATED);
2841 PGM_PAGE_SET_PDE_TYPE(pVM, pPageDst, PGM_PAGE_PDE_TYPE_DONTCARE);
2842 PGM_PAGE_SET_PTE_INDEX(pVM, pPageDst, 0);
2843 PGM_PAGE_SET_TRACKING(pVM, pPageDst, 0);
2844
2845 pVM->pgm.s.cZeroPages--;
2846 GCPhys += PAGE_SIZE;
2847 pPageSrc++;
2848 pPageDst++;
2849 }
2850
2851 /* Flush physical page map TLB. */
2852 pgmPhysInvalidatePageMapTLB(pVM);
2853
2854 if (cPendingPages)
2855 {
2856 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2857 AssertLogRelRCReturn(rc, rc);
2858 }
2859 GMMR3FreePagesCleanup(pReq);
2860
2861 /* Force a PGM pool flush as guest ram references have been changed. */
2862 /** @todo not entirely SMP safe; assuming for now the guest takes care of
2863 * this internally (not touch mapped mmio while changing the mapping). */
2864 PVMCPU pVCpu = VMMGetCpu(pVM);
2865 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2866 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2867
2868 pgmUnlock(pVM);
2869 }
2870 else
2871 {
2872 RTGCPHYS cb = pCur->RamRange.cb;
2873
2874 /* Clear the tracking data of pages we're going to reactivate. */
2875 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
2876 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
2877 while (cPagesLeft-- > 0)
2878 {
2879 PGM_PAGE_SET_TRACKING(pVM, pPageSrc, 0);
2880 PGM_PAGE_SET_PTE_INDEX(pVM, pPageSrc, 0);
2881 pPageSrc++;
2882 }
2883
2884 /* link in the ram range */
2885 pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev);
2886 pgmUnlock(pVM);
2887
2888#ifdef VBOX_WITH_REM
2889 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2);
2890#endif
2891 }
2892
2893 pgmPhysInvalidatePageMapTLB(pVM);
2894 return VINF_SUCCESS;
2895}
2896
2897
2898/**
2899 * Unmaps a MMIO2 region.
2900 *
2901 * This is done when a guest / the BIOS / state loading changes the
2902 * PCI config. The replacing of base memory has the same restrictions
2903 * as during registration, of course.
 *
 * @returns VBox status code.
 *
 * @param pVM The cross context VM structure.
 * @param pDevIns The device instance owning the region.
 * @param iRegion The index of the registered region.
 * @param GCPhys The guest-physical address the region is currently mapped at.
2904 */
2905VMMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
2906{
2907 /*
2908 * Validate input
2909 */
2910 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2911 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2912 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2913 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
2914 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
2915 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2916
2917 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
2918 AssertReturn(pCur, VERR_NOT_FOUND);
2919 AssertReturn(pCur->fMapped, VERR_WRONG_ORDER);
2920 AssertReturn(pCur->RamRange.GCPhys == GCPhys, VERR_INVALID_PARAMETER);
2921 Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS);
2922
2923 Log(("PGMR3PhysMMIO2Unmap: %RGp-%RGp %s\n",
2924 pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc));
2925
2926 /*
2927 * Unmap it.
2928 */
2929 pgmLock(pVM);
2930
2931#ifdef VBOX_WITH_REM
2932 RTGCPHYS GCPhysRangeREM;
2933 RTGCPHYS cbRangeREM;
2934 bool fInformREM;
2935#endif
2936 if (pCur->fOverlapping)
2937 {
2938 /* Restore the RAM pages we've replaced. */
2939 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
2940 while (pRam->GCPhys > pCur->RamRange.GCPhysLast)
2941 pRam = pRam->pNextR3;
2942
2943 PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2944 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
2945 while (cPagesLeft-- > 0)
2946 {
2947 PGM_PAGE_INIT_ZERO(pPageDst, pVM, PGMPAGETYPE_RAM);
2948 pVM->pgm.s.cZeroPages++;
2949 pPageDst++;
2950 }
2951
2952 /* Flush physical page map TLB. */
2953 pgmPhysInvalidatePageMapTLB(pVM);
2954#ifdef VBOX_WITH_REM
2955 GCPhysRangeREM = NIL_RTGCPHYS; /* shuts up gcc */
2956 cbRangeREM = RTGCPHYS_MAX; /* ditto */
2957 fInformREM = false;
2958#endif
2959 }
2960 else
2961 {
2962#ifdef VBOX_WITH_REM
2963 GCPhysRangeREM = pCur->RamRange.GCPhys;
2964 cbRangeREM = pCur->RamRange.cb;
2965 fInformREM = true;
2966#endif
2967 pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange);
2968 }
2969
2970 pCur->RamRange.GCPhys = NIL_RTGCPHYS;
2971 pCur->RamRange.GCPhysLast = NIL_RTGCPHYS;
2972 pCur->fOverlapping = false;
2973 pCur->fMapped = false;
2974
2975 /* Force a PGM pool flush as guest ram references have been changed. */
2976 /** @todo not entirely SMP safe; assuming for now the guest takes care
2977 * of this internally (not touch mapped mmio while changing the
2978 * mapping). */
2979 PVMCPU pVCpu = VMMGetCpu(pVM);
2980 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2981 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2982
2983 pgmPhysInvalidatePageMapTLB(pVM);
2984 pgmPhysInvalidRamRangeTlbs(pVM);
2985 pgmUnlock(pVM);
2986
2987#ifdef VBOX_WITH_REM
2988 if (fInformREM)
2989 REMR3NotifyPhysRamDeregister(pVM, GCPhysRangeREM, cbRangeREM);
2990#endif
2991
2992 return VINF_SUCCESS;
2993}
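/*
 * Usage sketch for PGMR3PhysMMIO2Map / PGMR3PhysMMIO2Unmap above (illustrative
 * only): code reacting to a PCI BAR change could translate it into these calls
 * along the lines of the following, where GCPhysOld stands for the address the
 * region was previously mapped at (the bookkeeping is device specific):
 *
 *     if (GCPhysNew != NIL_RTGCPHYS)
 *         rc = PGMR3PhysMMIO2Map(pVM, pDevIns, iRegion, GCPhysNew);
 *     else
 *         rc = PGMR3PhysMMIO2Unmap(pVM, pDevIns, iRegion, GCPhysOld);
 *     AssertRC(rc);
 */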
2994
2995
2996/**
2997 * Checks if the given address is an MMIO2 base address or not.
2998 *
2999 * @returns true/false accordingly.
3000 * @param pVM The cross context VM structure.
3001 * @param pDevIns The owner of the memory, optional.
3002 * @param GCPhys The address to check.
3003 */
3004VMMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
3005{
3006 /*
3007 * Validate input
3008 */
3009 VM_ASSERT_EMT_RETURN(pVM, false);
3010 AssertPtrReturn(pDevIns, false);
3011 AssertReturn(GCPhys != NIL_RTGCPHYS, false);
3012 AssertReturn(GCPhys != 0, false);
3013 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);
3014
3015 /*
3016 * Search the list.
3017 */
3018 pgmLock(pVM);
3019 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
3020 if (pCur->RamRange.GCPhys == GCPhys)
3021 {
3022 Assert(pCur->fMapped);
3023 pgmUnlock(pVM);
3024 return true;
3025 }
3026 pgmUnlock(pVM);
3027 return false;
3028}
3029
3030
3031/**
3032 * Gets the HC physical address of a page in the MMIO2 region.
3033 *
3034 * This API is intended for MMHyper and shouldn't be called
3035 * by anyone else...
3036 *
3037 * @returns VBox status code.
3038 * @param pVM The cross context VM structure.
3039 * @param pDevIns The owner of the memory, optional.
3040 * @param iRegion The region.
3041 * @param off The page expressed as an offset into the MMIO2 region.
3042 * @param pHCPhys Where to store the result.
3043 */
3044VMMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
3045{
3046 /*
3047 * Validate input
3048 */
3049 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3050 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3051 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
3052
3053 pgmLock(pVM);
3054 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
3055 AssertReturn(pCur, VERR_NOT_FOUND);
3056 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
3057
3058 PCPGMPAGE pPage = &pCur->RamRange.aPages[off >> PAGE_SHIFT];
3059 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
3060 pgmUnlock(pVM);
3061 return VINF_SUCCESS;
3062}
3063
3064
3065/**
3066 * Maps a portion of an MMIO2 region into kernel space (host).
3067 *
3068 * The kernel mapping will become invalid when the MMIO2 memory is deregistered
3069 * or the VM is terminated.
3070 *
3071 * @return VBox status code.
3072 *
3073 * @param pVM The cross context VM structure.
3074 * @param pDevIns The device owning the MMIO2 memory.
3075 * @param iRegion The region.
3076 * @param off The offset into the region. Must be page aligned.
3077 * @param cb The number of bytes to map. Must be page aligned.
3078 * @param pszDesc Mapping description.
3079 * @param pR0Ptr Where to store the R0 address.
3080 */
3081VMMR3DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
3082 const char *pszDesc, PRTR0PTR pR0Ptr)
3083{
3084 /*
3085 * Validate input.
3086 */
3087 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3088 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3089 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
3090
3091 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
3092 AssertReturn(pCur, VERR_NOT_FOUND);
3093 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
3094 AssertReturn(cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
3095 AssertReturn(off + cb <= pCur->RamRange.cb, VERR_INVALID_PARAMETER);
3096 NOREF(pszDesc);
3097
3098 /*
3099 * Pass the request on to the support library/driver.
3100 */
3101 int rc = SUPR3PageMapKernel(pCur->pvR3, off, cb, 0, pR0Ptr);
3102
3103 return rc;
3104}
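/*
 * Usage sketch for PGMR3PhysMMIO2MapKernel above (illustrative only): a device
 * that needs ring-0 access to the first page of its MMIO2 region might request
 * a kernel mapping like this (R0PtrMmio2 is a name invented for the example):
 *
 *     RTR0PTR R0PtrMmio2;
 *     rc = PGMR3PhysMMIO2MapKernel(pVM, pDevIns, iRegion, 0, PAGE_SIZE,
 *                                  "Example R0 mapping", &R0PtrMmio2);
 *     AssertRCReturn(rc, rc);
 */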
3105
3106
3107/**
3108 * Worker for PGMR3PhysRomRegister.
3109 *
3110 * This is here to simplify lock management, i.e. the caller does all the
3111 * locking and we can simply return without needing to remember to unlock
3112 * anything first.
3113 *
3114 * @returns VBox status code.
3115 * @param pVM The cross context VM structure.
3116 * @param pDevIns The device instance owning the ROM.
3117 * @param GCPhys First physical address in the range.
3118 * Must be page aligned!
3119 * @param cb The size of the range (in bytes).
3120 * Must be page aligned!
3121 * @param pvBinary Pointer to the binary data backing the ROM image.
3122 * @param cbBinary The size of the binary data pvBinary points to.
3123 * This must be less than or equal to @a cb.
3124 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAGS_SHADOWED
3125 * and/or PGMPHYS_ROM_FLAGS_PERMANENT_BINARY.
3126 * @param pszDesc Pointer to description string. This must not be freed.
3127 */
3128static int pgmR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
3129 const void *pvBinary, uint32_t cbBinary, uint32_t fFlags, const char *pszDesc)
3130{
3131 /*
3132 * Validate input.
3133 */
3134 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3135 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
3136 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
3137 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
3138 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
3139 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
3140 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
3141 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAGS_SHADOWED | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
3142 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
3143
3144 const uint32_t cPages = cb >> PAGE_SHIFT;
3145
3146 /*
3147 * Find the ROM location in the ROM list first.
3148 */
3149 PPGMROMRANGE pRomPrev = NULL;
3150 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
3151 while (pRom && GCPhysLast >= pRom->GCPhys)
3152 {
3153 if ( GCPhys <= pRom->GCPhysLast
3154 && GCPhysLast >= pRom->GCPhys)
3155 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
3156 GCPhys, GCPhysLast, pszDesc,
3157 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
3158 VERR_PGM_RAM_CONFLICT);
3159 /* next */
3160 pRomPrev = pRom;
3161 pRom = pRom->pNextR3;
3162 }
3163
3164 /*
3165 * Find the RAM location and check for conflicts.
3166 *
3167 * Conflict detection is a bit different from RAM
3168 * registration since a ROM can be located within a RAM
3169 * range. So, what we have to check for is other memory
3170 * types (i.e. other than RAM) and that we don't span
3171 * more than one RAM range (lazy).
3172 */
3173 bool fRamExists = false;
3174 PPGMRAMRANGE pRamPrev = NULL;
3175 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
3176 while (pRam && GCPhysLast >= pRam->GCPhys)
3177 {
3178 if ( GCPhys <= pRam->GCPhysLast
3179 && GCPhysLast >= pRam->GCPhys)
3180 {
3181 /* completely within? */
3182 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
3183 && GCPhysLast <= pRam->GCPhysLast,
3184 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
3185 GCPhys, GCPhysLast, pszDesc,
3186 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
3187 VERR_PGM_RAM_CONFLICT);
3188 fRamExists = true;
3189 break;
3190 }
3191
3192 /* next */
3193 pRamPrev = pRam;
3194 pRam = pRam->pNextR3;
3195 }
3196 if (fRamExists)
3197 {
3198 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
3199 uint32_t cPagesLeft = cPages;
3200 while (cPagesLeft-- > 0)
3201 {
3202 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
3203 ("%RGp (%R[pgmpage]) isn't a RAM page - registering %RGp-%RGp (%s).\n",
3204 pRam->GCPhys + ((RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << PAGE_SHIFT),
3205 pPage, GCPhys, GCPhysLast, pszDesc), VERR_PGM_RAM_CONFLICT);
3206 Assert(PGM_PAGE_IS_ZERO(pPage));
3207 pPage++;
3208 }
3209 }
3210
3211 /*
3212 * Update the base memory reservation if necessary.
3213 */
3214 uint32_t cExtraBaseCost = fRamExists ? 0 : cPages;
3215 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
3216 cExtraBaseCost += cPages;
3217 if (cExtraBaseCost)
3218 {
3219 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
3220 if (RT_FAILURE(rc))
3221 return rc;
3222 }
3223
3224 /*
3225 * Allocate memory for the virgin copy of the RAM.
3226 */
3227 PGMMALLOCATEPAGESREQ pReq;
3228 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
3229 AssertRCReturn(rc, rc);
3230
3231 for (uint32_t iPage = 0; iPage < cPages; iPage++)
3232 {
3233 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
3234 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
3235 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
3236 }
3237
3238 rc = GMMR3AllocatePagesPerform(pVM, pReq);
3239 if (RT_FAILURE(rc))
3240 {
3241 GMMR3AllocatePagesCleanup(pReq);
3242 return rc;
3243 }
3244
3245 /*
3246 * Allocate the new ROM range and RAM range (if necessary).
3247 */
3248 PPGMROMRANGE pRomNew;
3249 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
3250 if (RT_SUCCESS(rc))
3251 {
3252 PPGMRAMRANGE pRamNew = NULL;
3253 if (!fRamExists)
3254 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
3255 if (RT_SUCCESS(rc))
3256 {
3257 /*
3258 * Initialize and insert the RAM range (if required).
3259 */
3260 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
3261 if (!fRamExists)
3262 {
3263 pRamNew->pSelfR0 = MMHyperCCToR0(pVM, pRamNew);
3264 pRamNew->pSelfRC = MMHyperCCToRC(pVM, pRamNew);
3265 pRamNew->GCPhys = GCPhys;
3266 pRamNew->GCPhysLast = GCPhysLast;
3267 pRamNew->cb = cb;
3268 pRamNew->pszDesc = pszDesc;
3269 pRamNew->fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_ROM;
3270 pRamNew->pvR3 = NULL;
3271 pRamNew->paLSPages = NULL;
3272
3273 PPGMPAGE pPage = &pRamNew->aPages[0];
3274 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
3275 {
3276 PGM_PAGE_INIT(pPage,
3277 pReq->aPages[iPage].HCPhysGCPhys,
3278 pReq->aPages[iPage].idPage,
3279 PGMPAGETYPE_ROM,
3280 PGM_PAGE_STATE_ALLOCATED);
3281
3282 pRomPage->Virgin = *pPage;
3283 }
3284
3285 pVM->pgm.s.cAllPages += cPages;
3286 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
3287 }
3288 else
3289 {
3290 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
3291 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
3292 {
3293 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_ROM);
3294 PGM_PAGE_SET_HCPHYS(pVM, pPage, pReq->aPages[iPage].HCPhysGCPhys);
3295 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
3296 PGM_PAGE_SET_PAGEID(pVM, pPage, pReq->aPages[iPage].idPage);
3297 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_DONTCARE);
3298 PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
3299 PGM_PAGE_SET_TRACKING(pVM, pPage, 0);
3300
3301 pRomPage->Virgin = *pPage;
3302 }
3303
3304 pRamNew = pRam;
3305
3306 pVM->pgm.s.cZeroPages -= cPages;
3307 }
3308 pVM->pgm.s.cPrivatePages += cPages;
3309
3310 /* Flush physical page map TLB. */
3311 pgmPhysInvalidatePageMapTLB(pVM);
3312
3313
3314 /*
3315 * !HACK ALERT! REM + (Shadowed) ROM ==> mess.
3316 *
3317 * If it's shadowed we'll register the handler after the ROM notification
3318 * so we get the access handler callbacks that we should. If it isn't
3319 * shadowed we'll do it the other way around to make REM use the built-in
3320 * ROM behavior and not the handler behavior (which is to route all access
3321 * to PGM atm).
3322 */
3323 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
3324 {
3325#ifdef VBOX_WITH_REM
3326 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, true /* fShadowed */);
3327#endif
3328 rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, pVM->pgm.s.hRomPhysHandlerType,
3329 pRomNew, MMHyperCCToR0(pVM, pRomNew), MMHyperCCToRC(pVM, pRomNew),
3330 pszDesc);
3331 }
3332 else
3333 {
3334 rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, pVM->pgm.s.hRomPhysHandlerType,
3335 pRomNew, MMHyperCCToR0(pVM, pRomNew), MMHyperCCToRC(pVM, pRomNew),
3336 pszDesc);
3337#ifdef VBOX_WITH_REM
3338 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false /* fShadowed */);
3339#endif
3340 }
3341 if (RT_SUCCESS(rc))
3342 {
3343 /*
3344 * Copy the image over to the virgin pages.
3345 * This must be done after linking in the RAM range.
3346 */
3347 size_t cbBinaryLeft = cbBinary;
3348 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
3349 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
3350 {
3351 void *pvDstPage;
3352 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pvDstPage);
3353 if (RT_FAILURE(rc))
3354 {
3355 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
3356 break;
3357 }
3358 if (cbBinaryLeft >= PAGE_SIZE)
3359 {
3360 memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << PAGE_SHIFT), PAGE_SIZE);
3361 cbBinaryLeft -= PAGE_SIZE;
3362 }
3363 else
3364 {
3365 ASMMemZeroPage(pvDstPage); /* (shouldn't be necessary, but can't hurt either) */
3366 if (cbBinaryLeft > 0)
3367 {
3368 memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << PAGE_SHIFT), cbBinaryLeft);
3369 cbBinaryLeft = 0;
3370 }
3371 }
3372 }
3373 if (RT_SUCCESS(rc))
3374 {
3375 /*
3376 * Initialize the ROM range.
3377 * Note that the Virgin member of the pages has already been initialized above.
3378 */
3379 pRomNew->GCPhys = GCPhys;
3380 pRomNew->GCPhysLast = GCPhysLast;
3381 pRomNew->cb = cb;
3382 pRomNew->fFlags = fFlags;
3383 pRomNew->idSavedState = UINT8_MAX;
3384 pRomNew->cbOriginal = cbBinary;
3385 pRomNew->pszDesc = pszDesc;
3386 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY
3387 ? pvBinary : RTMemDup(pvBinary, cbBinary);
3388 if (pRomNew->pvOriginal)
3389 {
3390 for (unsigned iPage = 0; iPage < cPages; iPage++)
3391 {
3392 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
3393 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
3394 PGM_PAGE_INIT_ZERO(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
3395 }
3396
3397 /* update the page count stats for the shadow pages. */
3398 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
3399 {
3400 pVM->pgm.s.cZeroPages += cPages;
3401 pVM->pgm.s.cAllPages += cPages;
3402 }
3403
3404 /*
3405 * Insert the ROM range, tell REM and return successfully.
3406 */
3407 pRomNew->pNextR3 = pRom;
3408 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
3409 pRomNew->pNextRC = pRom ? MMHyperCCToRC(pVM, pRom) : NIL_RTRCPTR;
3410
3411 if (pRomPrev)
3412 {
3413 pRomPrev->pNextR3 = pRomNew;
3414 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
3415 pRomPrev->pNextRC = MMHyperCCToRC(pVM, pRomNew);
3416 }
3417 else
3418 {
3419 pVM->pgm.s.pRomRangesR3 = pRomNew;
3420 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
3421 pVM->pgm.s.pRomRangesRC = MMHyperCCToRC(pVM, pRomNew);
3422 }
3423
3424 pgmPhysInvalidatePageMapTLB(pVM);
3425 GMMR3AllocatePagesCleanup(pReq);
3426 return VINF_SUCCESS;
3427 }
3428
3429 /* bail out */
3430 rc = VERR_NO_MEMORY;
3431 }
3432
3433 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
3434 AssertRC(rc2);
3435 }
3436
3437 if (!fRamExists)
3438 {
3439 pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
3440 MMHyperFree(pVM, pRamNew);
3441 }
3442 }
3443 MMHyperFree(pVM, pRomNew);
3444 }
3445
3446 /** @todo Purge the mapping cache or something... */
3447 GMMR3FreeAllocatedPages(pVM, pReq);
3448 GMMR3AllocatePagesCleanup(pReq);
3449 return rc;
3450}
3451
3452
3453/**
3454 * Registers a ROM image.
3455 *
3456 * Shadowed ROM images require double the amount of backing memory, so
3457 * don't use that unless you have to. Shadowing of ROM images is a process
3458 * where we can select where the reads go and where the writes go. On real
3459 * hardware the chipset provides means to configure this. We provide
3460 * PGMR3PhysProtectROM() for this purpose.
3461 *
3462 * A read-only copy of the ROM image will always be kept around, while we
3463 * allocate RAM pages for the changes on demand (unless all memory
3464 * is configured to be preallocated).
3465 *
3466 * @returns VBox status code.
3467 * @param pVM The cross context VM structure.
3468 * @param pDevIns The device instance owning the ROM.
3469 * @param GCPhys First physical address in the range.
3470 * Must be page aligned!
3471 * @param cb The size of the range (in bytes).
3472 * Must be page aligned!
3473 * @param pvBinary Pointer to the binary data backing the ROM image.
3474 * @param cbBinary The size of the binary data pvBinary points to.
3475 * This must be less than or equal to @a cb.
3476 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAGS_SHADOWED
3477 * and/or PGMPHYS_ROM_FLAGS_PERMANENT_BINARY.
3478 * @param pszDesc Pointer to description string. This must not be freed.
3479 *
3480 * @remark There is no way to remove the ROM yet, neither automatically on device
3481 * cleanup nor manually from the device. This isn't difficult in any way, it's
3482 * just not something we expect to be necessary for a while.
3483 */
3484VMMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
3485 const void *pvBinary, uint32_t cbBinary, uint32_t fFlags, const char *pszDesc)
3486{
3487 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p cbBinary=%#x fFlags=%#x pszDesc=%s\n",
3488 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, cbBinary, fFlags, pszDesc));
3489 pgmLock(pVM);
3490 int rc = pgmR3PhysRomRegister(pVM, pDevIns, GCPhys, cb, pvBinary, cbBinary, fFlags, pszDesc);
3491 pgmUnlock(pVM);
3492 return rc;
3493}
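/*
 * Usage sketch for PGMR3PhysRomRegister above (illustrative only): registering
 * a 128 KB shadowed ROM image at the top of the 32-bit address space could look
 * roughly like this, pvRomImage/cbRomImage standing for a binary the caller has
 * loaded elsewhere:
 *
 *     rc = PGMR3PhysRomRegister(pVM, pDevIns, UINT32_C(0xfffe0000), _128K,
 *                               pvRomImage, cbRomImage,
 *                               PGMPHYS_ROM_FLAGS_SHADOWED, "Example ROM");
 *     AssertRCReturn(rc, rc);
 */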
3494
3495
3496/**
3497 * Called by PGMR3MemSetup to reset the shadow, switch to the virgin, and verify
3498 * that the virgin part is untouched.
3499 *
3500 * This is done after the normal memory has been cleared.
3501 *
3502 * ASSUMES that the caller owns the PGM lock.
3503 *
3504 * @param pVM The cross context VM structure.
3505 */
3506int pgmR3PhysRomReset(PVM pVM)
3507{
3508 PGM_LOCK_ASSERT_OWNER(pVM);
3509 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
3510 {
3511 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
3512
3513 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
3514 {
3515 /*
3516 * Reset the physical handler.
3517 */
3518 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
3519 AssertRCReturn(rc, rc);
3520
3521 /*
3522 * What we do with the shadow pages depends on the memory
3523 * preallocation option. If not enabled, we'll just throw
3524 * out all the dirty pages and replace them by the zero page.
3525 */
3526 if (!pVM->pgm.s.fRamPreAlloc)
3527 {
3528 /* Free the dirty pages. */
3529 uint32_t cPendingPages = 0;
3530 PGMMFREEPAGESREQ pReq;
3531 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
3532 AssertRCReturn(rc, rc);
3533
3534 for (uint32_t iPage = 0; iPage < cPages; iPage++)
3535 if ( !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow)
3536 && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow))
3537 {
3538 Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) == PGM_PAGE_STATE_ALLOCATED);
3539 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRom->aPages[iPage].Shadow,
3540 pRom->GCPhys + (iPage << PAGE_SHIFT));
3541 AssertLogRelRCReturn(rc, rc);
3542 }
3543
3544 if (cPendingPages)
3545 {
3546 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
3547 AssertLogRelRCReturn(rc, rc);
3548 }
3549 GMMR3FreePagesCleanup(pReq);
3550 }
3551 else
3552 {
3553 /* clear all the shadow pages. */
3554 for (uint32_t iPage = 0; iPage < cPages; iPage++)
3555 {
3556 if (PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow))
3557 continue;
3558 Assert(!PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow));
3559 void *pvDstPage;
3560 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
3561 rc = pgmPhysPageMakeWritableAndMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pvDstPage);
3562 if (RT_FAILURE(rc))
3563 break;
3564 ASMMemZeroPage(pvDstPage);
3565 }
3566 AssertRCReturn(rc, rc);
3567 }
3568 }
3569
3570 /*
3571 * Restore the original ROM pages after a saved state load.
3572 * Also, in strict builds check that ROM pages remain unmodified.
3573 */
3574#ifndef VBOX_STRICT
3575 if (pVM->pgm.s.fRestoreRomPagesOnReset)
3576#endif
3577 {
3578 size_t cbSrcLeft = pRom->cbOriginal;
3579 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
3580 uint32_t cRestored = 0;
3581 for (uint32_t iPage = 0; iPage < cPages && cbSrcLeft > 0; iPage++, pbSrcPage += PAGE_SIZE)
3582 {
3583 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
3584 void const *pvDstPage;
3585 int rc = pgmPhysPageMapReadOnly(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pvDstPage);
3586 if (RT_FAILURE(rc))
3587 break;
3588
3589 if (memcmp(pvDstPage, pbSrcPage, RT_MIN(cbSrcLeft, PAGE_SIZE)))
3590 {
3591 if (pVM->pgm.s.fRestoreRomPagesOnReset)
3592 {
3593 void *pvDstPageW;
3594 rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pvDstPageW);
3595 AssertLogRelRCReturn(rc, rc);
3596 memcpy(pvDstPageW, pbSrcPage, RT_MIN(cbSrcLeft, PAGE_SIZE));
3597 cRestored++;
3598 }
3599 else
3600 LogRel(("pgmR3PhysRomReset: %RGp: ROM page changed (%s)\n", GCPhys, pRom->pszDesc));
3601 }
3602 cbSrcLeft -= RT_MIN(cbSrcLeft, PAGE_SIZE);
3603 }
3604 if (cRestored > 0)
3605 LogRel(("PGM: ROM \"%s\": Reloaded %u of %u pages.\n", pRom->pszDesc, cRestored, cPages));
3606 }
3607 }
3608
3609 /* Clear the ROM restore flag now as we only need to do this once after
3610 loading saved state. */
3611 pVM->pgm.s.fRestoreRomPagesOnReset = false;
3612
3613 return VINF_SUCCESS;
3614}
3615
3616
3617/**
3618 * Called by PGMR3Term to free resources.
3619 *
3620 * ASSUMES that the caller owns the PGM lock.
3621 *
3622 * @param pVM The cross context VM structure.
3623 */
3624void pgmR3PhysRomTerm(PVM pVM)
3625{
3626 /*
3627 * Free the heap copy of the original bits.
3628 */
3629 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
3630 {
3631 if ( pRom->pvOriginal
3632 && !(pRom->fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY))
3633 {
3634 RTMemFree((void *)pRom->pvOriginal);
3635 pRom->pvOriginal = NULL;
3636 }
3637 }
3638}
3639
3640
3641/**
3642 * Change the shadowing of a range of ROM pages.
3643 *
3644 * This is intended for implementing chipset specific memory registers
3645 * and will not be very strict about the input. It will silently ignore
3646 * any pages that are not part of a shadowed ROM.
3647 *
3648 * @returns VBox status code.
3649 * @retval VINF_PGM_SYNC_CR3
3650 *
3651 * @param pVM The cross context VM structure.
3652 * @param GCPhys Where to start. Page aligned.
3653 * @param cb How much to change. Page aligned.
3654 * @param enmProt The new ROM protection.
3655 */
3656VMMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
3657{
3658 /*
3659 * Check input
3660 */
3661 if (!cb)
3662 return VINF_SUCCESS;
3663 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3664 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3665 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
3666 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
3667 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
3668
3669 /*
3670 * Process the request.
3671 */
3672 pgmLock(pVM);
3673 int rc = VINF_SUCCESS;
3674 bool fFlushTLB = false;
3675 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
3676 {
3677 if ( GCPhys <= pRom->GCPhysLast
3678 && GCPhysLast >= pRom->GCPhys
3679 && (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
3680 {
3681 /*
3682             * Iterate the relevant pages and make the necessary changes.
3683 */
3684 bool fChanges = false;
3685 uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
3686 ? pRom->cb >> PAGE_SHIFT
3687 : (GCPhysLast - pRom->GCPhys + 1) >> PAGE_SHIFT;
3688 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
3689 iPage < cPages;
3690 iPage++)
3691 {
3692 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
3693 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
3694 {
3695 fChanges = true;
3696
3697 /* flush references to the page. */
3698 PPGMPAGE pRamPage = pgmPhysGetPage(pVM, pRom->GCPhys + (iPage << PAGE_SHIFT));
3699 int rc2 = pgmPoolTrackUpdateGCPhys(pVM, pRom->GCPhys + (iPage << PAGE_SHIFT), pRamPage,
3700 true /*fFlushPTEs*/, &fFlushTLB);
3701 if (rc2 != VINF_SUCCESS && (rc == VINF_SUCCESS || RT_FAILURE(rc2)))
3702 rc = rc2;
3703
3704 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
3705 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
3706
3707 *pOld = *pRamPage;
3708 *pRamPage = *pNew;
3709 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
3710 }
3711 pRomPage->enmProt = enmProt;
3712 }
3713
3714 /*
3715 * Reset the access handler if we made changes, no need
3716 * to optimize this.
3717 */
3718 if (fChanges)
3719 {
3720 int rc2 = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
3721 if (RT_FAILURE(rc2))
3722 {
3723 pgmUnlock(pVM);
3724 AssertRC(rc);
3725 return rc2;
3726 }
3727 }
3728
3729 /* Advance - cb isn't updated. */
3730 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
3731 }
3732 }
3733 pgmUnlock(pVM);
3734 if (fFlushTLB)
3735 PGM_INVL_ALL_VCPU_TLBS(pVM);
3736
3737 return rc;
3738}
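/*
 * A minimal usage sketch of PGMR3PhysRomProtect: chipset code flipping a
 * shadowed ROM range between reading from the ROM and reading from the shadow
 * RAM copy. GCPhysRomStart and cbRom are hypothetical, and
 * PGMROMPROT_READ_RAM_WRITE_IGNORE is assumed to be another member of the
 * PGMROMPROT enumeration; PGMROMPROT_READ_ROM_WRITE_IGNORE is the value used
 * by pgmR3PhysRomReset above.
 *
 * @code
 *     // Guest enabled shadowing via a chipset register: reads now hit the RAM copy.
 *     int rc = PGMR3PhysRomProtect(pVM, GCPhysRomStart, cbRom,
 *                                  PGMROMPROT_READ_RAM_WRITE_IGNORE);
 *     AssertRCReturn(rc, rc);
 *
 *     // ... later, back to reading the ROM itself and ignoring writes:
 *     rc = PGMR3PhysRomProtect(pVM, GCPhysRomStart, cbRom,
 *                              PGMROMPROT_READ_ROM_WRITE_IGNORE);
 *     AssertRCReturn(rc, rc);
 * @endcode
 */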
3739
3740
3741/**
3742 * Sets the Address Gate 20 state.
3743 *
3744 * @param pVCpu The cross context virtual CPU structure.
3745 * @param fEnable True if the gate should be enabled.
3746 * False if the gate should be disabled.
3747 */
3748VMMDECL(void) PGMR3PhysSetA20(PVMCPU pVCpu, bool fEnable)
3749{
3750 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVCpu->pgm.s.fA20Enabled));
3751 if (pVCpu->pgm.s.fA20Enabled != fEnable)
3752 {
3753 pVCpu->pgm.s.fA20Enabled = fEnable;
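        /* The mask below clears bit 20 of guest physical addresses while the gate is
           disabled: fEnable=false gives ~((RTGCPHYS)1 << 20), while fEnable=true gives
           ~((RTGCPHYS)0 << 20) == ~(RTGCPHYS)0, i.e. no masking at all. */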
3754 pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!fEnable << 20);
3755#ifdef VBOX_WITH_REM
3756 REMR3A20Set(pVCpu->pVMR3, pVCpu, fEnable);
3757#endif
3758#ifdef PGM_WITH_A20
3759 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
3760 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3761 pgmR3RefreshShadowModeAfterA20Change(pVCpu);
3762 HMFlushTLB(pVCpu);
3763#endif
3764 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cA20Changes);
3765 }
3766}
3767
3768
3769/**
3770 * Tree enumeration callback for dealing with age rollover.
3771 * It will perform a simple compression of the current age.
3772 */
3773static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
3774{
3775 /* Age compression - ASSUMES iNow == 4. */
3776 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
3777 if (pChunk->iLastUsed >= UINT32_C(0xffffff00))
3778 pChunk->iLastUsed = 3;
3779 else if (pChunk->iLastUsed >= UINT32_C(0xfffff000))
3780 pChunk->iLastUsed = 2;
3781 else if (pChunk->iLastUsed)
3782 pChunk->iLastUsed = 1;
3783 else /* iLastUsed = 0 */
3784 pChunk->iLastUsed = 4;
3785
3786 NOREF(pvUser);
3787 return 0;
3788}
3789
3790
3791/**
3792 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
3793 */
3794typedef struct PGMR3PHYSCHUNKUNMAPCB
3795{
3796 PVM pVM; /**< Pointer to the VM. */
3797 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
3798} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
3799
3800
3801/**
3802 * Callback used to find the mapping that's been unused for
3803 * the longest time.
3804 */
3805static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLU32NODECORE pNode, void *pvUser)
3806{
3807 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
3808 PPGMR3PHYSCHUNKUNMAPCB pArg = (PPGMR3PHYSCHUNKUNMAPCB)pvUser;
3809
3810 /*
3811 * Check for locks and compare when last used.
3812 */
3813 if (pChunk->cRefs)
3814 return 0;
3815 if (pChunk->cPermRefs)
3816 return 0;
3817 if ( pArg->pChunk
3818 && pChunk->iLastUsed >= pArg->pChunk->iLastUsed)
3819 return 0;
3820
3821 /*
3822 * Check that it's not in any of the TLBs.
3823 */
3824 PVM pVM = pArg->pVM;
3825 if ( pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(pChunk->Core.Key)].idChunk
3826 == pChunk->Core.Key)
3827 {
3828 pChunk = NULL;
3829 return 0;
3830 }
3831#ifdef VBOX_STRICT
3832 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
3833 {
3834 Assert(pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk != pChunk);
3835 Assert(pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk != pChunk->Core.Key);
3836 }
3837#endif
3838
3839 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
3840 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
3841 return 0;
3842
3843 pArg->pChunk = pChunk;
3844 return 0;
3845}
3846
3847
3848/**
3849 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
3850 *
3851 * The candidate will not be part of any TLBs, so no need to flush
3852 * anything afterwards.
3853 *
3854 * @returns Chunk id.
3855 * @param pVM The cross context VM structure.
3856 */
3857static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
3858{
3859 PGM_LOCK_ASSERT_OWNER(pVM);
3860
3861 /*
3862 * Enumerate the age tree starting with the left most node.
3863 */
3864 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkFindCandidate, a);
3865 PGMR3PHYSCHUNKUNMAPCB Args;
3866 Args.pVM = pVM;
3867 Args.pChunk = NULL;
3868 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args);
3869 Assert(Args.pChunk);
3870 if (Args.pChunk)
3871 {
3872 Assert(Args.pChunk->cRefs == 0);
3873 Assert(Args.pChunk->cPermRefs == 0);
3874 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkFindCandidate, a);
3875 return Args.pChunk->Core.Key;
3876 }
3877
3878 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkFindCandidate, a);
3879 return INT32_MAX;
3880}
3881
3882
3883/**
3884 * Rendezvous callback used by pgmR3PhysUnmapChunk that unmaps a chunk
3885 *
3886 * This is only called on one of the EMTs while the other ones are waiting for
3887 * it to complete this function.
3888 *
3889 * @returns VINF_SUCCESS (VBox strict status code).
3890 * @param pVM The cross context VM structure.
3891 * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
3892 * @param pvUser User pointer. Unused
3893 *
3894 */
3895static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysUnmapChunkRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
3896{
3897 int rc = VINF_SUCCESS;
3898 pgmLock(pVM);
3899 NOREF(pVCpu); NOREF(pvUser);
3900
3901 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
3902 {
3903 /* Flush the pgm pool cache; call the internal rendezvous handler as we're already in a rendezvous handler here. */
3904 /** @todo also not really efficient to unmap a chunk that contains PD
3905 * or PT pages. */
3906 pgmR3PoolClearAllRendezvous(pVM, &pVM->aCpus[0], NULL /* no need to flush the REM TLB as we already did that above */);
3907
3908 /*
3909 * Request the ring-0 part to unmap a chunk to make space in the mapping cache.
3910 */
3911 GMMMAPUNMAPCHUNKREQ Req;
3912 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
3913 Req.Hdr.cbReq = sizeof(Req);
3914 Req.pvR3 = NULL;
3915 Req.idChunkMap = NIL_GMM_CHUNKID;
3916 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
3917 if (Req.idChunkUnmap != INT32_MAX)
3918 {
3919 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkUnmap, a);
3920 rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
3921 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkUnmap, a);
3922 if (RT_SUCCESS(rc))
3923 {
3924 /*
3925 * Remove the unmapped one.
3926 */
3927 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
3928 AssertRelease(pUnmappedChunk);
3929 AssertRelease(!pUnmappedChunk->cRefs);
3930 AssertRelease(!pUnmappedChunk->cPermRefs);
3931 pUnmappedChunk->pv = NULL;
3932 pUnmappedChunk->Core.Key = UINT32_MAX;
3933#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
3934 MMR3HeapFree(pUnmappedChunk);
3935#else
3936 MMR3UkHeapFree(pVM, pUnmappedChunk, MM_TAG_PGM_CHUNK_MAPPING);
3937#endif
3938 pVM->pgm.s.ChunkR3Map.c--;
3939 pVM->pgm.s.cUnmappedChunks++;
3940
3941 /*
3942 * Flush dangling PGM pointers (R3 & R0 ptrs to GC physical addresses).
3943 */
3944 /** @todo We should not flush chunks which include cr3 mappings. */
3945 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3946 {
3947 PPGMCPU pPGM = &pVM->aCpus[idCpu].pgm.s;
3948
3949 pPGM->pGst32BitPdR3 = NULL;
3950 pPGM->pGstPaePdptR3 = NULL;
3951 pPGM->pGstAmd64Pml4R3 = NULL;
3952#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
3953 pPGM->pGst32BitPdR0 = NIL_RTR0PTR;
3954 pPGM->pGstPaePdptR0 = NIL_RTR0PTR;
3955 pPGM->pGstAmd64Pml4R0 = NIL_RTR0PTR;
3956#endif
3957 for (unsigned i = 0; i < RT_ELEMENTS(pPGM->apGstPaePDsR3); i++)
3958 {
3959 pPGM->apGstPaePDsR3[i] = NULL;
3960#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
3961 pPGM->apGstPaePDsR0[i] = NIL_RTR0PTR;
3962#endif
3963 }
3964
3965 /* Flush REM TLBs. */
3966 CPUMSetChangedFlags(&pVM->aCpus[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
3967 }
3968#ifdef VBOX_WITH_REM
3969 /* Flush REM translation blocks. */
3970 REMFlushTBs(pVM);
3971#endif
3972 }
3973 }
3974 }
3975 pgmUnlock(pVM);
3976 return rc;
3977}
3978
3979/**
3980 * Unmap a chunk to free up virtual address space (request packet handler for pgmR3PhysChunkMap)
3981 *
3982 * @returns VBox status code.
3983 * @param pVM The cross context VM structure.
3984 */
3985void pgmR3PhysUnmapChunk(PVM pVM)
3986{
3987 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysUnmapChunkRendezvous, NULL);
3988 AssertRC(rc);
3989}
3990
3991
3992/**
3993 * Maps the given chunk into the ring-3 mapping cache.
3994 *
3995 * This will call ring-0.
3996 *
3997 * @returns VBox status code.
3998 * @param pVM The cross context VM structure.
3999 * @param idChunk The chunk in question.
4000 * @param ppChunk Where to store the chunk tracking structure.
4001 *
4002 * @remarks Called from within the PGM critical section.
4003 * @remarks Can be called from any thread!
4004 */
4005int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
4006{
4007 int rc;
4008
4009 PGM_LOCK_ASSERT_OWNER(pVM);
4010
4011 /*
4012 * Move the chunk time forward.
4013 */
4014 pVM->pgm.s.ChunkR3Map.iNow++;
4015 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
4016 {
4017 pVM->pgm.s.ChunkR3Map.iNow = 4;
4018 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, NULL);
4019 }
4020
4021 /*
4022 * Allocate a new tracking structure first.
4023 */
4024#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
4025 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAllocZ(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
4026#else
4027 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3UkHeapAllocZ(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk), NULL);
4028#endif
4029 AssertReturn(pChunk, VERR_NO_MEMORY);
4030 pChunk->Core.Key = idChunk;
4031 pChunk->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
4032
4033 /*
4034 * Request the ring-0 part to map the chunk in question.
4035 */
4036 GMMMAPUNMAPCHUNKREQ Req;
4037 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
4038 Req.Hdr.cbReq = sizeof(Req);
4039 Req.pvR3 = NULL;
4040 Req.idChunkMap = idChunk;
4041 Req.idChunkUnmap = NIL_GMM_CHUNKID;
4042
4043 /* Must be callable from any thread, so can't use VMMR3CallR0. */
4044 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkMap, a);
4045 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
4046 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkMap, a);
4047 if (RT_SUCCESS(rc))
4048 {
4049 pChunk->pv = Req.pvR3;
4050
4051 /*
4052 * If we're running out of virtual address space, then we should
4053 * unmap another chunk.
4054 *
4055 * Currently, an unmap operation requires that all other virtual CPUs
4056 * are idling and not by chance making use of the memory we're
4057 * unmapping. So, we create an async unmap operation here.
4058 *
4059         * Now, when creating or restoring a saved state this won't work very
4060 * well since we may want to restore all guest RAM + a little something.
4061 * So, we have to do the unmap synchronously. Fortunately for us
4062 * though, during these operations the other virtual CPUs are inactive
4063 * and it should be safe to do this.
4064 */
4065 /** @todo Eventually we should lock all memory when used and do
4066 * map+unmap as one kernel call without any rendezvous or
4067 * other precautions. */
4068 if (pVM->pgm.s.ChunkR3Map.c + 1 >= pVM->pgm.s.ChunkR3Map.cMax)
4069 {
4070 switch (VMR3GetState(pVM))
4071 {
4072 case VMSTATE_LOADING:
4073 case VMSTATE_SAVING:
4074 {
4075 PVMCPU pVCpu = VMMGetCpu(pVM);
4076 if ( pVCpu
4077 && pVM->pgm.s.cDeprecatedPageLocks == 0)
4078 {
4079 pgmR3PhysUnmapChunkRendezvous(pVM, pVCpu, NULL);
4080 break;
4081 }
4082 /* fall thru */
4083 }
4084 default:
4085 rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysUnmapChunk, 1, pVM);
4086 AssertRC(rc);
4087 break;
4088 }
4089 }
4090
4091 /*
4092 * Update the tree. We must do this after any unmapping to make sure
4093 * the chunk we're going to return isn't unmapped by accident.
4094 */
4095 AssertPtr(Req.pvR3);
4096 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
4097 AssertRelease(fRc);
4098 pVM->pgm.s.ChunkR3Map.c++;
4099 pVM->pgm.s.cMappedChunks++;
4100 }
4101 else
4102 {
4103 /** @todo this may fail because of /proc/sys/vm/max_map_count, so we
4104 * should probably restrict ourselves on linux. */
4105 AssertRC(rc);
4106#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
4107 MMR3HeapFree(pChunk);
4108#else
4109 MMR3UkHeapFree(pVM, pChunk, MM_TAG_PGM_CHUNK_MAPPING);
4110#endif
4111 pChunk = NULL;
4112 }
4113
4114 *ppChunk = pChunk;
4115 return rc;
4116}
4117
4118
4119/**
4120 * For VMMCALLRING3_PGM_MAP_CHUNK, considered internal.
4121 *
4122 * @returns see pgmR3PhysChunkMap.
4123 * @param pVM The cross context VM structure.
4124 * @param idChunk The chunk to map.
4125 */
4126VMMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
4127{
4128 PPGMCHUNKR3MAP pChunk;
4129 int rc;
4130
4131 pgmLock(pVM);
4132 rc = pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
4133 pgmUnlock(pVM);
4134 return rc;
4135}
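/*
 * A minimal usage sketch: the ring-3 side of the VMMCALLRING3_PGM_MAP_CHUNK
 * request mentioned above. idChunk is assumed to have been fetched from the
 * ring-3 call argument passed up by ring-0/raw-mode context; the dispatching
 * switch itself is hypothetical.
 *
 * @code
 *     case VMMCALLRING3_PGM_MAP_CHUNK:
 *         rc = PGMR3PhysChunkMap(pVM, idChunk);
 *         break;
 * @endcode
 */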
4136
4137
4138/**
4139 * Invalidates the TLB for the ring-3 mapping cache.
4140 *
4141 * @param pVM The cross context VM structure.
4142 */
4143VMMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
4144{
4145 pgmLock(pVM);
4146 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
4147 {
4148 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
4149 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
4150 }
4151 /* The page map TLB references chunks, so invalidate that one too. */
4152 pgmPhysInvalidatePageMapTLB(pVM);
4153 pgmUnlock(pVM);
4154}
4155
4156
4157/**
4158 * Response to VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE to allocate a large
4159 * (2MB) page for use with a nested paging PDE.
4160 *
4161 * @returns The following VBox status codes.
4162 * @retval VINF_SUCCESS on success.
4163 * @retval VINF_EM_NO_MEMORY if we're out of memory.
4164 *
4165 * @param pVM The cross context VM structure.
4166 * @param GCPhys GC physical start address of the 2 MB range
4167 */
4168VMMR3DECL(int) PGMR3PhysAllocateLargeHandyPage(PVM pVM, RTGCPHYS GCPhys)
4169{
4170#ifdef PGM_WITH_LARGE_PAGES
4171 uint64_t u64TimeStamp1, u64TimeStamp2;
4172
4173 pgmLock(pVM);
4174
4175 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatAllocLargePage, a);
4176 u64TimeStamp1 = RTTimeMilliTS();
4177 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE, 0, NULL);
4178 u64TimeStamp2 = RTTimeMilliTS();
4179 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatAllocLargePage, a);
4180 if (RT_SUCCESS(rc))
4181 {
4182 Assert(pVM->pgm.s.cLargeHandyPages == 1);
4183
4184 uint32_t idPage = pVM->pgm.s.aLargeHandyPage[0].idPage;
4185 RTHCPHYS HCPhys = pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys;
4186
4187 void *pv;
4188
4189 /* Map the large page into our address space.
4190 *
4191 * Note: assuming that within the 2 MB range:
4192 * - GCPhys + PAGE_SIZE = HCPhys + PAGE_SIZE (whole point of this exercise)
4193         * - the user space mapping is contiguous as well
4194 * - page id (GCPhys) + 1 = page id (GCPhys + PAGE_SIZE)
4195 */
4196 rc = pgmPhysPageMapByPageID(pVM, idPage, HCPhys, &pv);
4197 AssertLogRelMsg(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc\n", idPage, HCPhys, rc));
4198
4199 if (RT_SUCCESS(rc))
4200 {
4201 /*
4202 * Clear the pages.
4203 */
4204 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatClearLargePage, b);
4205 for (unsigned i = 0; i < _2M/PAGE_SIZE; i++)
4206 {
4207 ASMMemZeroPage(pv);
4208
4209 PPGMPAGE pPage;
4210 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
4211 AssertRC(rc);
4212
4213 Assert(PGM_PAGE_IS_ZERO(pPage));
4214 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
4215 pVM->pgm.s.cZeroPages--;
4216
4217 /*
4218 * Do the PGMPAGE modifications.
4219 */
4220 pVM->pgm.s.cPrivatePages++;
4221 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
4222 PGM_PAGE_SET_PAGEID(pVM, pPage, idPage);
4223 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
4224 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PDE);
4225 PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
4226 PGM_PAGE_SET_TRACKING(pVM, pPage, 0);
4227
4228 /* Somewhat dirty assumption that page ids are increasing. */
4229 idPage++;
4230
4231 HCPhys += PAGE_SIZE;
4232 GCPhys += PAGE_SIZE;
4233
4234 pv = (void *)((uintptr_t)pv + PAGE_SIZE);
4235
4236 Log3(("PGMR3PhysAllocateLargePage: idPage=%#x HCPhys=%RGp\n", idPage, HCPhys));
4237 }
4238 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatClearLargePage, b);
4239
4240 /* Flush all TLBs. */
4241 PGM_INVL_ALL_VCPU_TLBS(pVM);
4242 pgmPhysInvalidatePageMapTLB(pVM);
4243 }
4244 pVM->pgm.s.cLargeHandyPages = 0;
4245 }
4246
4247 if (RT_SUCCESS(rc))
4248 {
4249 static uint32_t cTimeOut = 0;
4250 uint64_t u64TimeStampDelta = u64TimeStamp2 - u64TimeStamp1;
4251
4252 if (u64TimeStampDelta > 100)
4253 {
4254 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatLargePageOverflow);
4255 if ( ++cTimeOut > 10
4256 || u64TimeStampDelta > 1000 /* more than one second forces an early retirement from allocating large pages. */)
4257 {
4258                /* If repeated attempts to allocate a large page take more than 100 ms, then we fall back to normal 4k pages.
4259 * E.g. Vista 64 tries to move memory around, which takes a huge amount of time.
4260 */
4261 LogRel(("PGMR3PhysAllocateLargePage: allocating large pages takes too long (last attempt %d ms; nr of timeouts %d); DISABLE\n", u64TimeStampDelta, cTimeOut));
4262 PGMSetLargePageUsage(pVM, false);
4263 }
4264 }
4265 else
4266 if (cTimeOut > 0)
4267 cTimeOut--;
4268 }
4269
4270 pgmUnlock(pVM);
4271 return rc;
4272#else
4273 return VERR_NOT_IMPLEMENTED;
4274#endif /* PGM_WITH_LARGE_PAGES */
4275}
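/*
 * A minimal usage sketch of PGMR3PhysAllocateLargeHandyPage. The caller shown
 * here is hypothetical; the only requirement taken from the code above is that
 * GCPhys identifies the start of a 2 MB range, so it is rounded down first.
 *
 * @code
 *     RTGCPHYS const GCPhysBase = GCPhys & ~(RTGCPHYS)(_2M - 1);
 *     int rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
 *     if (rc != VINF_SUCCESS) // typically VINF_EM_NO_MEMORY
 *         LogFlow(("No large page for %RGp (%Rrc); falling back to 4 KB pages\n",
 *                  GCPhysBase, rc));
 * @endcode
 */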
4276
4277
4278/**
4279 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES.
4280 *
4281 * This function will also work the VM_FF_PGM_NO_MEMORY force action flag, to
4282 * signal and clear the out of memory condition. Once the condition has been
4283 * contracted, this API is used to try to clear it when the user wants to resume.
4284 *
4285 * @returns The following VBox status codes.
4286 * @retval VINF_SUCCESS on success. FFs cleared.
4287 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in
4288 * this case and it gets accompanied by VM_FF_PGM_NO_MEMORY.
4289 *
4290 * @param pVM The cross context VM structure.
4291 *
4292 * @remarks The VINF_EM_NO_MEMORY status is for the benefit of the FF processing
4293 * in EM.cpp and shouldn't be propagated outside TRPM, HM, EM and
4294 * pgmPhysEnsureHandyPage. There is one exception to this in the \#PF
4295 * handler.
4296 */
4297VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
4298{
4299 pgmLock(pVM);
4300
4301 /*
4302 * Allocate more pages, noting down the index of the first new page.
4303 */
4304 uint32_t iClear = pVM->pgm.s.cHandyPages;
4305 AssertMsgReturn(iClear <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d", iClear), VERR_PGM_HANDY_PAGE_IPE);
4306 Log(("PGMR3PhysAllocateHandyPages: %d -> %d\n", iClear, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
4307 int rcAlloc = VINF_SUCCESS;
4308 int rcSeed = VINF_SUCCESS;
4309 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
4310 while (rc == VERR_GMM_SEED_ME)
4311 {
4312 void *pvChunk;
4313 rcAlloc = rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
4314 if (RT_SUCCESS(rc))
4315 {
4316 rcSeed = rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
4317 if (RT_FAILURE(rc))
4318 SUPR3PageFree(pvChunk, GMM_CHUNK_SIZE >> PAGE_SHIFT);
4319 }
4320 if (RT_SUCCESS(rc))
4321 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
4322 }
4323
4324    /** @todo We should split this up into separate allocate and flush operations; sometimes you want to flush without allocating more (which will trigger the VM account limit error). */
4325 if ( rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT
4326 && pVM->pgm.s.cHandyPages > 0)
4327 {
4328 /* Still handy pages left, so don't panic. */
4329 rc = VINF_SUCCESS;
4330 }
4331
4332 if (RT_SUCCESS(rc))
4333 {
4334 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
4335 Assert(pVM->pgm.s.cHandyPages > 0);
4336 VM_FF_CLEAR(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
4337 VM_FF_CLEAR(pVM, VM_FF_PGM_NO_MEMORY);
4338
4339#ifdef VBOX_STRICT
4340 uint32_t i;
4341 for (i = iClear; i < pVM->pgm.s.cHandyPages; i++)
4342 if ( pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID
4343 || pVM->pgm.s.aHandyPages[i].idSharedPage != NIL_GMM_PAGEID
4344 || (pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & PAGE_OFFSET_MASK))
4345 break;
4346 if (i != pVM->pgm.s.cHandyPages)
4347 {
4348 RTAssertMsg1Weak(NULL, __LINE__, __FILE__, __FUNCTION__);
4349 RTAssertMsg2Weak("i=%d iClear=%d cHandyPages=%d\n", i, iClear, pVM->pgm.s.cHandyPages);
4350 for (uint32_t j = iClear; j < pVM->pgm.s.cHandyPages; j++)
4351                RTAssertMsg2Add(("%03d: idPage=%d HCPhysGCPhys=%RHp idSharedPage=%d%s\n", j,
4352 pVM->pgm.s.aHandyPages[j].idPage,
4353 pVM->pgm.s.aHandyPages[j].HCPhysGCPhys,
4354 pVM->pgm.s.aHandyPages[j].idSharedPage,
4355 j == i ? " <---" : ""));
4356 RTAssertPanic();
4357 }
4358#endif
4359 /*
4360 * Clear the pages.
4361 */
4362 while (iClear < pVM->pgm.s.cHandyPages)
4363 {
4364 PGMMPAGEDESC pPage = &pVM->pgm.s.aHandyPages[iClear];
4365 void *pv;
4366 rc = pgmPhysPageMapByPageID(pVM, pPage->idPage, pPage->HCPhysGCPhys, &pv);
4367 AssertLogRelMsgBreak(RT_SUCCESS(rc),
4368 ("%u/%u: idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc\n",
4369 iClear, pVM->pgm.s.cHandyPages, pPage->idPage, pPage->HCPhysGCPhys, rc));
4370 ASMMemZeroPage(pv);
4371 iClear++;
4372 Log3(("PGMR3PhysAllocateHandyPages: idPage=%#x HCPhys=%RGp\n", pPage->idPage, pPage->HCPhysGCPhys));
4373 }
4374 }
4375 else
4376 {
4377 uint64_t cAllocPages, cMaxPages, cBalloonPages;
4378
4379 /*
4380 * We should never get here unless there is a genuine shortage of
4381 * memory (or some internal error). Flag the error so the VM can be
4382 * suspended ASAP and the user informed. If we're totally out of
4383 * handy pages we will return failure.
4384 */
4385 /* Report the failure. */
4386 LogRel(("PGM: Failed to procure handy pages; rc=%Rrc rcAlloc=%Rrc rcSeed=%Rrc cHandyPages=%#x\n"
4387 " cAllPages=%#x cPrivatePages=%#x cSharedPages=%#x cZeroPages=%#x\n",
4388 rc, rcAlloc, rcSeed,
4389 pVM->pgm.s.cHandyPages,
4390 pVM->pgm.s.cAllPages,
4391 pVM->pgm.s.cPrivatePages,
4392 pVM->pgm.s.cSharedPages,
4393 pVM->pgm.s.cZeroPages));
4394
4395 if (GMMR3QueryMemoryStats(pVM, &cAllocPages, &cMaxPages, &cBalloonPages) == VINF_SUCCESS)
4396 {
4397 LogRel(("GMM: Statistics:\n"
4398 " Allocated pages: %RX64\n"
4399 " Maximum pages: %RX64\n"
4400 " Ballooned pages: %RX64\n", cAllocPages, cMaxPages, cBalloonPages));
4401 }
4402
4403 if ( rc != VERR_NO_MEMORY
4404 && rc != VERR_NO_PHYS_MEMORY
4405 && rc != VERR_LOCK_FAILED)
4406 {
4407 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
4408 {
4409 LogRel(("PGM: aHandyPages[#%#04x] = {.HCPhysGCPhys=%RHp, .idPage=%#08x, .idSharedPage=%#08x}\n",
4410 i, pVM->pgm.s.aHandyPages[i].HCPhysGCPhys, pVM->pgm.s.aHandyPages[i].idPage,
4411 pVM->pgm.s.aHandyPages[i].idSharedPage));
4412 uint32_t const idPage = pVM->pgm.s.aHandyPages[i].idPage;
4413 if (idPage != NIL_GMM_PAGEID)
4414 {
4415 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
4416 pRam;
4417 pRam = pRam->pNextR3)
4418 {
4419 uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
4420 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4421 if (PGM_PAGE_GET_PAGEID(&pRam->aPages[iPage]) == idPage)
4422 LogRel(("PGM: Used by %RGp %R[pgmpage] (%s)\n",
4423 pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pRam->aPages[iPage], pRam->pszDesc));
4424 }
4425 }
4426 }
4427 }
4428
4429 /* Set the FFs and adjust rc. */
4430 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
4431 VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
4432 if ( rc == VERR_NO_MEMORY
4433 || rc == VERR_NO_PHYS_MEMORY
4434 || rc == VERR_LOCK_FAILED)
4435 rc = VINF_EM_NO_MEMORY;
4436 }
4437
4438 pgmUnlock(pVM);
4439 return rc;
4440}
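/*
 * A minimal usage sketch: the typical ring-3 reaction to
 * VM_FF_PGM_NEED_HANDY_PAGES as described in the doc comment above. The
 * surrounding execution loop is assumed; the status handling follows the
 * @retval documentation of PGMR3PhysAllocateHandyPages.
 *
 * @code
 *     int rc = PGMR3PhysAllocateHandyPages(pVM);
 *     if (rc == VINF_EM_NO_MEMORY)
 *     {
 *         // VM_FF_PGM_NO_MEMORY is still set; the caller is expected to suspend
 *         // the VM so the user can free host memory before resuming.
 *         return VINF_EM_NO_MEMORY;
 *     }
 *     AssertRC(rc);
 * @endcode
 */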
4441
4442
4443/**
4444 * Frees the specified RAM page and replaces it with the ZERO page.
4445 *
4446 * This is used by ballooning, remapping MMIO2, RAM reset and state loading.
4447 *
4448 * @param pVM The cross context VM structure.
4449 * @param pReq Pointer to the request.
4450 * @param pcPendingPages Where the number of pages waiting to be freed are
4451 * kept. This will normally be incremented.
4452 * @param pPage Pointer to the page structure.
4453 * @param GCPhys The guest physical address of the page, if applicable.
4454 *
4455 * @remarks The caller must own the PGM lock.
4456 */
4457int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys)
4458{
4459 /*
4460 * Assert sanity.
4461 */
4462 PGM_LOCK_ASSERT_OWNER(pVM);
4463 if (RT_UNLIKELY( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
4464 && PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW))
4465 {
4466 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
4467 return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
4468 }
4469
4470 /** @todo What about ballooning of large pages??! */
4471 Assert( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
4472 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED);
4473
4474 if ( PGM_PAGE_IS_ZERO(pPage)
4475 || PGM_PAGE_IS_BALLOONED(pPage))
4476 return VINF_SUCCESS;
4477
4478 const uint32_t idPage = PGM_PAGE_GET_PAGEID(pPage);
4479 Log3(("pgmPhysFreePage: idPage=%#x GCPhys=%RGp pPage=%R[pgmpage]\n", idPage, GCPhys, pPage));
4480 if (RT_UNLIKELY( idPage == NIL_GMM_PAGEID
4481 || idPage > GMM_PAGEID_LAST
4482 || PGM_PAGE_GET_CHUNKID(pPage) == NIL_GMM_CHUNKID))
4483 {
4484 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
4485 return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, pPage);
4486 }
4487
4488 /* update page count stats. */
4489 if (PGM_PAGE_IS_SHARED(pPage))
4490 pVM->pgm.s.cSharedPages--;
4491 else
4492 pVM->pgm.s.cPrivatePages--;
4493 pVM->pgm.s.cZeroPages++;
4494
4495 /* Deal with write monitored pages. */
4496 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
4497 {
4498 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
4499 pVM->pgm.s.cWrittenToPages++;
4500 }
4501
4502 /*
4503 * pPage = ZERO page.
4504 */
4505 PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
4506 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
4507 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
4508 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_DONTCARE);
4509 PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
4510 PGM_PAGE_SET_TRACKING(pVM, pPage, 0);
4511
4512 /* Flush physical page map TLB entry. */
4513 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
4514
4515 /*
4516 * Make sure it's not in the handy page array.
4517 */
4518 for (uint32_t i = pVM->pgm.s.cHandyPages; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
4519 {
4520 if (pVM->pgm.s.aHandyPages[i].idPage == idPage)
4521 {
4522 pVM->pgm.s.aHandyPages[i].idPage = NIL_GMM_PAGEID;
4523 break;
4524 }
4525 if (pVM->pgm.s.aHandyPages[i].idSharedPage == idPage)
4526 {
4527 pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
4528 break;
4529 }
4530 }
4531
4532 /*
4533 * Push it onto the page array.
4534 */
4535 uint32_t iPage = *pcPendingPages;
4536 Assert(iPage < PGMPHYS_FREE_PAGE_BATCH_SIZE);
4537 *pcPendingPages += 1;
4538
4539 pReq->aPages[iPage].idPage = idPage;
4540
4541 if (iPage + 1 < PGMPHYS_FREE_PAGE_BATCH_SIZE)
4542 return VINF_SUCCESS;
4543
4544 /*
4545 * Flush the pages.
4546 */
4547 int rc = GMMR3FreePagesPerform(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE);
4548 if (RT_SUCCESS(rc))
4549 {
4550 GMMR3FreePagesRePrep(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
4551 *pcPendingPages = 0;
4552 }
4553 return rc;
4554}
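/*
 * A minimal sketch of the batching contract of pgmPhysFreePage, mirroring the
 * way pgmR3PhysRomReset uses it further up. The enumeration of the pages to
 * free is left out; pPage and GCPhys stand for the current page of that loop.
 *
 * @code
 *     uint32_t         cPendingPages = 0;
 *     PGMMFREEPAGESREQ pReq;
 *     int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
 *     AssertRCReturn(rc, rc);
 *
 *     // ... for each page that should be replaced by the ZERO page:
 *     rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys);
 *     AssertLogRelRCReturn(rc, rc);
 *
 *     // ... when done, flush whatever is still pending and clean up:
 *     if (cPendingPages)
 *     {
 *         rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
 *         AssertLogRelRCReturn(rc, rc);
 *     }
 *     GMMR3FreePagesCleanup(pReq);
 * @endcode
 */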
4555
4556
4557/**
4558 * Converts a GC physical address to a HC ring-3 pointer, with some
4559 * additional checks.
4560 *
4561 * @returns VBox status code.
4562 * @retval VINF_SUCCESS on success.
4563 * @retval VINF_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4564 * access handler of some kind.
4565 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4566 * accesses or is odd in any way.
4567 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4568 *
4569 * @param pVM The cross context VM structure.
4570 * @param GCPhys The GC physical address to convert. Since this is only
4571 * used for filling the REM TLB, the A20 mask must be
4572 * applied before calling this API.
4573 * @param fWritable Whether write access is required.
4574 * @param ppv Where to store the pointer corresponding to GCPhys on
4575 * success.
4576 */
4577VMMR3DECL(int) PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
4578{
4579 pgmLock(pVM);
4580 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
4581
4582 PPGMRAMRANGE pRam;
4583 PPGMPAGE pPage;
4584 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4585 if (RT_SUCCESS(rc))
4586 {
4587 if (PGM_PAGE_IS_BALLOONED(pPage))
4588 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
4589 else if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
4590 rc = VINF_SUCCESS;
4591 else
4592 {
4593 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4594 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4595 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
4596 {
4597 /** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
4598 * in -norawr0 mode. */
4599 if (fWritable)
4600 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
4601 }
4602 else
4603 {
4604                /* Temporarily disabled physical handler(s): since the recompiler
4605                   doesn't get notified when they're reset, we'll have to pretend they're
4606                   operating normally. */
4607 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
4608 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4609 else
4610 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
4611 }
4612 }
4613 if (RT_SUCCESS(rc))
4614 {
4615 int rc2;
4616
4617 /* Make sure what we return is writable. */
4618 if (fWritable)
4619 switch (PGM_PAGE_GET_STATE(pPage))
4620 {
4621 case PGM_PAGE_STATE_ALLOCATED:
4622 break;
4623 case PGM_PAGE_STATE_BALLOONED:
4624 AssertFailed();
4625 break;
4626 case PGM_PAGE_STATE_ZERO:
4627 case PGM_PAGE_STATE_SHARED:
4628 if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
4629 break;
4630 case PGM_PAGE_STATE_WRITE_MONITORED:
4631 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
4632 AssertLogRelRCReturn(rc2, rc2);
4633 break;
4634 }
4635
4636 /* Get a ring-3 mapping of the address. */
4637 PPGMPAGER3MAPTLBE pTlbe;
4638 rc2 = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
4639 AssertLogRelRCReturn(rc2, rc2);
4640 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4641 /** @todo mapping/locking hell; this isn't horribly efficient since
4642 * pgmPhysPageLoadIntoTlb will repeat the lookup we've done here. */
4643
4644 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
4645 }
4646 else
4647 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
4648
4649 /* else: handler catching all access, no pointer returned. */
4650 }
4651 else
4652 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
4653
4654 pgmUnlock(pVM);
4655 return rc;
4656}
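/*
 * A minimal usage sketch of PGMR3PhysTlbGCPhys2Ptr, as a recompiler TLB fill
 * might use it. GCPhys is assumed to already have the A20 mask applied (see
 * the doc comment above); the fDirectRead/fDirectWrite flags are hypothetical
 * names for whatever the caller stores in its TLB entry.
 *
 * @code
 *     void *pv = NULL;
 *     int rc = PGMR3PhysTlbGCPhys2Ptr(pVM, GCPhys, fWritable, &pv);
 *     bool const fDirectRead  = rc == VINF_SUCCESS || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE;
 *     bool const fDirectWrite = rc == VINF_SUCCESS;
 *     // VERR_PGM_PHYS_TLB_CATCH_ALL and VERR_PGM_PHYS_TLB_UNASSIGNED leave both
 *     // flags clear, forcing all accesses onto the slow (handler) path.
 * @endcode
 */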
4657