VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 93666

Last change on this file since 93666 was 93650, checked in by vboxsync, 3 years ago

VMM/PGM,*: Split the physical access handler type registration into separate ring-0 and ring-3 steps, expanding the type to 64-bit. bugref:10094

1/* $Id: PGMAllPhys.cpp 93650 2022-02-08 10:43:53Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM_PHYS
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/iom.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/nem.h>
30#include "PGMInternal.h"
31#include <VBox/vmm/vmcc.h>
32#include "PGMInline.h"
33#include <VBox/param.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/asm-amd64-x86.h>
38#include <VBox/log.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*********************************************************************************************************************************
45* Defined Constants And Macros *
46*********************************************************************************************************************************/
47/** Enable the physical TLB. */
48#define PGM_WITH_PHYS_TLB
49
50/** @def PGM_HANDLER_PHYS_IS_VALID_STATUS
51 * Checks if valid physical access handler return code (normal handler, not PF).
52 *
53 * Checks if the given strict status code is one of the expected ones for a
54 * physical access handler in the current context.
55 *
56 * @returns true or false.
57 * @param a_rcStrict The status code.
58 * @param a_fWrite Whether it is a write or read being serviced.
59 *
60 * @remarks We wish to keep the list of statuses here as short as possible.
61 * When changing, please make sure to update the PGMPhysRead,
62 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
63 */
64#ifdef IN_RING3
65# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
66 ( (a_rcStrict) == VINF_SUCCESS \
67 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
68#elif defined(IN_RING0)
69#define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
70 ( (a_rcStrict) == VINF_SUCCESS \
71 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT \
72 \
73 || (a_rcStrict) == ((a_fWrite) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ) \
74 || (a_rcStrict) == VINF_IOM_R3_MMIO_READ_WRITE \
75 || ((a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE && (a_fWrite)) \
76 \
77 || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR \
78 || (a_rcStrict) == VINF_EM_DBG_STOP \
79 || (a_rcStrict) == VINF_EM_DBG_EVENT \
80 || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
81 || (a_rcStrict) == VINF_EM_OFF \
82 || (a_rcStrict) == VINF_EM_SUSPEND \
83 || (a_rcStrict) == VINF_EM_RESET \
84 )
85#else
86# error "Context?"
87#endif
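/*
 * Editor's illustrative sketch (not part of the original file): how a caller that has
 * just invoked a physical access handler might use PGM_HANDLER_PHYS_IS_VALID_STATUS to
 * sanity check the returned strict status for the current context.  The handler pointer
 * and surrounding arguments are hypothetical.
 *
 * @code
 *    bool const   fWrite   = true;
 *    VBOXSTRICTRC rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf,
 *                                       PGMACCESSTYPE_WRITE, enmOrigin, uUser);
 *    AssertMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, fWrite),
 *              ("Unexpected handler status %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 * @endcode
 */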
88
89/** @def PGM_HANDLER_VIRT_IS_VALID_STATUS
90 * Checks if valid virtual access handler return code (normal handler, not PF).
91 *
92 * Checks if the given strict status code is one of the expected ones for a
93 * virtual access handler in the current context.
94 *
95 * @returns true or false.
96 * @param a_rcStrict The status code.
97 * @param a_fWrite Whether it is a write or read being serviced.
98 *
99 * @remarks We wish to keep the list of statuses here as short as possible.
100 * When changing, please make sure to update the PGMPhysRead,
101 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
102 */
103#ifdef IN_RING3
104# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
105 ( (a_rcStrict) == VINF_SUCCESS \
106 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
107#elif defined(IN_RING0)
108# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
109 (false /* no virtual handlers in ring-0! */ )
110#else
111# error "Context?"
112#endif
113
114
115
116/**
117 * Looks up a ROM range by its PGMROMRANGE::GCPhys value.
118 */
119DECLINLINE(PPGMROMRANGE) pgmPhysRomLookupByBase(PVMCC pVM, RTGCPHYS GCPhys)
120{
121 for (PPGMROMRANGE pRom = pVM->pgm.s.CTX_SUFF(pRomRanges); pRom; pRom = pRom->CTX_SUFF(pNext))
122 if (pRom->GCPhys == GCPhys)
123 return pRom;
124 return NULL;
125}
126
127#ifndef IN_RING3
128
129/**
130 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
131 * \#PF access handler callback for guest ROM range write access.}
132 *
133 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
134 */
135DECLCALLBACK(VBOXSTRICTRC) pgmPhysRomWritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
136 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
137
138{
139 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
140 AssertReturn(pRom, VINF_EM_RAW_EMULATE_INSTR);
141 uint32_t const iPage = (GCPhysFault - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
142 int rc;
143 RT_NOREF(uErrorCode, pvFault);
144
145 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
146
147 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));
148 switch (pRom->aPages[iPage].enmProt)
149 {
150 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
151 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
152 {
153 /*
154 * If it's a simple instruction which doesn't change the cpu state
155 * we will simply skip it. Otherwise we'll have to defer it to REM.
156 */
157 uint32_t cbOp;
158 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
159 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
160 if ( RT_SUCCESS(rc)
161 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
162 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
163 {
164 switch (pDis->bOpCode)
165 {
166 /** @todo Find other instructions we can safely skip, possibly
167 * adding this kind of detection to DIS or EM. */
168 case OP_MOV:
169 pRegFrame->rip += cbOp;
170 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteHandled);
171 return VINF_SUCCESS;
172 }
173 }
174 break;
175 }
176
177 case PGMROMPROT_READ_RAM_WRITE_RAM:
178 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
179 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
180 AssertRC(rc);
181 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
182
183 case PGMROMPROT_READ_ROM_WRITE_RAM:
184 /* Handle it in ring-3 because it's *way* easier there. */
185 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
186 break;
187
188 default:
189 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
190 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
191 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
192 }
193
194 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteUnhandled);
195 return VINF_EM_RAW_EMULATE_INSTR;
196}
197
198#endif /* !IN_RING3 */
199
200
201/**
202 * @callback_method_impl{FNPGMPHYSHANDLER,
203 * Access handler callback for ROM write accesses.}
204 *
205 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
206 */
207DECLCALLBACK(VBOXSTRICTRC)
208pgmPhysRomWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
209 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
210{
211 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
212 AssertReturn(pRom, VERR_INTERNAL_ERROR_3);
213 uint32_t const iPage = (GCPhys - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
214 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));
215 PPGMROMPAGE const pRomPage = &pRom->aPages[iPage];
216
217 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
218 RT_NOREF(pVCpu, pvPhys, enmOrigin);
219
220 if (enmAccessType == PGMACCESSTYPE_READ)
221 {
222 switch (pRomPage->enmProt)
223 {
224 /*
225 * Take the default action.
226 */
227 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
228 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
229 case PGMROMPROT_READ_ROM_WRITE_RAM:
230 case PGMROMPROT_READ_RAM_WRITE_RAM:
231 return VINF_PGM_HANDLER_DO_DEFAULT;
232
233 default:
234 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
235 pRom->aPages[iPage].enmProt, iPage, GCPhys),
236 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
237 }
238 }
239 else
240 {
241 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
242 switch (pRomPage->enmProt)
243 {
244 /*
245 * Ignore writes.
246 */
247 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
248 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
249 return VINF_SUCCESS;
250
251 /*
252 * Write to the RAM page.
253 */
254 case PGMROMPROT_READ_ROM_WRITE_RAM:
255 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
256 {
257 /* This should be impossible now, pvPhys doesn't work cross page any longer. */
258 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> GUEST_PAGE_SHIFT) == iPage);
259
260 /*
261 * Take the lock, do lazy allocation, map the page and copy the data.
262 *
263 * Note that we have to bypass the mapping TLB since it works on
264 * guest physical addresses and entering the shadow page would
265 * kind of screw things up...
266 */
267 PGM_LOCK_VOID(pVM);
268
269 PPGMPAGE pShadowPage = &pRomPage->Shadow;
270 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
271 {
272 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
273 AssertLogRelMsgReturnStmt(pShadowPage, ("%RGp\n", GCPhys), PGM_UNLOCK(pVM), VERR_PGM_PHYS_PAGE_GET_IPE);
274 }
275
276 void *pvDstPage;
277 int rc;
278#if defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)
279 if (PGM_IS_IN_NEM_MODE(pVM) && PGMROMPROT_IS_ROM(pRomPage->enmProt))
280 {
281 pvDstPage = &pRom->pbR3Alternate[GCPhys - pRom->GCPhys];
282 rc = VINF_SUCCESS;
283 }
284 else
285#endif
286 {
287 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
288 if (RT_SUCCESS(rc))
289 pvDstPage = (uint8_t *)pvDstPage + (GCPhys & GUEST_PAGE_OFFSET_MASK);
290 }
291 if (RT_SUCCESS(rc))
292 {
293 memcpy((uint8_t *)pvDstPage + (GCPhys & GUEST_PAGE_OFFSET_MASK), pvBuf, cbBuf);
294 pRomPage->LiveSave.fWrittenTo = true;
295
296 AssertMsg( rc == VINF_SUCCESS
297 || ( rc == VINF_PGM_SYNC_CR3
298 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
299 , ("%Rrc\n", rc));
300 rc = VINF_SUCCESS;
301 }
302
303 PGM_UNLOCK(pVM);
304 return rc;
305 }
306
307 default:
308 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
309 pRom->aPages[iPage].enmProt, iPage, GCPhys),
310 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
311 }
312 }
313}
314
315
316/**
317 * Common worker for pgmPhysMmio2WriteHandler and pgmPhysMmio2WritePfHandler.
318 */
319static VBOXSTRICTRC pgmPhysMmio2WriteHandlerCommon(PVMCC pVM, PVMCPUCC pVCpu, uint64_t hMmio2, RTGCPHYS GCPhys, RTGCPTR GCPtr)
320{
321 /*
322 * Get the MMIO2 range.
323 */
324 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), VERR_INTERNAL_ERROR_3);
325 AssertReturn(hMmio2 != 0, VERR_INTERNAL_ERROR_3);
326 PPGMREGMMIO2RANGE pMmio2 = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2 - 1];
327 Assert(pMmio2->idMmio2 == hMmio2);
328 AssertReturn((pMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES) == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
329 VERR_INTERNAL_ERROR_4);
330
331 /*
332 * Get the page and make sure it's an MMIO2 page.
333 */
334 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
335 AssertReturn(pPage, VINF_EM_RAW_EMULATE_INSTR);
336 AssertReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, VINF_EM_RAW_EMULATE_INSTR);
337
338 /*
339 * Set the dirty flag so we can avoid scanning all the pages when it isn't dirty.
340 * (The PGM_PAGE_HNDL_PHYS_STATE_DISABLED handler state indicates that a single
341 * page is dirty, saving the need for additional storage (bitmap).)
342 */
343 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_DIRTY;
344
345 /*
346 * Disable the handler for this page.
347 */
348 int rc = PGMHandlerPhysicalPageTempOff(pVM, pMmio2->RamRange.GCPhys, GCPhys & X86_PTE_PG_MASK);
349 AssertRC(rc);
350#ifndef IN_RING3
351 if (RT_SUCCESS(rc) && GCPtr != ~(RTGCPTR)0)
352 {
353 rc = PGMShwMakePageWritable(pVCpu, GCPtr, PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
354 AssertMsgReturn(rc == VINF_SUCCESS, ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, rc), rc);
355 }
356#else
357 RT_NOREF(pVCpu, GCPtr);
358#endif
359 return VINF_SUCCESS;
360}
361
362
363#ifndef IN_RING3
364/**
365 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
366 * \#PF access handler callback for guest MMIO2 dirty page tracing.}
367 *
368 * @remarks The @a uUser is the MMIO2 index.
369 */
370DECLCALLBACK(VBOXSTRICTRC) pgmPhysMmio2WritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
371 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
372{
373 RT_NOREF(pVCpu, uErrorCode, pRegFrame);
374 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
375 if (RT_SUCCESS(rcStrict))
376 {
377 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhysFault, pvFault);
378 PGM_UNLOCK(pVM);
379 }
380 return rcStrict;
381}
382#endif /* !IN_RING3 */
383
384
385/**
386 * @callback_method_impl{FNPGMPHYSHANDLER,
387 * Access handler callback for MMIO2 dirty page tracing.}
388 *
389 * @remarks The @a uUser is the MMIO2 index.
390 */
391DECLCALLBACK(VBOXSTRICTRC)
392pgmPhysMmio2WriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
393 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
394{
395 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
396 if (RT_SUCCESS(rcStrict))
397 {
398 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhys, ~(RTGCPTR)0);
399 PGM_UNLOCK(pVM);
400 if (rcStrict == VINF_SUCCESS)
401 rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
402 }
403 RT_NOREF(pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
404 return rcStrict;
405}
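/*
 * Editor's note (illustrative, not part of the original file): a rough sketch of the
 * dirty-tracking fast path implemented by the two handlers above and their common
 * worker.  uUser carries the 1-based MMIO2 handle registered for the range.
 *
 * @code
 *    // First write to a monitored MMIO2 page:
 *    PPGMREGMMIO2RANGE pMmio2 = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[uUser - 1];
 *    pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_DIRTY;              // range-level "something is dirty" hint
 *    PGMHandlerPhysicalPageTempOff(pVM, pMmio2->RamRange.GCPhys,
 *                                  GCPhys & X86_PTE_PG_MASK);    // per-page dirty marker; later writes
 *                                                                // to this page bypass the handler
 * @endcode
 *
 * The temporarily disabled pages are presumably re-armed when the dirty state is
 * queried and reset in ring-3; that code is not part of this file.
 */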
406
407
408/**
409 * Invalidates the RAM range TLBs.
410 *
411 * @param pVM The cross context VM structure.
412 */
413void pgmPhysInvalidRamRangeTlbs(PVMCC pVM)
414{
415 PGM_LOCK_VOID(pVM);
416 RT_ZERO(pVM->pgm.s.apRamRangesTlbR3);
417 RT_ZERO(pVM->pgm.s.apRamRangesTlbR0);
418 PGM_UNLOCK(pVM);
419}
420
421
422/**
423 * Tests if a value of type RTGCPHYS would be negative if the type had been signed
424 * instead of unsigned.
425 *
426 * @returns @c true if negative, @c false if positive or zero.
427 * @param a_GCPhys The value to test.
428 * @todo Move me to iprt/types.h.
429 */
430#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
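/*
 * Editor's worked example (illustrative, not part of the original file): with a 64-bit
 * RTGCPHYS the macro simply tests bit 63.  In the tree walks below, the unsigned
 * difference GCPhys - pRam->GCPhys "goes negative" (bit 63 set) exactly when GCPhys
 * lies below the range, which selects the left subtree.
 *
 * @code
 *    RTGCPHYS const GCPhys      = UINT64_C(0x0000000000001000);
 *    RTGCPHYS const GCPhysRange = UINT64_C(0x0000000000002000);
 *    RTGCPHYS const off         = GCPhys - GCPhysRange;    // 0xfffffffffffff000
 *    Assert(RTGCPHYS_IS_NEGATIVE(off));                    // bit 63 set -> go left
 * @endcode
 */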
431
432
433/**
434 * Slow worker for pgmPhysGetRange.
435 *
436 * @copydoc pgmPhysGetRange
437 */
438PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
439{
440 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
441
442 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
443 while (pRam)
444 {
445 RTGCPHYS off = GCPhys - pRam->GCPhys;
446 if (off < pRam->cb)
447 {
448 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
449 return pRam;
450 }
451 if (RTGCPHYS_IS_NEGATIVE(off))
452 pRam = pRam->CTX_SUFF(pLeft);
453 else
454 pRam = pRam->CTX_SUFF(pRight);
455 }
456 return NULL;
457}
458
459
460/**
461 * Slow worker for pgmPhysGetRangeAtOrAbove.
462 *
463 * @copydoc pgmPhysGetRangeAtOrAbove
464 */
465PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
466{
467 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
468
469 PPGMRAMRANGE pLastLeft = NULL;
470 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
471 while (pRam)
472 {
473 RTGCPHYS off = GCPhys - pRam->GCPhys;
474 if (off < pRam->cb)
475 {
476 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
477 return pRam;
478 }
479 if (RTGCPHYS_IS_NEGATIVE(off))
480 {
481 pLastLeft = pRam;
482 pRam = pRam->CTX_SUFF(pLeft);
483 }
484 else
485 pRam = pRam->CTX_SUFF(pRight);
486 }
487 return pLastLeft;
488}
489
490
491/**
492 * Slow worker for pgmPhysGetPage.
493 *
494 * @copydoc pgmPhysGetPage
495 */
496PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
497{
498 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
499
500 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
501 while (pRam)
502 {
503 RTGCPHYS off = GCPhys - pRam->GCPhys;
504 if (off < pRam->cb)
505 {
506 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
507 return &pRam->aPages[off >> GUEST_PAGE_SHIFT];
508 }
509
510 if (RTGCPHYS_IS_NEGATIVE(off))
511 pRam = pRam->CTX_SUFF(pLeft);
512 else
513 pRam = pRam->CTX_SUFF(pRight);
514 }
515 return NULL;
516}
517
518
519/**
520 * Slow worker for pgmPhysGetPageEx.
521 *
522 * @copydoc pgmPhysGetPageEx
523 */
524int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
525{
526 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
527
528 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
529 while (pRam)
530 {
531 RTGCPHYS off = GCPhys - pRam->GCPhys;
532 if (off < pRam->cb)
533 {
534 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
535 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
536 return VINF_SUCCESS;
537 }
538
539 if (RTGCPHYS_IS_NEGATIVE(off))
540 pRam = pRam->CTX_SUFF(pLeft);
541 else
542 pRam = pRam->CTX_SUFF(pRight);
543 }
544
545 *ppPage = NULL;
546 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
547}
548
549
550/**
551 * Slow worker for pgmPhysGetPageAndRangeEx.
552 *
553 * @copydoc pgmPhysGetPageAndRangeEx
554 */
555int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
556{
557 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
558
559 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
560 while (pRam)
561 {
562 RTGCPHYS off = GCPhys - pRam->GCPhys;
563 if (off < pRam->cb)
564 {
565 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
566 *ppRam = pRam;
567 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
568 return VINF_SUCCESS;
569 }
570
571 if (RTGCPHYS_IS_NEGATIVE(off))
572 pRam = pRam->CTX_SUFF(pLeft);
573 else
574 pRam = pRam->CTX_SUFF(pRight);
575 }
576
577 *ppRam = NULL;
578 *ppPage = NULL;
579 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
580}
581
582
583/**
584 * Checks if Address Gate 20 is enabled or not.
585 *
586 * @returns true if enabled.
587 * @returns false if disabled.
588 * @param pVCpu The cross context virtual CPU structure.
589 */
590VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
591{
592 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
593 return pVCpu->pgm.s.fA20Enabled;
594}
595
596
597/**
598 * Validates a GC physical address.
599 *
600 * @returns true if valid.
601 * @returns false if invalid.
602 * @param pVM The cross context VM structure.
603 * @param GCPhys The physical address to validate.
604 */
605VMMDECL(bool) PGMPhysIsGCPhysValid(PVMCC pVM, RTGCPHYS GCPhys)
606{
607 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
608 return pPage != NULL;
609}
610
611
612/**
613 * Checks if a GC physical address is a normal page,
614 * i.e. not ROM, MMIO or reserved.
615 *
616 * @returns true if normal.
617 * @returns false if invalid, ROM, MMIO or reserved page.
618 * @param pVM The cross context VM structure.
619 * @param GCPhys The physical address to check.
620 */
621VMMDECL(bool) PGMPhysIsGCPhysNormal(PVMCC pVM, RTGCPHYS GCPhys)
622{
623 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
624 return pPage
625 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
626}
627
628
629/**
630 * Converts a GC physical address to a HC physical address.
631 *
632 * @returns VINF_SUCCESS on success.
633 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
634 * page but has no physical backing.
635 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
636 * GC physical address.
637 *
638 * @param pVM The cross context VM structure.
639 * @param GCPhys The GC physical address to convert.
640 * @param pHCPhys Where to store the HC physical address on success.
641 */
642VMM_INT_DECL(int) PGMPhysGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
643{
644 PGM_LOCK_VOID(pVM);
645 PPGMPAGE pPage;
646 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
647 if (RT_SUCCESS(rc))
648 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & GUEST_PAGE_OFFSET_MASK);
649 PGM_UNLOCK(pVM);
650 return rc;
651}
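/*
 * Editor's illustrative sketch (not part of the original file): typical use of
 * PGMPhysGCPhys2HCPhys; the guest physical address here is arbitrary.
 *
 * @code
 *    RTHCPHYS HCPhys;
 *    int rc = PGMPhysGCPhys2HCPhys(pVM, UINT64_C(0x00100000), &HCPhys);   // 1 MiB
 *    if (RT_SUCCESS(rc))
 *        Log(("1 MiB guest page is backed by host physical address %RHp\n", HCPhys));
 *    else
 *        Assert(   rc == VERR_PGM_PHYS_PAGE_RESERVED
 *               || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
 * @endcode
 */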
652
653
654/**
655 * Invalidates all page mapping TLBs.
656 *
657 * @param pVM The cross context VM structure.
658 */
659void pgmPhysInvalidatePageMapTLB(PVMCC pVM)
660{
661 PGM_LOCK_VOID(pVM);
662 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushes);
663
664 /* Clear the R3 & R0 TLBs completely. */
665 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
666 {
667 pVM->pgm.s.PhysTlbR0.aEntries[i].GCPhys = NIL_RTGCPHYS;
668 pVM->pgm.s.PhysTlbR0.aEntries[i].pPage = 0;
669 pVM->pgm.s.PhysTlbR0.aEntries[i].pv = 0;
670 }
671
672 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
673 {
674 pVM->pgm.s.PhysTlbR3.aEntries[i].GCPhys = NIL_RTGCPHYS;
675 pVM->pgm.s.PhysTlbR3.aEntries[i].pPage = 0;
676 pVM->pgm.s.PhysTlbR3.aEntries[i].pMap = 0;
677 pVM->pgm.s.PhysTlbR3.aEntries[i].pv = 0;
678 }
679
680 PGM_UNLOCK(pVM);
681}
682
683
684/**
685 * Invalidates a page mapping TLB entry
686 *
687 * @param pVM The cross context VM structure.
688 * @param GCPhys GCPhys entry to flush
689 */
690void pgmPhysInvalidatePageMapTLBEntry(PVMCC pVM, RTGCPHYS GCPhys)
691{
692 PGM_LOCK_ASSERT_OWNER(pVM);
693
694 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushEntry);
695
696 unsigned const idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
697
698 pVM->pgm.s.PhysTlbR0.aEntries[idx].GCPhys = NIL_RTGCPHYS;
699 pVM->pgm.s.PhysTlbR0.aEntries[idx].pPage = 0;
700 pVM->pgm.s.PhysTlbR0.aEntries[idx].pv = 0;
701
702 pVM->pgm.s.PhysTlbR3.aEntries[idx].GCPhys = NIL_RTGCPHYS;
703 pVM->pgm.s.PhysTlbR3.aEntries[idx].pPage = 0;
704 pVM->pgm.s.PhysTlbR3.aEntries[idx].pMap = 0;
705 pVM->pgm.s.PhysTlbR3.aEntries[idx].pv = 0;
706}
707
708
709/**
710 * Makes sure that there is at least one handy page ready for use.
711 *
712 * This will also take the appropriate actions when reaching water-marks.
713 *
714 * @returns VBox status code.
715 * @retval VINF_SUCCESS on success.
716 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
717 *
718 * @param pVM The cross context VM structure.
719 *
720 * @remarks Must be called from within the PGM critical section. It may
721 * nip back to ring-3/0 in some cases.
722 */
723static int pgmPhysEnsureHandyPage(PVMCC pVM)
724{
725 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
726
727 /*
728 * Do we need to do anything special?
729 */
730#ifdef IN_RING3
731 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
732#else
733 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
734#endif
735 {
736 /*
737 * Allocate pages only if we're out of them, or in ring-3, almost out.
738 */
739#ifdef IN_RING3
740 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
741#else
742 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
743#endif
744 {
745 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
746 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
747#ifdef IN_RING3
748 int rc = PGMR3PhysAllocateHandyPages(pVM);
749#else
750 int rc = pgmR0PhysAllocateHandyPages(pVM, VMMGetCpuId(pVM), false /*fRing3*/);
751#endif
752 if (RT_UNLIKELY(rc != VINF_SUCCESS))
753 {
754 if (RT_FAILURE(rc))
755 return rc;
756 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
757 if (!pVM->pgm.s.cHandyPages)
758 {
759 LogRel(("PGM: no more handy pages!\n"));
760 return VERR_EM_NO_MEMORY;
761 }
762 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
763 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
764#ifndef IN_RING3
765 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
766#endif
767 }
768 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
769 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
770 ("%u\n", pVM->pgm.s.cHandyPages),
771 VERR_PGM_HANDY_PAGE_IPE);
772 }
773 else
774 {
775 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
776 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
777#ifndef IN_RING3
778 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
779 {
780 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
781 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
782 }
783#endif
784 }
785 }
786
787 return VINF_SUCCESS;
788}
789
790
791/**
792 * Replace a zero or shared page with a new page that we can write to.
793 *
794 * @returns The following VBox status codes.
795 * @retval VINF_SUCCESS on success, pPage is modified.
796 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
797 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
798 *
799 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
800 *
801 * @param pVM The cross context VM structure.
802 * @param pPage The physical page tracking structure. This will
803 * be modified on success.
804 * @param GCPhys The address of the page.
805 *
806 * @remarks Must be called from within the PGM critical section. It may
807 * nip back to ring-3/0 in some cases.
808 *
809 * @remarks This function shouldn't really fail, however if it does
810 * it probably means we've screwed up the size of handy pages and/or
811 * the low-water mark. Or, that some device I/O is causing a lot of
812 * pages to be allocated while the host is in a low-memory
813 * condition. This latter should be handled elsewhere and in a more
814 * controlled manner, it's on the @bugref{3170} todo list...
815 */
816int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
817{
818 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
819
820 /*
821 * Prereqs.
822 */
823 PGM_LOCK_ASSERT_OWNER(pVM);
824 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
825 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
826
827# ifdef PGM_WITH_LARGE_PAGES
828 /*
829 * Try allocate a large page if applicable.
830 */
831 if ( PGMIsUsingLargePages(pVM)
832 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
833 && !VM_IS_NEM_ENABLED(pVM)) /** @todo NEM: Implement large pages support. */
834 {
835 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
836 PPGMPAGE pBasePage;
837
838 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
839 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
840 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
841 {
842 rc = pgmPhysAllocLargePage(pVM, GCPhys);
843 if (rc == VINF_SUCCESS)
844 return rc;
845 }
846 /* Mark the base as type page table, so we don't check over and over again. */
847 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
848
849 /* fall back to 4KB pages. */
850 }
851# endif
852
853 /*
854 * Flush any shadow page table mappings of the page.
855 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
856 */
857 bool fFlushTLBs = false;
858 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
859 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
860
861 /*
862 * Ensure that we've got a page handy, take it and use it.
863 */
864 int rc2 = pgmPhysEnsureHandyPage(pVM);
865 if (RT_FAILURE(rc2))
866 {
867 if (fFlushTLBs)
868 PGM_INVL_ALL_VCPU_TLBS(pVM);
869 Assert(rc2 == VERR_EM_NO_MEMORY);
870 return rc2;
871 }
872 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
873 PGM_LOCK_ASSERT_OWNER(pVM);
874 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
875 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
876
877 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
878 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
879 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_GMMPAGEDESC_PHYS);
880 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
881 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
882 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
883
884 /*
885 * There are one or two actions to be taken the next time we allocate handy pages:
886 * - Tell the GMM (global memory manager) what the page is being used for.
887 * (Speeds up replacement operations - sharing and defragmenting.)
888 * - If the current backing is shared, it must be freed.
889 */
890 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
891 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
892
893 void const *pvSharedPage = NULL;
894 if (PGM_PAGE_IS_SHARED(pPage))
895 {
896 /* Mark this shared page for freeing/dereferencing. */
897 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
898 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
899
900 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
901 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
902 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageReplaceShared));
903 pVM->pgm.s.cSharedPages--;
904
905 /* Grab the address of the page so we can make a copy later on. (safe) */
906 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
907 AssertRC(rc);
908 }
909 else
910 {
911 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
912 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatRZPageReplaceZero);
913 pVM->pgm.s.cZeroPages--;
914 }
915
916 /*
917 * Do the PGMPAGE modifications.
918 */
919 pVM->pgm.s.cPrivatePages++;
920 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
921 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
922 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
923 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
924 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
925
926 /* Copy the shared page contents to the replacement page. */
927 if (pvSharedPage)
928 {
929 /* Get the virtual address of the new page. */
930 PGMPAGEMAPLOCK PgMpLck;
931 void *pvNewPage;
932 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
933 if (RT_SUCCESS(rc))
934 {
935 memcpy(pvNewPage, pvSharedPage, GUEST_PAGE_SIZE); /** @todo write ASMMemCopyPage */
936 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
937 }
938 }
939
940 if ( fFlushTLBs
941 && rc != VINF_PGM_GCPHYS_ALIASED)
942 PGM_INVL_ALL_VCPU_TLBS(pVM);
943
944 /*
945 * Notify NEM about the mapping change for this page.
946 *
947 * Note! Shadow ROM pages are complicated as they can definitely be
948 * allocated while not visible, so play safe.
949 */
950 if (VM_IS_NEM_ENABLED(pVM))
951 {
952 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
953 if ( enmType != PGMPAGETYPE_ROM_SHADOW
954 || pgmPhysGetPage(pVM, GCPhys) == pPage)
955 {
956 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
957 rc2 = NEMHCNotifyPhysPageAllocated(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, HCPhys,
958 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
959 if (RT_SUCCESS(rc))
960 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
961 else
962 rc = rc2;
963 }
964 }
965
966 return rc;
967}
968
969#ifdef PGM_WITH_LARGE_PAGES
970
971/**
972 * Replace a 2 MB range of zero pages with new pages that we can write to.
973 *
974 * @returns The following VBox status codes.
975 * @retval VINF_SUCCESS on success, pPage is modified.
976 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
977 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
978 *
979 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
980 *
981 * @param pVM The cross context VM structure.
982 * @param GCPhys The address of the page.
983 *
984 * @remarks Must be called from within the PGM critical section. It may block
985 * on GMM and host mutexes/locks, leaving HM context.
986 */
987int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys)
988{
989 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
990 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
991 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
992
993 /*
994 * Check Prereqs.
995 */
996 PGM_LOCK_ASSERT_OWNER(pVM);
997 Assert(PGMIsUsingLargePages(pVM));
998
999 /*
1000 * All the pages must be unallocated RAM pages, i.e. mapping the ZERO page.
1001 */
1002 PPGMPAGE pFirstPage;
1003 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
1004 if ( RT_SUCCESS(rc)
1005 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM
1006 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
1007 {
1008 /*
1009 * Further they should have PDE type set to PGM_PAGE_PDE_TYPE_DONTCARE,
1010 * since they are unallocated.
1011 */
1012 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
1013 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
1014 if (uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE)
1015 {
1016 /*
1017 * Now, make sure all the other pages in the 2 MB are in the same state.
1018 */
1019 GCPhys = GCPhysBase;
1020 unsigned cLeft = _2M / GUEST_PAGE_SIZE;
1021 while (cLeft-- > 0)
1022 {
1023 PPGMPAGE pSubPage = pgmPhysGetPage(pVM, GCPhys);
1024 if ( pSubPage
1025 && PGM_PAGE_GET_TYPE(pSubPage) == PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
1026 && PGM_PAGE_GET_STATE(pSubPage) == PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
1027 {
1028 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
1029 GCPhys += GUEST_PAGE_SIZE;
1030 }
1031 else
1032 {
1033 LogFlow(("pgmPhysAllocLargePage: Found page %RGp with wrong attributes (type=%d; state=%d); cancel check.\n",
1034 GCPhys, pSubPage ? PGM_PAGE_GET_TYPE(pSubPage) : -1, pSubPage ? PGM_PAGE_GET_STATE(pSubPage) : -1));
1035
1036 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
1037 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
1038 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
1039 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1040 }
1041 }
1042
1043 /*
1044 * Do the allocation.
1045 */
1046# ifdef IN_RING3
1047 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE, GCPhysBase, NULL);
1048# elif defined(IN_RING0)
1049 rc = pgmR0PhysAllocateLargePage(pVM, VMMGetCpuId(pVM), GCPhysBase);
1050# else
1051# error "Port me"
1052# endif
1053 if (RT_SUCCESS(rc))
1054 {
1055 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
1056 pVM->pgm.s.cLargePages++;
1057 return VINF_SUCCESS;
1058 }
1059
1060 /* If we fail once, it most likely means the host's memory is too
1061 fragmented; don't bother trying again. */
1062 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
1063 return rc;
1064 }
1065 }
1066 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1067}
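/*
 * Editor's worked example (illustrative, not part of the original file): the 2 MB
 * alignment arithmetic used above.  X86_PDE2M_PAE_PG_MASK clears the low 21 bits,
 * so every address inside a 2 MB region yields the same base, and the region spans
 * _2M / GUEST_PAGE_SIZE pages (512 when guest pages are 4 KiB).
 *
 * @code
 *    RTGCPHYS const GCPhys     = UINT64_C(0x123f8000);
 *    RTGCPHYS const GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;   // 0x12200000
 *    unsigned const cPages     = _2M / GUEST_PAGE_SIZE;            // 512 for 4 KiB pages
 * @endcode
 */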
1068
1069
1070/**
1071 * Recheck the entire 2 MB range to see if we can use it again as a large page.
1072 *
1073 * @returns The following VBox status codes.
1074 * @retval VINF_SUCCESS on success, the large page can be used again
1075 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
1076 *
1077 * @param pVM The cross context VM structure.
1078 * @param GCPhys The address of the page.
1079 * @param pLargePage Page structure of the base page
1080 */
1081int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
1082{
1083 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
1084
1085 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1086
1087 GCPhys &= X86_PDE2M_PAE_PG_MASK;
1088
1089 /* Check the base page. */
1090 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
1091 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
1092 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
1093 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1094 {
1095 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
1096 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1097 }
1098
1099 STAM_PROFILE_START(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1100 /* Check all remaining pages in the 2 MB range. */
1101 unsigned i;
1102 GCPhys += GUEST_PAGE_SIZE;
1103 for (i = 1; i < _2M / GUEST_PAGE_SIZE; i++)
1104 {
1105 PPGMPAGE pPage;
1106 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1107 AssertRCBreak(rc);
1108
1109 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
1110 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
1111 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
1112 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1113 {
1114 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
1115 break;
1116 }
1117
1118 GCPhys += GUEST_PAGE_SIZE;
1119 }
1120 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1121
1122 if (i == _2M / GUEST_PAGE_SIZE)
1123 {
1124 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
1125 pVM->pgm.s.cLargePagesDisabled--;
1126 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
1127 return VINF_SUCCESS;
1128 }
1129
1130 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1131}
1132
1133#endif /* PGM_WITH_LARGE_PAGES */
1134
1135
1136/**
1137 * Deal with a write monitored page.
1138 *
1139 * @returns VBox strict status code.
1140 *
1141 * @param pVM The cross context VM structure.
1142 * @param pPage The physical page tracking structure.
1143 * @param GCPhys The guest physical address of the page.
1144 * PGMPhysReleasePageMappingLock() passes NIL_RTGCPHYS in a
1145 * very unlikely situation where it is okay that we let NEM
1146 * fix the page access in a lazy fashion.
1147 *
1148 * @remarks Called from within the PGM critical section.
1149 */
1150void pgmPhysPageMakeWriteMonitoredWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1151{
1152 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
1153 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1154 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1155 Assert(pVM->pgm.s.cMonitoredPages > 0);
1156 pVM->pgm.s.cMonitoredPages--;
1157 pVM->pgm.s.cWrittenToPages++;
1158
1159#ifdef VBOX_WITH_NATIVE_NEM
1160 /*
1161 * Notify NEM about the protection change so we won't spin forever.
1162 *
1163 * Note! NEM needs to be able to lazily correct page protection as we cannot
1164 * really get it 100% right here it seems. The page pool does this too.
1165 */
1166 if (VM_IS_NEM_ENABLED(pVM) && GCPhys != NIL_RTGCPHYS)
1167 {
1168 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1169 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1170 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1171 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
1172 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhys) : NULL,
1173 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1174 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1175 }
1176#else
1177 RT_NOREF(GCPhys);
1178#endif
1179}
1180
1181
1182/**
1183 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
1184 *
1185 * @returns VBox strict status code.
1186 * @retval VINF_SUCCESS on success.
1187 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1188 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1189 *
1190 * @param pVM The cross context VM structure.
1191 * @param pPage The physical page tracking structure.
1192 * @param GCPhys The address of the page.
1193 *
1194 * @remarks Called from within the PGM critical section.
1195 */
1196int pgmPhysPageMakeWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1197{
1198 PGM_LOCK_ASSERT_OWNER(pVM);
1199 switch (PGM_PAGE_GET_STATE(pPage))
1200 {
1201 case PGM_PAGE_STATE_WRITE_MONITORED:
1202 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
1203 RT_FALL_THRU();
1204 default: /* to shut up GCC */
1205 case PGM_PAGE_STATE_ALLOCATED:
1206 return VINF_SUCCESS;
1207
1208 /*
1209 * Zero pages can be dummy pages for MMIO or reserved memory,
1210 * so we need to check the flags before joining cause with
1211 * shared page replacement.
1212 */
1213 case PGM_PAGE_STATE_ZERO:
1214 if (PGM_PAGE_IS_MMIO(pPage))
1215 return VERR_PGM_PHYS_PAGE_RESERVED;
1216 RT_FALL_THRU();
1217 case PGM_PAGE_STATE_SHARED:
1218 return pgmPhysAllocPage(pVM, pPage, GCPhys);
1219
1220 /* Not allowed to write to ballooned pages. */
1221 case PGM_PAGE_STATE_BALLOONED:
1222 return VERR_PGM_PHYS_PAGE_BALLOONED;
1223 }
1224}
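/*
 * Editor's illustrative sketch (not part of the original file): how a caller typically
 * reacts to the page states handled above before writing.  pVM, pPage and GCPhys are
 * assumed to be in scope; the helper right below combines this with the mapping step.
 *
 * @code
 *    int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
 *    if (RT_SUCCESS(rc))
 *    {
 *        // rc is VINF_SUCCESS or VINF_PGM_SYNC_CR3 (page pool flush pending);
 *        // the page is now in the ALLOCATED state and safe to map and write.
 *    }
 *    else
 *    {
 *        // VERR_PGM_PHYS_PAGE_RESERVED (MMIO dummy) or VERR_PGM_PHYS_PAGE_BALLOONED:
 *        // the write cannot be serviced by allocating a backing page.
 *    }
 * @endcode
 */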
1225
1226
1227/**
1228 * Internal usage: Map the page specified by its GMM ID.
1229 *
1230 * This is similar to pgmPhysPageMap
1231 *
1232 * @returns VBox status code.
1233 *
1234 * @param pVM The cross context VM structure.
1235 * @param idPage The Page ID.
1236 * @param HCPhys The physical address (for SUPR0HCPhysToVirt).
1237 * @param ppv Where to store the mapping address.
1238 *
1239 * @remarks Called from within the PGM critical section. The mapping is only
1240 * valid while you are inside this section.
1241 */
1242int pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
1243{
1244 /*
1245 * Validation.
1246 */
1247 PGM_LOCK_ASSERT_OWNER(pVM);
1248 AssertReturn(HCPhys && !(HCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1249 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
1250 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
1251
1252#ifdef IN_RING0
1253# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1254 return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)GUEST_PAGE_OFFSET_MASK, ppv);
1255# else
1256 return GMMR0PageIdToVirt(pVM, idPage, ppv);
1257# endif
1258
1259#else
1260 /*
1261 * Find/make Chunk TLB entry for the mapping chunk.
1262 */
1263 PPGMCHUNKR3MAP pMap;
1264 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1265 if (pTlbe->idChunk == idChunk)
1266 {
1267 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1268 pMap = pTlbe->pChunk;
1269 }
1270 else
1271 {
1272 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1273
1274 /*
1275 * Find the chunk, map it if necessary.
1276 */
1277 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1278 if (pMap)
1279 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1280 else
1281 {
1282 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1283 if (RT_FAILURE(rc))
1284 return rc;
1285 }
1286
1287 /*
1288 * Enter it into the Chunk TLB.
1289 */
1290 pTlbe->idChunk = idChunk;
1291 pTlbe->pChunk = pMap;
1292 }
1293
1294 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << GUEST_PAGE_SHIFT);
1295 return VINF_SUCCESS;
1296#endif
1297}
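/*
 * Editor's note (illustrative, not part of the original file): a GMM page ID encodes
 * the chunk ID in its upper bits and the page index within that chunk in its lower
 * bits, which is how the function above and the ring-3 chunk TLB split it up:
 *
 * @code
 *    uint32_t  const idChunk = idPage >> GMM_CHUNKID_SHIFT;                          // which mapping chunk
 *    uintptr_t const offPage = (idPage & GMM_PAGEID_IDX_MASK) << GUEST_PAGE_SHIFT;   // byte offset into that chunk's mapping
 * @endcode
 */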
1298
1299
1300/**
1301 * Maps a page into the current virtual address space so it can be accessed.
1302 *
1303 * @returns VBox status code.
1304 * @retval VINF_SUCCESS on success.
1305 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1306 *
1307 * @param pVM The cross context VM structure.
1308 * @param pPage The physical page tracking structure.
1309 * @param GCPhys The address of the page.
1310 * @param ppMap Where to store the address of the mapping tracking structure.
1311 * @param ppv Where to store the mapping address of the page. The page
1312 * offset is masked off!
1313 *
1314 * @remarks Called from within the PGM critical section.
1315 */
1316static int pgmPhysPageMapCommon(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1317{
1318 PGM_LOCK_ASSERT_OWNER(pVM);
1319 NOREF(GCPhys);
1320
1321 /*
1322 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1323 */
1324 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1325 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1326 {
1327 /* Decode the page id to a page in a MMIO2 ram range. */
1328 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1329 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1330 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1331 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
1332 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1333 pPage->s.idPage, pPage->s.uStateY),
1334 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1335 PPGMREGMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1336 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1337 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1338 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> GUEST_PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1339 *ppMap = NULL;
1340# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1341 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1342# elif defined(IN_RING0)
1343 *ppv = (uint8_t *)pMmio2Range->pvR0 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
1344 return VINF_SUCCESS;
1345# else
1346 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
1347 return VINF_SUCCESS;
1348# endif
1349 }
1350
1351# ifdef VBOX_WITH_PGM_NEM_MODE
1352 if (pVM->pgm.s.fNemMode)
1353 {
1354# ifdef IN_RING3
1355 /*
1356 * Find the corresponding RAM range and use that to locate the mapping address.
1357 */
1358 /** @todo Use the page ID for some kind of indexing as we do with MMIO2 above. */
1359 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
1360 AssertLogRelMsgReturn(pRam, ("%RTGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
1361 size_t const idxPage = (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
1362 Assert(pPage == &pRam->aPages[idxPage]);
1363 *ppMap = NULL;
1364 *ppv = (uint8_t *)pRam->pvR3 + (idxPage << GUEST_PAGE_SHIFT);
1365 return VINF_SUCCESS;
1366# else
1367 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1368# endif
1369 }
1370# endif
1371
1372 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1373 if (idChunk == NIL_GMM_CHUNKID)
1374 {
1375 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1376 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1377 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1378 {
1379 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1380 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1381 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage)== pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1382 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1383 *ppv = pVM->pgm.s.abZeroPg;
1384 }
1385 else
1386 *ppv = pVM->pgm.s.abZeroPg;
1387 *ppMap = NULL;
1388 return VINF_SUCCESS;
1389 }
1390
1391# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1392 /*
1393 * Just use the physical address.
1394 */
1395 *ppMap = NULL;
1396 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1397
1398# elif defined(IN_RING0)
1399 /*
1400 * Go by page ID thru GMMR0.
1401 */
1402 *ppMap = NULL;
1403 return GMMR0PageIdToVirt(pVM, PGM_PAGE_GET_PAGEID(pPage), ppv);
1404
1405# else
1406 /*
1407 * Find/make Chunk TLB entry for the mapping chunk.
1408 */
1409 PPGMCHUNKR3MAP pMap;
1410 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1411 if (pTlbe->idChunk == idChunk)
1412 {
1413 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1414 pMap = pTlbe->pChunk;
1415 AssertPtr(pMap->pv);
1416 }
1417 else
1418 {
1419 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1420
1421 /*
1422 * Find the chunk, map it if necessary.
1423 */
1424 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1425 if (pMap)
1426 {
1427 AssertPtr(pMap->pv);
1428 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1429 }
1430 else
1431 {
1432 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1433 if (RT_FAILURE(rc))
1434 return rc;
1435 AssertPtr(pMap->pv);
1436 }
1437
1438 /*
1439 * Enter it into the Chunk TLB.
1440 */
1441 pTlbe->idChunk = idChunk;
1442 pTlbe->pChunk = pMap;
1443 }
1444
1445 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << GUEST_PAGE_SHIFT);
1446 *ppMap = pMap;
1447 return VINF_SUCCESS;
1448# endif /* !IN_RING0 */
1449}
1450
1451
1452/**
1453 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1454 *
1455 * This is typically used in paths where we cannot use the TLB methods (like ROM
1456 * pages) or where there is no point in using them since we won't get many hits.
1457 *
1458 * @returns VBox strict status code.
1459 * @retval VINF_SUCCESS on success.
1460 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1461 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1462 *
1463 * @param pVM The cross context VM structure.
1464 * @param pPage The physical page tracking structure.
1465 * @param GCPhys The address of the page.
1466 * @param ppv Where to store the mapping address of the page. The page
1467 * offset is masked off!
1468 *
1469 * @remarks Called from within the PGM critical section. The mapping is only
1470 * valid while you are inside this section.
1471 */
1472int pgmPhysPageMakeWritableAndMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1473{
1474 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1475 if (RT_SUCCESS(rc))
1476 {
1477 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1478 PPGMPAGEMAP pMapIgnore;
1479 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1480 if (RT_FAILURE(rc2)) /* preserve rc */
1481 rc = rc2;
1482 }
1483 return rc;
1484}
1485
1486
1487/**
1488 * Maps a page into the current virtual address space so it can be accessed for
1489 * both writing and reading.
1490 *
1491 * This is typically used in paths where we cannot use the TLB methods (like ROM
1492 * pages) or where there is no point in using them since we won't get many hits.
1493 *
1494 * @returns VBox status code.
1495 * @retval VINF_SUCCESS on success.
1496 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1497 *
1498 * @param pVM The cross context VM structure.
1499 * @param pPage The physical page tracking structure. Must be in the
1500 * allocated state.
1501 * @param GCPhys The address of the page.
1502 * @param ppv Where to store the mapping address of the page. The page
1503 * offset is masked off!
1504 *
1505 * @remarks Called from within the PGM critical section. The mapping is only
1506 * valid while you are inside this section.
1507 */
1508int pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1509{
1510 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1511 PPGMPAGEMAP pMapIgnore;
1512 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1513}
1514
1515
1516/**
1517 * Maps a page into the current virtual address space so it can be accessed for
1518 * reading.
1519 *
1520 * This is typically used in paths where we cannot use the TLB methods (like ROM
1521 * pages) or where there is no point in using them since we won't get many hits.
1522 *
1523 * @returns VBox status code.
1524 * @retval VINF_SUCCESS on success.
1525 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1526 *
1527 * @param pVM The cross context VM structure.
1528 * @param pPage The physical page tracking structure.
1529 * @param GCPhys The address of the page.
1530 * @param ppv Where to store the mapping address of the page. The page
1531 * offset is masked off!
1532 *
1533 * @remarks Called from within the PGM critical section. The mapping is only
1534 * valid while you are inside this section.
1535 */
1536int pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1537{
1538 PPGMPAGEMAP pMapIgnore;
1539 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1540}
1541
1542
1543/**
1544 * Load a guest page into the ring-3 physical TLB.
1545 *
1546 * @returns VBox status code.
1547 * @retval VINF_SUCCESS on success
1548 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1549 * @param pVM The cross context VM structure.
1550 * @param GCPhys The guest physical address in question.
1551 */
1552int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys)
1553{
1554 PGM_LOCK_ASSERT_OWNER(pVM);
1555
1556 /*
1557 * Find the ram range and page and hand it over to the with-page function.
1558 * 99.8% of requests are expected to be in the first range.
1559 */
1560 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1561 if (!pPage)
1562 {
1563 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1564 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1565 }
1566
1567 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1568}
1569
1570
1571/**
1572 * Load a guest page into the ring-3 physical TLB.
1573 *
1574 * @returns VBox status code.
1575 * @retval VINF_SUCCESS on success
1576 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1577 *
1578 * @param pVM The cross context VM structure.
1579 * @param pPage Pointer to the PGMPAGE structure corresponding to
1580 * GCPhys.
1581 * @param GCPhys The guest physical address in question.
1582 */
1583int pgmPhysPageLoadIntoTlbWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1584{
1585 PGM_LOCK_ASSERT_OWNER(pVM);
1586 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1587
1588 /*
1589 * Map the page.
1590 * Make a special case for the zero page as it is kind of special.
1591 */
1592 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1593 if ( !PGM_PAGE_IS_ZERO(pPage)
1594 && !PGM_PAGE_IS_BALLOONED(pPage))
1595 {
1596 void *pv;
1597 PPGMPAGEMAP pMap;
1598 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1599 if (RT_FAILURE(rc))
1600 return rc;
1601# ifndef IN_RING0
1602 pTlbe->pMap = pMap;
1603# endif
1604 pTlbe->pv = pv;
1605 Assert(!((uintptr_t)pTlbe->pv & GUEST_PAGE_OFFSET_MASK));
1606 }
1607 else
1608 {
1609 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1610# ifndef IN_RING0
1611 pTlbe->pMap = NULL;
1612# endif
1613 pTlbe->pv = pVM->pgm.s.abZeroPg;
1614 }
1615# ifdef PGM_WITH_PHYS_TLB
1616 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1617 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1618 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1619 else
1620 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1621# else
1622 pTlbe->GCPhys = NIL_RTGCPHYS;
1623# endif
1624 pTlbe->pPage = pPage;
1625 return VINF_SUCCESS;
1626}
1627
1628
1629/**
1630 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1631 * own the PGM lock and therefore not need to lock the mapped page.
1632 *
1633 * @returns VBox status code.
1634 * @retval VINF_SUCCESS on success.
1635 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1636 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1637 *
1638 * @param pVM The cross context VM structure.
1639 * @param GCPhys The guest physical address of the page that should be mapped.
1640 * @param pPage Pointer to the PGMPAGE structure for the page.
1641 * @param ppv Where to store the address corresponding to GCPhys.
1642 *
1643 * @internal
1644 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1645 */
1646int pgmPhysGCPhys2CCPtrInternalDepr(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1647{
1648 int rc;
1649 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1650 PGM_LOCK_ASSERT_OWNER(pVM);
1651 pVM->pgm.s.cDeprecatedPageLocks++;
1652
1653 /*
1654 * Make sure the page is writable.
1655 */
1656 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1657 {
1658 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1659 if (RT_FAILURE(rc))
1660 return rc;
1661 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1662 }
1663 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1664
1665 /*
1666 * Get the mapping address.
1667 */
1668 PPGMPAGEMAPTLBE pTlbe;
1669 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1670 if (RT_FAILURE(rc))
1671 return rc;
1672 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1673 return VINF_SUCCESS;
1674}
1675
1676
1677/**
1678 * Locks a page mapping for writing.
1679 *
1680 * @param pVM The cross context VM structure.
1681 * @param pPage The page.
1682 * @param pTlbe The mapping TLB entry for the page.
1683 * @param pLock The lock structure (output).
1684 */
1685DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1686{
1687# ifndef IN_RING0
1688 PPGMPAGEMAP pMap = pTlbe->pMap;
1689 if (pMap)
1690 pMap->cRefs++;
1691# else
1692 RT_NOREF(pTlbe);
1693# endif
1694
1695 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1696 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1697 {
1698 if (cLocks == 0)
1699 pVM->pgm.s.cWriteLockedPages++;
1700 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1701 }
1702 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1703 {
1704 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1705 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1706# ifndef IN_RING0
1707 if (pMap)
1708 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1709# endif
1710 }
1711
1712 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1713# ifndef IN_RING0
1714 pLock->pvMap = pMap;
1715# else
1716 pLock->pvMap = NULL;
1717# endif
1718}
1719
1720/**
1721 * Locks a page mapping for reading.
1722 *
1723 * @param pVM The cross context VM structure.
1724 * @param pPage The page.
1725 * @param pTlbe The mapping TLB entry for the page.
1726 * @param pLock The lock structure (output).
1727 */
1728DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1729{
1730# ifndef IN_RING0
1731 PPGMPAGEMAP pMap = pTlbe->pMap;
1732 if (pMap)
1733 pMap->cRefs++;
1734# else
1735 RT_NOREF(pTlbe);
1736# endif
1737
1738 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1739 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1740 {
1741 if (cLocks == 0)
1742 pVM->pgm.s.cReadLockedPages++;
1743 PGM_PAGE_INC_READ_LOCKS(pPage);
1744 }
1745 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1746 {
1747 PGM_PAGE_INC_READ_LOCKS(pPage);
1748 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1749# ifndef IN_RING0
1750 if (pMap)
1751 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1752# endif
1753 }
1754
1755 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1756# ifndef IN_RING0
1757 pLock->pvMap = pMap;
1758# else
1759 pLock->pvMap = NULL;
1760# endif
1761}
1762
1763
1764/**
1765 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1766 * own the PGM lock and have access to the page structure.
1767 *
1768 * @returns VBox status code.
1769 * @retval VINF_SUCCESS on success.
1770 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1771 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1772 *
1773 * @param pVM The cross context VM structure.
1774 * @param GCPhys The guest physical address of the page that should be mapped.
1775 * @param pPage Pointer to the PGMPAGE structure for the page.
1776 * @param ppv Where to store the address corresponding to GCPhys.
1777 * @param pLock Where to store the lock information that
1778 * pgmPhysReleaseInternalPageMappingLock needs.
1779 *
1780 * @internal
1781 */
1782int pgmPhysGCPhys2CCPtrInternal(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1783{
1784 int rc;
1785 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1786 PGM_LOCK_ASSERT_OWNER(pVM);
1787
1788 /*
1789 * Make sure the page is writable.
1790 */
1791 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1792 {
1793 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1794 if (RT_FAILURE(rc))
1795 return rc;
1796 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1797 }
1798 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1799
1800 /*
1801 * Do the job.
1802 */
1803 PPGMPAGEMAPTLBE pTlbe;
1804 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1805 if (RT_FAILURE(rc))
1806 return rc;
1807 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1808 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1809 return VINF_SUCCESS;
1810}
1811
1812
1813/**
1814 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1815 * own the PGM lock and have access to the page structure.
1816 *
1817 * @returns VBox status code.
1818 * @retval VINF_SUCCESS on success.
1819 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1820 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1821 *
1822 * @param pVM The cross context VM structure.
1823 * @param GCPhys The guest physical address of the page that should be mapped.
1824 * @param pPage Pointer to the PGMPAGE structure for the page.
1825 * @param ppv Where to store the address corresponding to GCPhys.
1826 * @param pLock Where to store the lock information that
1827 * pgmPhysReleaseInternalPageMappingLock needs.
1828 *
1829 * @internal
1830 */
1831int pgmPhysGCPhys2CCPtrInternalReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1832{
1833 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1834 PGM_LOCK_ASSERT_OWNER(pVM);
1835 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1836
1837 /*
1838 * Do the job.
1839 */
1840 PPGMPAGEMAPTLBE pTlbe;
1841 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1842 if (RT_FAILURE(rc))
1843 return rc;
1844 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1845 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1846 return VINF_SUCCESS;
1847}
1848
1849
1850/**
1851 * Requests the mapping of a guest page into the current context.
1852 *
1853 * This API should only be used for a very short term, as it will consume scarce
1854 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1855 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1856 *
1857 * This API will assume your intention is to write to the page, and will
1858 * therefore replace shared and zero pages. If you do not intend to modify
1859 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1860 *
1861 * @returns VBox status code.
1862 * @retval VINF_SUCCESS on success.
1863 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1864 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1865 *
1866 * @param pVM The cross context VM structure.
1867 * @param GCPhys The guest physical address of the page that should be
1868 * mapped.
1869 * @param ppv Where to store the address corresponding to GCPhys.
1870 * @param pLock Where to store the lock information that
1871 * PGMPhysReleasePageMappingLock needs.
1872 *
1873 * @remarks The caller is responsible for dealing with access handlers.
1874 * @todo Add an informational return code for pages with access handlers?
1875 *
1876 * @remark Avoid calling this API from within critical sections (other than
1877 * the PGM one) because of the deadlock risk. External threads may
1878 * need to delegate jobs to the EMTs.
1879 * @remarks Only one page is mapped! Make no assumption about what's after or
1880 * before the returned page!
1881 * @thread Any thread.
1882 */
1883VMM_INT_DECL(int) PGMPhysGCPhys2CCPtr(PVMCC pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1884{
1885 int rc = PGM_LOCK(pVM);
1886 AssertRCReturn(rc, rc);
1887
1888 /*
1889 * Query the Physical TLB entry for the page (may fail).
1890 */
1891 PPGMPAGEMAPTLBE pTlbe;
1892 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1893 if (RT_SUCCESS(rc))
1894 {
1895 /*
1896 * If the page is shared, the zero page, or being write monitored
1897 * it must be converted to a page that's writable if possible.
1898 */
1899 PPGMPAGE pPage = pTlbe->pPage;
1900 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1901 {
1902 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1903 if (RT_SUCCESS(rc))
1904 {
1905 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1906 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1907 }
1908 }
1909 if (RT_SUCCESS(rc))
1910 {
1911 /*
1912 * Now, just perform the locking and calculate the return address.
1913 */
1914 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1915 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1916 }
1917 }
1918
1919 PGM_UNLOCK(pVM);
1920 return rc;
1921}
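
/*
 * Usage sketch (illustrative only, not part of the original sources): map a
 * guest physical page for writing, patch a byte and release the lock again.
 * The helper name pgmSamplePatchGuestByte is hypothetical.
 */
#if 0 /* example only, not compiled */
static int pgmSamplePatchGuestByte(PVMCC pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;                    /* only this one page is mapped */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release ASAP */
    }
    return rc;
}
#endif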
1922
1923
1924/**
1925 * Requests the mapping of a guest page into the current context.
1926 *
1927 * This API should only be used for a very short term, as it will consume scarce
1928 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1929 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1930 *
1931 * @returns VBox status code.
1932 * @retval VINF_SUCCESS on success.
1933 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1934 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1935 *
1936 * @param pVM The cross context VM structure.
1937 * @param GCPhys The guest physical address of the page that should be
1938 * mapped.
1939 * @param ppv Where to store the address corresponding to GCPhys.
1940 * @param pLock Where to store the lock information that
1941 * PGMPhysReleasePageMappingLock needs.
1942 *
1943 * @remarks The caller is responsible for dealing with access handlers.
1944 * @todo Add an informational return code for pages with access handlers?
1945 *
1946 * @remarks Avoid calling this API from within critical sections (other than
1947 * the PGM one) because of the deadlock risk.
1948 * @remarks Only one page is mapped! Make no assumption about what's after or
1949 * before the returned page!
1950 * @thread Any thread.
1951 */
1952VMM_INT_DECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVMCC pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1953{
1954 int rc = PGM_LOCK(pVM);
1955 AssertRCReturn(rc, rc);
1956
1957 /*
1958 * Query the Physical TLB entry for the page (may fail).
1959 */
1960 PPGMPAGEMAPTLBE pTlbe;
1961 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1962 if (RT_SUCCESS(rc))
1963 {
1964        /* MMIO pages don't have any readable backing. */
1965 PPGMPAGE pPage = pTlbe->pPage;
1966 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1967 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1968 else
1969 {
1970 /*
1971 * Now, just perform the locking and calculate the return address.
1972 */
1973 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1974 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1975 }
1976 }
1977
1978 PGM_UNLOCK(pVM);
1979 return rc;
1980}
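
/*
 * Usage sketch (illustrative only): peek at guest memory without forcing
 * shared or zero pages to be replaced.  The helper name pgmSamplePeekGuestByte
 * is hypothetical.
 */
#if 0 /* example only, not compiled */
static int pgmSamplePeekGuestByte(PVMCC pVM, RTGCPHYS GCPhys, uint8_t *pbValue)
{
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pbValue = *(uint8_t const *)pv;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif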
1981
1982
1983/**
1984 * Requests the mapping of a guest page given by virtual address into the current context.
1985 *
1986 * This API should only be used for a very short term, as it will consume
1987 * scarce resources (R0 and GC) in the mapping cache. When you're done
1988 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1989 *
1990 * This API will assume your intention is to write to the page, and will
1991 * therefore replace shared and zero pages. If you do not intend to modify
1992 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1993 *
1994 * @returns VBox status code.
1995 * @retval VINF_SUCCESS on success.
1996 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1997 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1998 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1999 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2000 *
2001 * @param pVCpu The cross context virtual CPU structure.
2002 * @param GCPtr The guest virtual address of the page that should be
2003 * mapped.
2004 * @param ppv Where to store the address corresponding to GCPtr.
2005 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2006 *
2007 * @remark Avoid calling this API from within critical sections (other than
2008 * the PGM one) because of the deadlock risk.
2009 * @thread EMT
2010 */
2011VMM_INT_DECL(int) PGMPhysGCPtr2CCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
2012{
2013 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2014 RTGCPHYS GCPhys;
2015 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2016 if (RT_SUCCESS(rc))
2017 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2018 return rc;
2019}
2020
2021
2022/**
2023 * Requests the mapping of a guest page given by virtual address into the current context.
2024 *
2025 * This API should only be used for a very short term, as it will consume
2026 * scarce resources (R0 and GC) in the mapping cache. When you're done
2027 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2028 *
2029 * @returns VBox status code.
2030 * @retval VINF_SUCCESS on success.
2031 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2032 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2033 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2034 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2035 *
2036 * @param pVCpu The cross context virtual CPU structure.
2037 * @param GCPtr The guest virtual address of the page that should be
2038 * mapped.
2039 * @param ppv Where to store the address corresponding to GCPtr.
2040 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2041 *
2042 * @remark Avoid calling this API from within critical sections (other than
2043 * the PGM one) because of the deadlock risk.
2044 * @thread EMT
2045 */
2046VMM_INT_DECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
2047{
2048 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2049 RTGCPHYS GCPhys;
2050 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2051 if (RT_SUCCESS(rc))
2052 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2053 return rc;
2054}
2055
2056
2057/**
2058 * Release the mapping of a guest page.
2059 *
2060 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
2061 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
2062 *
2063 * @param pVM The cross context VM structure.
2064 * @param pLock The lock structure initialized by the mapping function.
2065 */
2066VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2067{
2068# ifndef IN_RING0
2069 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
2070# endif
2071 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2072 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2073
2074 pLock->uPageAndType = 0;
2075 pLock->pvMap = NULL;
2076
2077 PGM_LOCK_VOID(pVM);
2078 if (fWriteLock)
2079 {
2080 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2081 Assert(cLocks > 0);
2082 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2083 {
2084 if (cLocks == 1)
2085 {
2086 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2087 pVM->pgm.s.cWriteLockedPages--;
2088 }
2089 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2090 }
2091
2092 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2093 { /* probably extremely likely */ }
2094 else
2095 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2096 }
2097 else
2098 {
2099 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2100 Assert(cLocks > 0);
2101 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2102 {
2103 if (cLocks == 1)
2104 {
2105 Assert(pVM->pgm.s.cReadLockedPages > 0);
2106 pVM->pgm.s.cReadLockedPages--;
2107 }
2108 PGM_PAGE_DEC_READ_LOCKS(pPage);
2109 }
2110 }
2111
2112# ifndef IN_RING0
2113 if (pMap)
2114 {
2115 Assert(pMap->cRefs >= 1);
2116 pMap->cRefs--;
2117 }
2118# endif
2119 PGM_UNLOCK(pVM);
2120}
2121
2122
2123#ifdef IN_RING3
2124/**
2125 * Release the mapping of multiple guest pages.
2126 *
2127 * This is the counterpart to PGMR3PhysBulkGCPhys2CCPtrExternal() and
2128 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal().
2129 *
2130 * @param pVM The cross context VM structure.
2131 * @param cPages Number of pages to unlock.
2132 * @param paLocks Array of lock structures initialized by the mapping
2133 * function.
2134 */
2135VMMDECL(void) PGMPhysBulkReleasePageMappingLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
2136{
2137 Assert(cPages > 0);
2138 bool const fWriteLock = (paLocks[0].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2139#ifdef VBOX_STRICT
2140 for (uint32_t i = 1; i < cPages; i++)
2141 {
2142 Assert(fWriteLock == ((paLocks[i].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE));
2143 AssertPtr(paLocks[i].uPageAndType);
2144 }
2145#endif
2146
2147 PGM_LOCK_VOID(pVM);
2148 if (fWriteLock)
2149 {
2150 /*
2151 * Write locks:
2152 */
2153 for (uint32_t i = 0; i < cPages; i++)
2154 {
2155 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2156 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2157 Assert(cLocks > 0);
2158 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2159 {
2160 if (cLocks == 1)
2161 {
2162 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2163 pVM->pgm.s.cWriteLockedPages--;
2164 }
2165 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2166 }
2167
2168 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2169 { /* probably extremely likely */ }
2170 else
2171 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2172
2173 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2174 if (pMap)
2175 {
2176 Assert(pMap->cRefs >= 1);
2177 pMap->cRefs--;
2178 }
2179
2180 /* Yield the lock: */
2181 if ((i & 1023) == 1023 && i + 1 < cPages)
2182 {
2183 PGM_UNLOCK(pVM);
2184 PGM_LOCK_VOID(pVM);
2185 }
2186 }
2187 }
2188 else
2189 {
2190 /*
2191 * Read locks:
2192 */
2193 for (uint32_t i = 0; i < cPages; i++)
2194 {
2195 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2196 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2197 Assert(cLocks > 0);
2198 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2199 {
2200 if (cLocks == 1)
2201 {
2202 Assert(pVM->pgm.s.cReadLockedPages > 0);
2203 pVM->pgm.s.cReadLockedPages--;
2204 }
2205 PGM_PAGE_DEC_READ_LOCKS(pPage);
2206 }
2207
2208 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2209 if (pMap)
2210 {
2211 Assert(pMap->cRefs >= 1);
2212 pMap->cRefs--;
2213 }
2214
2215 /* Yield the lock: */
2216 if ((i & 1023) == 1023 && i + 1 < cPages)
2217 {
2218 PGM_UNLOCK(pVM);
2219 PGM_LOCK_VOID(pVM);
2220 }
2221 }
2222 }
2223 PGM_UNLOCK(pVM);
2224
2225 RT_BZERO(paLocks, sizeof(paLocks[0]) * cPages);
2226}
2227#endif /* IN_RING3 */
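
/*
 * Usage sketch (illustrative only): releasing a batch of mapping locks that
 * were filled in by one of the ring-3 bulk mapping APIs mentioned above.  The
 * helper name and the origin of the lock array are assumptions.
 */
#if 0 /* example only, not compiled */
static void pgmSampleReleaseBatch(PVMCC pVM, PPGMPAGEMAPLOCK paLocks, uint32_t cPages)
{
    /* All entries must be of the same kind (all read or all write locks). */
    PGMPhysBulkReleasePageMappingLocks(pVM, cPages, paLocks);
    /* The array is zeroed on return, which makes double releases easy to spot. */
}
#endif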
2228
2229
2230/**
2231 * Release the internal mapping of a guest page.
2232 *
2233 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
2234 * pgmPhysGCPhys2CCPtrInternalReadOnly.
2235 *
2236 * @param pVM The cross context VM structure.
2237 * @param pLock The lock structure initialized by the mapping function.
2238 *
2239 * @remarks Caller must hold the PGM lock.
2240 */
2241void pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2242{
2243 PGM_LOCK_ASSERT_OWNER(pVM);
2244 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
2245}
2246
2247
2248/**
2249 * Converts a GC physical address to a HC ring-3 pointer.
2250 *
2251 * @returns VINF_SUCCESS on success.
2252 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2253 * page but has no physical backing.
2254 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2255 * GC physical address.
2256 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2257 * a dynamic ram chunk boundary.
2258 *
2259 * @param pVM The cross context VM structure.
2260 * @param GCPhys The GC physical address to convert.
2261 * @param pR3Ptr Where to store the R3 pointer on success.
2262 *
2263 * @deprecated Avoid when possible!
2264 */
2265int pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2266{
2267/** @todo this is kind of hacky and needs some more work. */
2268#ifndef DEBUG_sandervl
2269 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
2270#endif
2271
2272 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
2273 PGM_LOCK_VOID(pVM);
2274
2275 PPGMRAMRANGE pRam;
2276 PPGMPAGE pPage;
2277 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2278 if (RT_SUCCESS(rc))
2279 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2280
2281 PGM_UNLOCK(pVM);
2282 Assert(rc <= VINF_SUCCESS);
2283 return rc;
2284}
2285
2286
2287/**
2288 * Converts a guest pointer to a GC physical address.
2289 *
2290 * This uses the current CR3/CR0/CR4 of the guest.
2291 *
2292 * @returns VBox status code.
2293 * @param pVCpu The cross context virtual CPU structure.
2294 * @param GCPtr The guest pointer to convert.
2295 * @param pGCPhys Where to store the GC physical address.
2296 */
2297VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2298{
2299 PGMPTWALK Walk;
2300 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2301 if (pGCPhys && RT_SUCCESS(rc))
2302 *pGCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK);
2303 return rc;
2304}
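
/*
 * Usage sketch (illustrative only): translate a guest virtual address to a
 * guest physical one using the current paging mode of the vCPU.  The helper
 * name pgmSampleTranslate is hypothetical.
 */
#if 0 /* example only, not compiled */
static int pgmSampleTranslate(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
    if (RT_SUCCESS(rc))
        Log(("GCPtr %RGv -> GCPhys %RGp\n", GCPtr, GCPhys));
    return rc;
}
#endif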
2305
2306
2307/**
2308 * Converts a guest pointer to a HC physical address.
2309 *
2310 * This uses the current CR3/CR0/CR4 of the guest.
2311 *
2312 * @returns VBox status code.
2313 * @param pVCpu The cross context virtual CPU structure.
2314 * @param GCPtr The guest pointer to convert.
2315 * @param pHCPhys Where to store the HC physical address.
2316 */
2317VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2318{
2319 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2320 PGMPTWALK Walk;
2321 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2322 if (RT_SUCCESS(rc))
2323 rc = PGMPhysGCPhys2HCPhys(pVM, Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK), pHCPhys);
2324 return rc;
2325}
2326
2327
2328
2329#undef LOG_GROUP
2330#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2331
2332
2333#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2334/**
2335 * Cache PGMPhys memory access
2336 *
2337 * @param pVM The cross context VM structure.
2338 * @param pCache Cache structure pointer
2339 * @param GCPhys GC physical address
2340 * @param pbR3    Ring-3 pointer corresponding to the physical page
2341 *
2342 * @thread EMT.
2343 */
2344static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2345{
2346 uint32_t iCacheIndex;
2347
2348 Assert(VM_IS_EMT(pVM));
2349
2350 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
2351 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
2352
2353 iCacheIndex = ((GCPhys >> GUEST_PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2354
2355 ASMBitSet(&pCache->aEntries, iCacheIndex);
2356
2357 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2358 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2359}
2360#endif /* IN_RING3 */
2361
2362
2363/**
2364 * Deals with reading from a page with one or more ALL access handlers.
2365 *
2366 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2367 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2368 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2369 *
2370 * @param pVM The cross context VM structure.
2371 * @param pPage The page descriptor.
2372 * @param GCPhys The physical address to start reading at.
2373 * @param pvBuf Where to put the bits we read.
2374 * @param cb How much to read - less or equal to a page.
2375 * @param enmOrigin The origin of this call.
2376 */
2377static VBOXSTRICTRC pgmPhysReadHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb,
2378 PGMACCESSORIGIN enmOrigin)
2379{
2380 /*
2381     * The most frequent accesses here are MMIO and shadowed ROM.
2382     * The current code ASSUMES that all these access handlers cover full pages!
2383 */
2384
2385 /*
2386 * Whatever we do we need the source page, map it first.
2387 */
2388 PGMPAGEMAPLOCK PgMpLck;
2389 const void *pvSrc = NULL;
2390 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2391/** @todo Check how this can work for MMIO pages? */
2392 if (RT_FAILURE(rc))
2393 {
2394 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2395 GCPhys, pPage, rc));
2396 memset(pvBuf, 0xff, cb);
2397 return VINF_SUCCESS;
2398 }
2399
2400 VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
2401
2402 /*
2403 * Deal with any physical handlers.
2404 */
2405 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2406 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2407 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2408 {
2409 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2410 if (pCur)
2411 {
2412 Assert(pCur && GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2413 Assert((pCur->Core.Key & GUEST_PAGE_OFFSET_MASK) == 0);
2414 Assert((pCur->Core.KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
2415#ifndef IN_RING3
2416 if (enmOrigin != PGMACCESSORIGIN_IEM)
2417 {
2418 /* Cannot reliably handle informational status codes in this context */
2419 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2420 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2421 }
2422#endif
2423 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2424 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler; Assert(pfnHandler);
2425 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
2426 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
2427
2428 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pCur->pszDesc) ));
2429 STAM_PROFILE_START(&pCur->Stat, h);
2430 PGM_LOCK_ASSERT_OWNER(pVM);
2431
2432 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2433 PGM_UNLOCK(pVM);
2434 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, uUser);
2435 PGM_LOCK_VOID(pVM);
2436
2437#ifdef VBOX_WITH_STATISTICS
2438 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2439 if (pCur)
2440 STAM_PROFILE_STOP(&pCur->Stat, h);
2441#else
2442 pCur = NULL; /* might not be valid anymore. */
2443#endif
2444 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, false),
2445 ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
2446 if ( rcStrict != VINF_PGM_HANDLER_DO_DEFAULT
2447 && !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2448 {
2449 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2450 return rcStrict;
2451 }
2452 }
2453 else
2454 AssertLogRelMsgFailed(("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2455 }
2456
2457 /*
2458 * Take the default action.
2459 */
2460 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2461 {
2462 memcpy(pvBuf, pvSrc, cb);
2463 rcStrict = VINF_SUCCESS;
2464 }
2465 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2466 return rcStrict;
2467}
2468
2469
2470/**
2471 * Read physical memory.
2472 *
2473 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2474 * want to ignore those.
2475 *
2476 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2477 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2478 * @retval VINF_SUCCESS in all context - read completed.
2479 *
2480 * @retval VINF_EM_OFF in RC and R0 - read completed.
2481 * @retval VINF_EM_SUSPEND in RC and R0 - read completed.
2482 * @retval VINF_EM_RESET in RC and R0 - read completed.
2483 * @retval VINF_EM_HALT in RC and R0 - read completed.
2484 * @retval VINF_SELM_SYNC_GDT in RC only - read completed.
2485 *
2486 * @retval VINF_EM_DBG_STOP in RC and R0 - read completed.
2487 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - read completed.
2488 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2489 *
2490 * @retval VINF_IOM_R3_MMIO_READ in RC and R0.
2491 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2492 *
2493 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2494 *
2495 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2496 * haven't been cleared for strict status codes yet.
2497 *
2498 * @param pVM The cross context VM structure.
2499 * @param GCPhys Physical address start reading from.
2500 * @param pvBuf Where to put the read bits.
2501 * @param cbRead How many bytes to read.
2502 * @param enmOrigin The origin of this call.
2503 */
2504VMMDECL(VBOXSTRICTRC) PGMPhysRead(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2505{
2506 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2507 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2508
2509 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysRead));
2510 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2511
2512 PGM_LOCK_VOID(pVM);
2513
2514 /*
2515 * Copy loop on ram ranges.
2516 */
2517 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2518 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2519 for (;;)
2520 {
2521 /* Inside range or not? */
2522 if (pRam && GCPhys >= pRam->GCPhys)
2523 {
2524 /*
2525             * Must work our way through this range page by page.
2526 */
2527 RTGCPHYS off = GCPhys - pRam->GCPhys;
2528 while (off < pRam->cb)
2529 {
2530 unsigned iPage = off >> GUEST_PAGE_SHIFT;
2531 PPGMPAGE pPage = &pRam->aPages[iPage];
2532 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
2533 if (cb > cbRead)
2534 cb = cbRead;
2535
2536 /*
2537 * Normal page? Get the pointer to it.
2538 */
2539 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2540 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2541 {
2542 /*
2543 * Get the pointer to the page.
2544 */
2545 PGMPAGEMAPLOCK PgMpLck;
2546 const void *pvSrc;
2547 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2548 if (RT_SUCCESS(rc))
2549 {
2550 memcpy(pvBuf, pvSrc, cb);
2551 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2552 }
2553 else
2554 {
2555 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2556 pRam->GCPhys + off, pPage, rc));
2557 memset(pvBuf, 0xff, cb);
2558 }
2559 }
2560 /*
2561 * Have ALL/MMIO access handlers.
2562 */
2563 else
2564 {
2565 VBOXSTRICTRC rcStrict2 = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2566 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2567 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2568 else
2569 {
2570 memset(pvBuf, 0xff, cb);
2571 PGM_UNLOCK(pVM);
2572 return rcStrict2;
2573 }
2574 }
2575
2576 /* next page */
2577 if (cb >= cbRead)
2578 {
2579 PGM_UNLOCK(pVM);
2580 return rcStrict;
2581 }
2582 cbRead -= cb;
2583 off += cb;
2584 pvBuf = (char *)pvBuf + cb;
2585 } /* walk pages in ram range. */
2586
2587 GCPhys = pRam->GCPhysLast + 1;
2588 }
2589 else
2590 {
2591 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2592
2593 /*
2594 * Unassigned address space.
2595 */
2596 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2597 if (cb >= cbRead)
2598 {
2599 memset(pvBuf, 0xff, cbRead);
2600 break;
2601 }
2602 memset(pvBuf, 0xff, cb);
2603
2604 cbRead -= cb;
2605 pvBuf = (char *)pvBuf + cb;
2606 GCPhys += cb;
2607 }
2608
2609 /* Advance range if necessary. */
2610 while (pRam && GCPhys > pRam->GCPhysLast)
2611 pRam = pRam->CTX_SUFF(pNext);
2612 } /* Ram range walk */
2613
2614 PGM_UNLOCK(pVM);
2615 return rcStrict;
2616}
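
/*
 * Usage sketch (illustrative only): a handler-respecting read of a small
 * value.  The helper name is hypothetical; the caller supplies the access
 * origin and must cope with the strict status codes listed above.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC pgmSampleReadU32(PVMCC pVM, RTGCPHYS GCPhys, uint32_t *pu32, PGMACCESSORIGIN enmOrigin)
{
    VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pu32, sizeof(*pu32), enmOrigin);
    /* In ring-0 an informational status may mean the access has to be redone in ring-3. */
    if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict))
        Log(("PGMPhysRead(%RGp) -> %Rrc\n", GCPhys, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif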
2617
2618
2619/**
2620 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2621 *
2622 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2623 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2624 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2625 *
2626 * @param pVM The cross context VM structure.
2627 * @param pPage The page descriptor.
2628 * @param GCPhys The physical address to start writing at.
2629 * @param pvBuf What to write.
2630 * @param cbWrite How much to write - less or equal to a page.
2631 * @param enmOrigin The origin of this call.
2632 */
2633static VBOXSTRICTRC pgmPhysWriteHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2634 PGMACCESSORIGIN enmOrigin)
2635{
2636 PGMPAGEMAPLOCK PgMpLck;
2637 void *pvDst = NULL;
2638 VBOXSTRICTRC rcStrict;
2639
2640 /*
2641 * Give priority to physical handlers (like #PF does).
2642 *
2643 * Hope for a lonely physical handler first that covers the whole write
2644 * area. This should be a pretty frequent case with MMIO and the heavy
2645 * usage of full page handlers in the page pool.
2646 */
2647 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2648 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2649 if (pCur)
2650 {
2651 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2652#ifndef IN_RING3
2653 if (enmOrigin != PGMACCESSORIGIN_IEM)
2654 /* Cannot reliably handle informational status codes in this context */
2655 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2656#endif
2657 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2658 if (cbRange > cbWrite)
2659 cbRange = cbWrite;
2660
2661 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->pfnHandler);
2662 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2663 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2664 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2665 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2666 else
2667 rcStrict = VINF_SUCCESS;
2668 if (RT_SUCCESS(rcStrict))
2669 {
2670 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2671 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
2672 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
2673 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
2674 STAM_PROFILE_START(&pCur->Stat, h);
2675
2676 /* Most handlers will want to release the PGM lock for deadlock prevention
2677 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2678 dirty page trackers will want to keep it for performance reasons. */
2679 PGM_LOCK_ASSERT_OWNER(pVM);
2680 if (pCurType->fKeepPgmLock)
2681 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2682 else
2683 {
2684 PGM_UNLOCK(pVM);
2685 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2686 PGM_LOCK_VOID(pVM);
2687 }
2688
2689#ifdef VBOX_WITH_STATISTICS
2690 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2691 if (pCur)
2692 STAM_PROFILE_STOP(&pCur->Stat, h);
2693#else
2694 pCur = NULL; /* might not be valid anymore. */
2695#endif
2696 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2697 {
2698 if (pvDst)
2699 memcpy(pvDst, pvBuf, cbRange);
2700 rcStrict = VINF_SUCCESS;
2701 }
2702 else
2703 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
2704 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2705 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pCur ? R3STRING(pCur->pszDesc) : ""));
2706 }
2707 else
2708 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2709 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
2710 if (RT_LIKELY(cbRange == cbWrite) || !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2711 {
2712 if (pvDst)
2713 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2714 return rcStrict;
2715 }
2716
2717 /* more fun to be had below */
2718 cbWrite -= cbRange;
2719 GCPhys += cbRange;
2720 pvBuf = (uint8_t *)pvBuf + cbRange;
2721 pvDst = (uint8_t *)pvDst + cbRange;
2722 }
2723 else /* The handler is somewhere else in the page, deal with it below. */
2724 rcStrict = VINF_SUCCESS;
2725 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all GUEST_PAGE_SIZEed! */
2726
2727 /*
2728 * Deal with all the odd ends (used to be deal with virt+phys).
2729 */
2730 Assert(rcStrict != VINF_PGM_HANDLER_DO_DEFAULT);
2731
2732 /* We need a writable destination page. */
2733 if (!pvDst)
2734 {
2735 int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2736 AssertLogRelMsgReturn(RT_SUCCESS(rc2),
2737 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n", GCPhys, pPage, rc2),
2738 rc2);
2739 }
2740
2741    /** @todo clean up this code some more now that there are no virtual handlers any
2742 * more. */
2743 /* The loop state (big + ugly). */
2744 PPGMPHYSHANDLER pPhys = NULL;
2745 uint32_t offPhys = GUEST_PAGE_SIZE;
2746 uint32_t offPhysLast = GUEST_PAGE_SIZE;
2747 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2748
2749 /* The loop. */
2750 for (;;)
2751 {
2752 if (fMorePhys && !pPhys)
2753 {
2754 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2755 if (pPhys)
2756 {
2757 offPhys = 0;
2758 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2759 }
2760 else
2761 {
2762 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2763 GCPhys, true /* fAbove */);
2764 if ( pPhys
2765 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2766 {
2767 offPhys = pPhys->Core.Key - GCPhys;
2768 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2769 }
2770 else
2771 {
2772 pPhys = NULL;
2773 fMorePhys = false;
2774 offPhys = offPhysLast = GUEST_PAGE_SIZE;
2775 }
2776 }
2777 }
2778
2779 /*
2780 * Handle access to space without handlers (that's easy).
2781 */
2782 VBOXSTRICTRC rcStrict2 = VINF_PGM_HANDLER_DO_DEFAULT;
2783 uint32_t cbRange = (uint32_t)cbWrite;
2784 Assert(cbRange == cbWrite);
2785
2786 /*
2787 * Physical handler.
2788 */
2789 if (!offPhys)
2790 {
2791#ifndef IN_RING3
2792 if (enmOrigin != PGMACCESSORIGIN_IEM)
2793 /* Cannot reliably handle informational status codes in this context */
2794 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2795#endif
2796 if (cbRange > offPhysLast + 1)
2797 cbRange = offPhysLast + 1;
2798
2799 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pPhys);
2800 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
2801 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pPhys->uUser
2802 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pPhys->uUser);
2803
2804 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2805 STAM_PROFILE_START(&pPhys->Stat, h);
2806
2807 /* Most handlers will want to release the PGM lock for deadlock prevention
2808 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2809 dirty page trackers will want to keep it for performance reasons. */
2810 PGM_LOCK_ASSERT_OWNER(pVM);
2811 if (pCurType->fKeepPgmLock)
2812 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2813 else
2814 {
2815 PGM_UNLOCK(pVM);
2816 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2817 PGM_LOCK_VOID(pVM);
2818 }
2819
2820#ifdef VBOX_WITH_STATISTICS
2821 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2822 if (pPhys)
2823 STAM_PROFILE_STOP(&pPhys->Stat, h);
2824#else
2825 pPhys = NULL; /* might not be valid anymore. */
2826#endif
2827 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true),
2828 ("rcStrict2=%Rrc (rcStrict=%Rrc) GCPhys=%RGp pPage=%R[pgmpage] %s\n", VBOXSTRICTRC_VAL(rcStrict2),
2829 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pPhys ? R3STRING(pPhys->pszDesc) : ""));
2830 }
2831
2832 /*
2833 * Execute the default action and merge the status codes.
2834 */
2835 if (rcStrict2 == VINF_PGM_HANDLER_DO_DEFAULT)
2836 {
2837 memcpy(pvDst, pvBuf, cbRange);
2838 rcStrict2 = VINF_SUCCESS;
2839 }
2840 else if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2841 {
2842 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2843 return rcStrict2;
2844 }
2845 else
2846 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2847
2848 /*
2849 * Advance if we've got more stuff to do.
2850 */
2851 if (cbRange >= cbWrite)
2852 {
2853 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2854 return rcStrict;
2855 }
2856
2857
2858 cbWrite -= cbRange;
2859 GCPhys += cbRange;
2860 pvBuf = (uint8_t *)pvBuf + cbRange;
2861 pvDst = (uint8_t *)pvDst + cbRange;
2862
2863 offPhys -= cbRange;
2864 offPhysLast -= cbRange;
2865 }
2866}
2867
2868
2869/**
2870 * Write to physical memory.
2871 *
2872 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2873 * want to ignore those.
2874 *
2875 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2876 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2877 * @retval VINF_SUCCESS in all context - write completed.
2878 *
2879 * @retval VINF_EM_OFF in RC and R0 - write completed.
2880 * @retval VINF_EM_SUSPEND in RC and R0 - write completed.
2881 * @retval VINF_EM_RESET in RC and R0 - write completed.
2882 * @retval VINF_EM_HALT in RC and R0 - write completed.
2883 * @retval VINF_SELM_SYNC_GDT in RC only - write completed.
2884 *
2885 * @retval VINF_EM_DBG_STOP in RC and R0 - write completed.
2886 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - write completed.
2887 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2888 *
2889 * @retval VINF_IOM_R3_MMIO_WRITE in RC and R0.
2890 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2891 * @retval VINF_IOM_R3_MMIO_COMMIT_WRITE in RC and R0.
2892 *
2893 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT in RC only - write completed.
2894 * @retval VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT in RC only.
2895 * @retval VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT in RC only.
2896 * @retval VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT in RC only.
2897 * @retval VINF_CSAM_PENDING_ACTION in RC only.
2898 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2899 *
2900 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2901 * haven't been cleared for strict status codes yet.
2902 *
2903 *
2904 * @param pVM The cross context VM structure.
2905 * @param GCPhys Physical address to write to.
2906 * @param pvBuf What to write.
2907 * @param cbWrite How many bytes to write.
2908 * @param enmOrigin Who is calling.
2909 */
2910VMMDECL(VBOXSTRICTRC) PGMPhysWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2911{
2912 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2913 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2914 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2915
2916 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWrite));
2917 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2918
2919 PGM_LOCK_VOID(pVM);
2920
2921 /*
2922 * Copy loop on ram ranges.
2923 */
2924 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2925 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2926 for (;;)
2927 {
2928 /* Inside range or not? */
2929 if (pRam && GCPhys >= pRam->GCPhys)
2930 {
2931 /*
2932             * Must work our way through this range page by page.
2933 */
2934 RTGCPTR off = GCPhys - pRam->GCPhys;
2935 while (off < pRam->cb)
2936 {
2937 RTGCPTR iPage = off >> GUEST_PAGE_SHIFT;
2938 PPGMPAGE pPage = &pRam->aPages[iPage];
2939 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
2940 if (cb > cbWrite)
2941 cb = cbWrite;
2942
2943 /*
2944 * Normal page? Get the pointer to it.
2945 */
2946 if ( !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
2947 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2948 {
2949 PGMPAGEMAPLOCK PgMpLck;
2950 void *pvDst;
2951 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
2952 if (RT_SUCCESS(rc))
2953 {
2954 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2955 memcpy(pvDst, pvBuf, cb);
2956 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2957 }
2958 /* Ignore writes to ballooned pages. */
2959 else if (!PGM_PAGE_IS_BALLOONED(pPage))
2960 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2961 pRam->GCPhys + off, pPage, rc));
2962 }
2963 /*
2964 * Active WRITE or ALL access handlers.
2965 */
2966 else
2967 {
2968 VBOXSTRICTRC rcStrict2 = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2969 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2970 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2971 else
2972 {
2973 PGM_UNLOCK(pVM);
2974 return rcStrict2;
2975 }
2976 }
2977
2978 /* next page */
2979 if (cb >= cbWrite)
2980 {
2981 PGM_UNLOCK(pVM);
2982 return rcStrict;
2983 }
2984
2985 cbWrite -= cb;
2986 off += cb;
2987 pvBuf = (const char *)pvBuf + cb;
2988 } /* walk pages in ram range */
2989
2990 GCPhys = pRam->GCPhysLast + 1;
2991 }
2992 else
2993 {
2994 /*
2995 * Unassigned address space, skip it.
2996 */
2997 if (!pRam)
2998 break;
2999 size_t cb = pRam->GCPhys - GCPhys;
3000 if (cb >= cbWrite)
3001 break;
3002 cbWrite -= cb;
3003 pvBuf = (const char *)pvBuf + cb;
3004 GCPhys += cb;
3005 }
3006
3007 /* Advance range if necessary. */
3008 while (pRam && GCPhys > pRam->GCPhysLast)
3009 pRam = pRam->CTX_SUFF(pNext);
3010 } /* Ram range walk */
3011
3012 PGM_UNLOCK(pVM);
3013 return rcStrict;
3014}
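
/*
 * Usage sketch (illustrative only): a handler-respecting write of a small
 * value, mirroring the read sketch above.  The helper name is hypothetical.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC pgmSampleWriteU32(PVMCC pVM, RTGCPHYS GCPhys, uint32_t u32, PGMACCESSORIGIN enmOrigin)
{
    VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, &u32, sizeof(u32), enmOrigin);
    if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict))
        Log(("PGMPhysWrite(%RGp) -> %Rrc\n", GCPhys, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif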
3015
3016
3017/**
3018 * Read from guest physical memory by GC physical address, bypassing
3019 * MMIO and access handlers.
3020 *
3021 * @returns VBox status code.
3022 * @param pVM The cross context VM structure.
3023 * @param pvDst The destination address.
3024 * @param GCPhysSrc The source address (GC physical address).
3025 * @param cb The number of bytes to read.
3026 */
3027VMMDECL(int) PGMPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
3028{
3029 /*
3030 * Treat the first page as a special case.
3031 */
3032 if (!cb)
3033 return VINF_SUCCESS;
3034
3035 /* map the 1st page */
3036 void const *pvSrc;
3037 PGMPAGEMAPLOCK Lock;
3038 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3039 if (RT_FAILURE(rc))
3040 return rc;
3041
3042 /* optimize for the case where access is completely within the first page. */
3043 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysSrc & GUEST_PAGE_OFFSET_MASK);
3044 if (RT_LIKELY(cb <= cbPage))
3045 {
3046 memcpy(pvDst, pvSrc, cb);
3047 PGMPhysReleasePageMappingLock(pVM, &Lock);
3048 return VINF_SUCCESS;
3049 }
3050
3051 /* copy to the end of the page. */
3052 memcpy(pvDst, pvSrc, cbPage);
3053 PGMPhysReleasePageMappingLock(pVM, &Lock);
3054 GCPhysSrc += cbPage;
3055 pvDst = (uint8_t *)pvDst + cbPage;
3056 cb -= cbPage;
3057
3058 /*
3059 * Page by page.
3060 */
3061 for (;;)
3062 {
3063 /* map the page */
3064 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3065 if (RT_FAILURE(rc))
3066 return rc;
3067
3068 /* last page? */
3069 if (cb <= GUEST_PAGE_SIZE)
3070 {
3071 memcpy(pvDst, pvSrc, cb);
3072 PGMPhysReleasePageMappingLock(pVM, &Lock);
3073 return VINF_SUCCESS;
3074 }
3075
3076 /* copy the entire page and advance */
3077 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3078 PGMPhysReleasePageMappingLock(pVM, &Lock);
3079 GCPhysSrc += GUEST_PAGE_SIZE;
3080 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
3081 cb -= GUEST_PAGE_SIZE;
3082 }
3083 /* won't ever get here. */
3084}
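
/*
 * Usage sketch (illustrative only): copy a block of plain guest RAM while
 * deliberately bypassing MMIO and access handlers, e.g. for debugger style
 * inspection.  The helper name pgmSampleSnapshotRam is hypothetical.
 */
#if 0 /* example only, not compiled */
static int pgmSampleSnapshotRam(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
{
    /* No handlers are triggered and no dirty/accessed tracking takes place. */
    return PGMPhysSimpleReadGCPhys(pVM, pvDst, GCPhysSrc, cb);
}
#endif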
3085
3086
3087/**
3088 * Write to guest physical memory by GC physical address.
3090 *
3091 * This will bypass MMIO and access handlers.
3092 *
3093 * @returns VBox status code.
3094 * @param pVM The cross context VM structure.
3095 * @param GCPhysDst The GC physical address of the destination.
3096 * @param pvSrc The source buffer.
3097 * @param cb The number of bytes to write.
3098 */
3099VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
3100{
3101 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
3102
3103 /*
3104 * Treat the first page as a special case.
3105 */
3106 if (!cb)
3107 return VINF_SUCCESS;
3108
3109 /* map the 1st page */
3110 void *pvDst;
3111 PGMPAGEMAPLOCK Lock;
3112 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3113 if (RT_FAILURE(rc))
3114 return rc;
3115
3116 /* optimize for the case where access is completely within the first page. */
3117 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysDst & GUEST_PAGE_OFFSET_MASK);
3118 if (RT_LIKELY(cb <= cbPage))
3119 {
3120 memcpy(pvDst, pvSrc, cb);
3121 PGMPhysReleasePageMappingLock(pVM, &Lock);
3122 return VINF_SUCCESS;
3123 }
3124
3125 /* copy to the end of the page. */
3126 memcpy(pvDst, pvSrc, cbPage);
3127 PGMPhysReleasePageMappingLock(pVM, &Lock);
3128 GCPhysDst += cbPage;
3129 pvSrc = (const uint8_t *)pvSrc + cbPage;
3130 cb -= cbPage;
3131
3132 /*
3133 * Page by page.
3134 */
3135 for (;;)
3136 {
3137 /* map the page */
3138 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3139 if (RT_FAILURE(rc))
3140 return rc;
3141
3142 /* last page? */
3143 if (cb <= GUEST_PAGE_SIZE)
3144 {
3145 memcpy(pvDst, pvSrc, cb);
3146 PGMPhysReleasePageMappingLock(pVM, &Lock);
3147 return VINF_SUCCESS;
3148 }
3149
3150 /* copy the entire page and advance */
3151 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3152 PGMPhysReleasePageMappingLock(pVM, &Lock);
3153 GCPhysDst += GUEST_PAGE_SIZE;
3154 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3155 cb -= GUEST_PAGE_SIZE;
3156 }
3157 /* won't ever get here. */
3158}
3159
3160
3161/**
3162 * Read from guest physical memory referenced by GC pointer.
3163 *
3164 * This function uses the current CR3/CR0/CR4 of the guest and will
3165 * bypass access handlers and not set any accessed bits.
3166 *
3167 * @returns VBox status code.
3168 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3169 * @param pvDst The destination address.
3170 * @param GCPtrSrc The source address (GC pointer).
3171 * @param cb The number of bytes to read.
3172 */
3173VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3174{
3175 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3176/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3177
3178 /*
3179 * Treat the first page as a special case.
3180 */
3181 if (!cb)
3182 return VINF_SUCCESS;
3183
3184 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleRead));
3185 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3186
3187 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3188 * when many VCPUs are fighting for the lock.
3189 */
3190 PGM_LOCK_VOID(pVM);
3191
3192 /* map the 1st page */
3193 void const *pvSrc;
3194 PGMPAGEMAPLOCK Lock;
3195 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3196 if (RT_FAILURE(rc))
3197 {
3198 PGM_UNLOCK(pVM);
3199 return rc;
3200 }
3201
3202 /* optimize for the case where access is completely within the first page. */
3203 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3204 if (RT_LIKELY(cb <= cbPage))
3205 {
3206 memcpy(pvDst, pvSrc, cb);
3207 PGMPhysReleasePageMappingLock(pVM, &Lock);
3208 PGM_UNLOCK(pVM);
3209 return VINF_SUCCESS;
3210 }
3211
3212 /* copy to the end of the page. */
3213 memcpy(pvDst, pvSrc, cbPage);
3214 PGMPhysReleasePageMappingLock(pVM, &Lock);
3215 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3216 pvDst = (uint8_t *)pvDst + cbPage;
3217 cb -= cbPage;
3218
3219 /*
3220 * Page by page.
3221 */
3222 for (;;)
3223 {
3224 /* map the page */
3225 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3226 if (RT_FAILURE(rc))
3227 {
3228 PGM_UNLOCK(pVM);
3229 return rc;
3230 }
3231
3232 /* last page? */
3233 if (cb <= GUEST_PAGE_SIZE)
3234 {
3235 memcpy(pvDst, pvSrc, cb);
3236 PGMPhysReleasePageMappingLock(pVM, &Lock);
3237 PGM_UNLOCK(pVM);
3238 return VINF_SUCCESS;
3239 }
3240
3241 /* copy the entire page and advance */
3242 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3243 PGMPhysReleasePageMappingLock(pVM, &Lock);
3244 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + GUEST_PAGE_SIZE);
3245 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
3246 cb -= GUEST_PAGE_SIZE;
3247 }
3248 /* won't ever get here. */
3249}
3250
3251
3252/**
3253 * Write to guest physical memory referenced by GC pointer.
3254 *
3255 * This function uses the current CR3/CR0/CR4 of the guest and will
3256 * bypass access handlers and not set dirty or accessed bits.
3257 *
3258 * @returns VBox status code.
3259 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3260 * @param GCPtrDst The destination address (GC pointer).
3261 * @param pvSrc The source address.
3262 * @param cb The number of bytes to write.
3263 */
3264VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3265{
3266 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3267 VMCPU_ASSERT_EMT(pVCpu);
3268
3269 /*
3270 * Treat the first page as a special case.
3271 */
3272 if (!cb)
3273 return VINF_SUCCESS;
3274
3275 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWrite));
3276 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3277
3278 /* map the 1st page */
3279 void *pvDst;
3280 PGMPAGEMAPLOCK Lock;
3281 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3282 if (RT_FAILURE(rc))
3283 return rc;
3284
3285 /* optimize for the case where access is completely within the first page. */
3286 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3287 if (RT_LIKELY(cb <= cbPage))
3288 {
3289 memcpy(pvDst, pvSrc, cb);
3290 PGMPhysReleasePageMappingLock(pVM, &Lock);
3291 return VINF_SUCCESS;
3292 }
3293
3294 /* copy to the end of the page. */
3295 memcpy(pvDst, pvSrc, cbPage);
3296 PGMPhysReleasePageMappingLock(pVM, &Lock);
3297 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3298 pvSrc = (const uint8_t *)pvSrc + cbPage;
3299 cb -= cbPage;
3300
3301 /*
3302 * Page by page.
3303 */
3304 for (;;)
3305 {
3306 /* map the page */
3307 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3308 if (RT_FAILURE(rc))
3309 return rc;
3310
3311 /* last page? */
3312 if (cb <= GUEST_PAGE_SIZE)
3313 {
3314 memcpy(pvDst, pvSrc, cb);
3315 PGMPhysReleasePageMappingLock(pVM, &Lock);
3316 return VINF_SUCCESS;
3317 }
3318
3319 /* copy the entire page and advance */
3320 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3321 PGMPhysReleasePageMappingLock(pVM, &Lock);
3322 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
3323 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3324 cb -= GUEST_PAGE_SIZE;
3325 }
3326 /* won't ever get here. */
3327}
3328
3329
3330/**
3331 * Write to guest physical memory referenced by GC pointer and update the PTE.
3332 *
3333 * This function uses the current CR3/CR0/CR4 of the guest and will
3334 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3335 *
3336 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3337 *
3338 * @returns VBox status code.
3339 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3340 * @param GCPtrDst The destination address (GC pointer).
3341 * @param pvSrc The source address.
3342 * @param cb The number of bytes to write.
3343 */
3344VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3345{
3346 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3347 VMCPU_ASSERT_EMT(pVCpu);
3348
3349 /*
3350 * Treat the first page as a special case.
3351 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage calls.
3352 */
3353 if (!cb)
3354 return VINF_SUCCESS;
3355
3356 /* map the 1st page */
3357 void *pvDst;
3358 PGMPAGEMAPLOCK Lock;
3359 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3360 if (RT_FAILURE(rc))
3361 return rc;
3362
3363 /* optimize for the case where access is completely within the first page. */
3364 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3365 if (RT_LIKELY(cb <= cbPage))
3366 {
3367 memcpy(pvDst, pvSrc, cb);
3368 PGMPhysReleasePageMappingLock(pVM, &Lock);
3369 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3370 return VINF_SUCCESS;
3371 }
3372
3373 /* copy to the end of the page. */
3374 memcpy(pvDst, pvSrc, cbPage);
3375 PGMPhysReleasePageMappingLock(pVM, &Lock);
3376 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3377 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3378 pvSrc = (const uint8_t *)pvSrc + cbPage;
3379 cb -= cbPage;
3380
3381 /*
3382 * Page by page.
3383 */
3384 for (;;)
3385 {
3386 /* map the page */
3387 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3388 if (RT_FAILURE(rc))
3389 return rc;
3390
3391 /* last page? */
3392 if (cb <= GUEST_PAGE_SIZE)
3393 {
3394 memcpy(pvDst, pvSrc, cb);
3395 PGMPhysReleasePageMappingLock(pVM, &Lock);
3396 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3397 return VINF_SUCCESS;
3398 }
3399
3400 /* copy the entire page and advance */
3401 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3402 PGMPhysReleasePageMappingLock(pVM, &Lock);
3403 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3404 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
3405 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3406 cb -= GUEST_PAGE_SIZE;
3407 }
3408 /* won't ever get here. */
3409}
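/*
 * Editor's illustrative sketch (not part of the original file): the dirty variant
 * is the right choice when emulating a store on the guest's behalf, since the guest
 * expects the accessed and dirty bits of its PTE to be set.  Hypothetical helper.
 */
#if 0 /* example only */
static int pgmExampleEmulatedStoreU64(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, uint64_t uValue)
{
    return PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, &uValue, sizeof(uValue));
}
#endif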
3410
3411
3412/**
3413 * Read from guest physical memory referenced by GC pointer.
3414 *
3415 * This function uses the current CR3/CR0/CR4 of the guest and will
3416 * respect access handlers and set accessed bits.
3417 *
3418 * @returns Strict VBox status, see PGMPhysRead for details.
3419 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3420 * specified virtual address.
3421 *
3422 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3423 * @param pvDst The destination address.
3424 * @param GCPtrSrc The source address (GC pointer).
3425 * @param cb The number of bytes to read.
3426 * @param enmOrigin Who is calling.
3427 * @thread EMT(pVCpu)
3428 */
3429VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3430{
3431 int rc;
3432 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3433 VMCPU_ASSERT_EMT(pVCpu);
3434
3435 /*
3436 * Anything to do?
3437 */
3438 if (!cb)
3439 return VINF_SUCCESS;
3440
3441 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3442
3443 /*
3444 * Optimize reads within a single page.
3445 */
3446 if (((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
3447 {
3448 /* Convert virtual to physical address + flags */
3449 PGMPTWALK Walk;
3450 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3451 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3452 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3453
3454 /* mark the guest page as accessed. */
3455 if (!(Walk.fEffective & X86_PTE_A))
3456 {
3457 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3458 AssertRC(rc);
3459 }
3460
3461 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3462 }
3463
3464 /*
3465 * Page by page.
3466 */
3467 for (;;)
3468 {
3469 /* Convert virtual to physical address + flags */
3470 PGMPTWALK Walk;
3471 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3472 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3473 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3474
3475 /* mark the guest page as accessed. */
3476 if (!(Walk.fEffective & X86_PTE_A))
3477 {
3478 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3479 AssertRC(rc);
3480 }
3481
3482 /* copy */
3483 size_t cbRead = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3484 if (cbRead < cb)
3485 {
3486 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3487 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3488 { /* likely */ }
3489 else
3490 return rcStrict;
3491 }
3492 else /* Last page (cbRead is GUEST_PAGE_SIZE, we only need cb!) */
3493 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3494
3495 /* next */
3496 Assert(cb > cbRead);
3497 cb -= cbRead;
3498 pvDst = (uint8_t *)pvDst + cbRead;
3499 GCPtrSrc += cbRead;
3500 }
3501}
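/*
 * Editor's illustrative sketch (not part of the original file): reading guest data
 * through the current paging mode while honouring access handlers.  The strict
 * status must be propagated because ring-0 handlers may request a ring-3 round
 * trip.  The helper name and the PGMACCESSORIGIN_DEVICE origin are just example
 * choices, not mandated by this API.
 */
#if 0 /* example only */
static VBOXSTRICTRC pgmExampleReadGuestBuf(PVMCPUCC pVCpu, RTGCPTR GCPtrSrc)
{
    uint8_t abBuf[64];
    VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, abBuf, GCPtrSrc, sizeof(abBuf), PGMACCESSORIGIN_DEVICE);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict; /* e.g. VERR_PAGE_TABLE_NOT_PRESENT or a handler status */
    /* ... use abBuf ... */
    return VINF_SUCCESS;
}
#endif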
3502
3503
3504/**
3505 * Write to guest physical memory referenced by GC pointer.
3506 *
3507 * This function uses the current CR3/CR0/CR4 of the guest and will
3508 * respect access handlers and set dirty and accessed bits.
3509 *
3510 * @returns Strict VBox status, see PGMPhysWrite for details.
3511 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3512 * specified virtual address.
3513 *
3514 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3515 * @param GCPtrDst The destination address (GC pointer).
3516 * @param pvSrc The source address.
3517 * @param cb The number of bytes to write.
3518 * @param enmOrigin Who is calling.
3519 */
3520VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3521{
3522 int rc;
3523 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3524 VMCPU_ASSERT_EMT(pVCpu);
3525
3526 /*
3527 * Anything to do?
3528 */
3529 if (!cb)
3530 return VINF_SUCCESS;
3531
3532 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3533
3534 /*
3535 * Optimize writes within a single page.
3536 */
3537 if (((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
3538 {
3539 /* Convert virtual to physical address + flags */
3540 PGMPTWALK Walk;
3541 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3542 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3543 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3544
3545 /* Mention when we ignore X86_PTE_RW... */
3546 if (!(Walk.fEffective & X86_PTE_RW))
3547 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3548
3549 /* Mark the guest page as accessed and dirty if necessary. */
3550 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3551 {
3552 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3553 AssertRC(rc);
3554 }
3555
3556 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3557 }
3558
3559 /*
3560 * Page by page.
3561 */
3562 for (;;)
3563 {
3564 /* Convert virtual to physical address + flags */
3565 PGMPTWALK Walk;
3566 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3567 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3568 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3569
3570 /* Mention when we ignore X86_PTE_RW... */
3571 if (!(Walk.fEffective & X86_PTE_RW))
3572 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3573
3574 /* Mark the guest page as accessed and dirty if necessary. */
3575 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3576 {
3577 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3578 AssertRC(rc);
3579 }
3580
3581 /* copy */
3582 size_t cbWrite = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3583 if (cbWrite < cb)
3584 {
3585 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3586 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3587 { /* likely */ }
3588 else
3589 return rcStrict;
3590 }
3591 else /* Last page (cbWrite is GUEST_PAGE_SIZE, we only need cb!) */
3592 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3593
3594 /* next */
3595 Assert(cb > cbWrite);
3596 cb -= cbWrite;
3597 pvSrc = (uint8_t *)pvSrc + cbWrite;
3598 GCPtrDst += cbWrite;
3599 }
3600}
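/*
 * Editor's illustrative sketch (not part of the original file): a guest-virtual
 * read-modify-write built on PGMPhysReadGCPtr/PGMPhysWriteGCPtr, letting PGM
 * maintain the accessed/dirty bits and honour handlers.  Helper name and origin
 * value are hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC pgmExampleOrDwordGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOrMask)
{
    uint32_t uValue = 0;
    VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, &uValue, GCPtr, sizeof(uValue), PGMACCESSORIGIN_DEVICE);
    if (rcStrict == VINF_SUCCESS)
    {
        uValue |= fOrMask;
        rcStrict = PGMPhysWriteGCPtr(pVCpu, GCPtr, &uValue, sizeof(uValue), PGMACCESSORIGIN_DEVICE);
    }
    return rcStrict;
}
#endif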
3601
3602
3603/**
3604 * Return the page type of the specified physical address.
3605 *
3606 * @returns The page type.
3607 * @param pVM The cross context VM structure.
3608 * @param GCPhys Guest physical address
3609 */
3610VMM_INT_DECL(PGMPAGETYPE) PGMPhysGetPageType(PVMCC pVM, RTGCPHYS GCPhys)
3611{
3612 PGM_LOCK_VOID(pVM);
3613 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3614 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
3615 PGM_UNLOCK(pVM);
3616
3617 return enmPgType;
3618}
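/*
 * Editor's illustrative sketch (not part of the original file): using the page type
 * to decide whether a physical address is backed by plain RAM.  Hypothetical helper.
 */
#if 0 /* example only */
static bool pgmExampleIsRamPage(PVMCC pVM, RTGCPHYS GCPhys)
{
    PGMPAGETYPE const enmType = PGMPhysGetPageType(pVM, GCPhys);
    return enmType == PGMPAGETYPE_RAM; /* PGMPAGETYPE_INVALID is returned for unassigned addresses */
}
#endif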
3619
3620
3621/**
3622 * Converts a GC physical address to a HC ring-3 pointer, with some
3623 * additional checks.
3624 *
3625 * @returns VBox status code (no informational statuses).
3626 *
3627 * @param pVM The cross context VM structure.
3628 * @param pVCpu The cross context virtual CPU structure of the
3629 * calling EMT.
3630 * @param GCPhys The GC physical address to convert. This API masks
3631 * the A20 line when necessary.
3632 * @param puTlbPhysRev Where to read the physical TLB revision. Needs to
3633 * be done while holding the PGM lock.
3634 * @param ppb Where to store the pointer corresponding to GCPhys
3635 * on success.
3636 * @param pfTlb The TLB flags and revision. We only add stuff.
3637 *
3638 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr and
3639 * PGMPhysIemGCPhys2Ptr.
3640 *
3641 * @thread EMT(pVCpu).
3642 */
3643VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
3644 R3R0PTRTYPE(uint8_t *) *ppb,
3645 uint64_t *pfTlb)
3646{
3647 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
3648 Assert(!(GCPhys & X86_PAGE_OFFSET_MASK));
3649
3650 PGM_LOCK_VOID(pVM);
3651
3652 PPGMRAMRANGE pRam;
3653 PPGMPAGE pPage;
3654 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3655 if (RT_SUCCESS(rc))
3656 {
3657 if (!PGM_PAGE_IS_BALLOONED(pPage))
3658 {
3659 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3660 {
3661 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
3662 {
3663 /*
3664 * No access handler.
3665 */
3666 switch (PGM_PAGE_GET_STATE(pPage))
3667 {
3668 case PGM_PAGE_STATE_ALLOCATED:
3669 *pfTlb |= *puTlbPhysRev;
3670 break;
3671 case PGM_PAGE_STATE_BALLOONED:
3672 AssertFailed();
3673 RT_FALL_THRU();
3674 case PGM_PAGE_STATE_ZERO:
3675 case PGM_PAGE_STATE_SHARED:
3676 case PGM_PAGE_STATE_WRITE_MONITORED:
3677 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3678 break;
3679 }
3680
3681 PPGMPAGEMAPTLBE pTlbe;
3682 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3683 AssertLogRelRCReturn(rc, rc);
3684 *ppb = (uint8_t *)pTlbe->pv;
3685 }
3686 else if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
3687 {
3688 /*
3689 * MMIO or similar all access handler: Catch all access.
3690 */
3691 *pfTlb |= *puTlbPhysRev
3692 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3693 *ppb = NULL;
3694 }
3695 else
3696 {
3697 /*
3698 * Write access handler: Catch write accesses if active.
3699 */
3700 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
3701 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3702 else
3703 switch (PGM_PAGE_GET_STATE(pPage))
3704 {
3705 case PGM_PAGE_STATE_ALLOCATED:
3706 *pfTlb |= *puTlbPhysRev;
3707 break;
3708 case PGM_PAGE_STATE_BALLOONED:
3709 AssertFailed();
3710 RT_FALL_THRU();
3711 case PGM_PAGE_STATE_ZERO:
3712 case PGM_PAGE_STATE_SHARED:
3713 case PGM_PAGE_STATE_WRITE_MONITORED:
3714 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3715 break;
3716 }
3717
3718 PPGMPAGEMAPTLBE pTlbe;
3719 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3720 AssertLogRelRCReturn(rc, rc);
3721 *ppb = (uint8_t *)pTlbe->pv;
3722 }
3723 }
3724 else
3725 {
3726 /* Alias MMIO: For now, we catch all access. */
3727 *pfTlb |= *puTlbPhysRev
3728 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3729 *ppb = NULL;
3730 }
3731 }
3732 else
3733 {
3734 /* Ballooned: Shouldn't get here, but reads return the zero page via PGMPhysRead and writes go to /dev/null. */
3735 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3736 *ppb = NULL;
3737 }
3738 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 pPage=%R[pgmpage]\n", GCPhys, *ppb, *pfTlb, pPage));
3739 }
3740 else
3741 {
3742 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3743 *ppb = NULL;
3744 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 (rc=%Rrc)\n", GCPhys, *ppb, *pfTlb, rc));
3745 }
3746
3747 PGM_UNLOCK(pVM);
3748 return VINF_SUCCESS;
3749}
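/*
 * Editor's illustrative sketch (not part of the original file): how an IEM-style
 * caller could fill a physical TLB entry with this API.  The revision counter and
 * the flag handling are simplified assumptions; IEM keeps its own TLB structures.
 */
#if 0 /* example only */
static int pgmExampleFillPhysTlbEntry(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys)
{
    static uint64_t volatile s_uTlbPhysRev = UINT64_C(0x1000); /* hypothetical revision value */
    uint8_t *pbMapping = NULL;
    uint64_t fTlb      = 0;                                    /* the API only ORs in flags */
    int rc = PGMPhysIemGCPhys2PtrNoLock(pVM, pVCpu, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK,
                                        &s_uTlbPhysRev, &pbMapping, &fTlb);
    AssertRCReturn(rc, rc);
    if (!(fTlb & (PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3)))
    {
        /* pbMapping may be used for direct reads until the physical TLB revision changes. */
    }
    return VINF_SUCCESS;
}
#endif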
3750
3751
3752/**
3753 * Converts a GC physical address to a HC ring-3 pointer, with some
3754 * additional checks.
3755 *
3756 * @returns VBox status code (no informational statuses).
3757 * @retval VINF_SUCCESS on success.
3758 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3759 * access handler of some kind.
3760 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3761 * accesses or is odd in any way.
3762 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3763 *
3764 * @param pVM The cross context VM structure.
3765 * @param pVCpu The cross context virtual CPU structure of the
3766 * calling EMT.
3767 * @param GCPhys The GC physical address to convert. This API masks
3768 * the A20 line when necessary.
3769 * @param fWritable Whether write access is required.
3770 * @param fByPassHandlers Whether to bypass access handlers.
3771 * @param ppv Where to store the pointer corresponding to GCPhys
3772 * on success.
3773 * @param pLock Where to store the lock information needed by PGMPhysReleasePageMappingLock.
3774 *
3775 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
3776 * @thread EMT(pVCpu).
3777 */
3778VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
3779 void **ppv, PPGMPAGEMAPLOCK pLock)
3780{
3781 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
3782
3783 PGM_LOCK_VOID(pVM);
3784
3785 PPGMRAMRANGE pRam;
3786 PPGMPAGE pPage;
3787 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3788 if (RT_SUCCESS(rc))
3789 {
3790 if (PGM_PAGE_IS_BALLOONED(pPage))
3791 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3792 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3793 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3794 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3795 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3796 rc = VINF_SUCCESS;
3797 else
3798 {
3799 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3800 {
3801 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3802 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3803 }
3804 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
3805 {
3806 Assert(!fByPassHandlers);
3807 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3808 }
3809 }
3810 if (RT_SUCCESS(rc))
3811 {
3812 int rc2;
3813
3814 /* Make sure what we return is writable. */
3815 if (fWritable)
3816 switch (PGM_PAGE_GET_STATE(pPage))
3817 {
3818 case PGM_PAGE_STATE_ALLOCATED:
3819 break;
3820 case PGM_PAGE_STATE_BALLOONED:
3821 AssertFailed();
3822 break;
3823 case PGM_PAGE_STATE_ZERO:
3824 case PGM_PAGE_STATE_SHARED:
3825 case PGM_PAGE_STATE_WRITE_MONITORED:
3826 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
3827 AssertLogRelRCReturn(rc2, rc2);
3828 break;
3829 }
3830
3831 /* Get a ring-3 mapping of the address. */
3832 PPGMPAGEMAPTLBE pTlbe;
3833 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3834 AssertLogRelRCReturn(rc2, rc2);
3835
3836 /* Lock it and calculate the address. */
3837 if (fWritable)
3838 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
3839 else
3840 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
3841 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3842
3843 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
3844 }
3845 else
3846 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
3847
3848 /* else: handler catching all access, no pointer returned. */
3849 }
3850 else
3851 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
3852
3853 PGM_UNLOCK(pVM);
3854 return rc;
3855}
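/*
 * Editor's illustrative sketch (not part of the original file): mapping a guest
 * physical page for a direct write and releasing the mapping lock again.  Statuses
 * like VERR_PGM_PHYS_TLB_CATCH_WRITE tell the caller to fall back to the
 * handler-aware PGMPhysWrite path instead.  Helper name is hypothetical.
 */
#if 0 /* example only */
static int pgmExampleDirectWriteByte(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint8_t bValue)
{
    void          *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, true /*fWritable*/, false /*fByPassHandlers*/, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif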
3856
3857
3858/**
3859 * Checks if the given GCPhys page requires special handling for the given access
3860 * because it's MMIO or otherwise monitored.
3861 *
3862 * @returns VBox status code (no informational statuses).
3863 * @retval VINF_SUCCESS on success.
3864 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3865 * access handler of some kind.
3866 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3867 * accesses or is odd in any way.
3868 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3869 *
3870 * @param pVM The cross context VM structure.
3871 * @param GCPhys The GC physical address to convert. Since this is
3872 * only used for filling the REM TLB, the A20 mask must
3873 * be applied before calling this API.
3874 * @param fWritable Whether write access is required.
3875 * @param fByPassHandlers Whether to bypass access handlers.
3876 *
3877 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
3878 * a stop gap thing that should be removed once there is a better TLB
3879 * for virtual address accesses.
3880 */
3881VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
3882{
3883 PGM_LOCK_VOID(pVM);
3884 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
3885
3886 PPGMRAMRANGE pRam;
3887 PPGMPAGE pPage;
3888 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3889 if (RT_SUCCESS(rc))
3890 {
3891 if (PGM_PAGE_IS_BALLOONED(pPage))
3892 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3893 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3894 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3895 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3896 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3897 rc = VINF_SUCCESS;
3898 else
3899 {
3900 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3901 {
3902 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3903 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3904 }
3905 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
3906 {
3907 Assert(!fByPassHandlers);
3908 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3909 }
3910 }
3911 }
3912
3913 PGM_UNLOCK(pVM);
3914 return rc;
3915}
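/*
 * Editor's illustrative sketch (not part of the original file): probing whether a
 * page can be accessed directly or must go through the handler-aware
 * PGMPhysRead/PGMPhysWrite paths.  The caller is expected to have applied the A20
 * mask to GCPhys already.  Hypothetical helper.
 */
#if 0 /* example only */
static bool pgmExampleCanAccessDirectly(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable)
{
    int rc = PGMPhysIemQueryAccess(pVM, GCPhys, fWritable, false /*fByPassHandlers*/);
    return rc == VINF_SUCCESS; /* any VERR_PGM_PHYS_TLB_* status means special handling is required */
}
#endif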
3916
3917#ifdef VBOX_WITH_NATIVE_NEM
3918
3919/**
3920 * Interface used by NEM to check what to do on a memory access exit.
3921 *
3922 * @returns VBox status code.
3923 * @param pVM The cross context VM structure.
3924 * @param pVCpu The cross context per virtual CPU structure.
3925 * Optional.
3926 * @param GCPhys The guest physical address.
3927 * @param fMakeWritable Whether to try to make the page writable or not. If it
3928 * cannot be made writable, NEM_PAGE_PROT_WRITE won't
3929 * be returned and the return code will be unaffected.
3930 * @param pInfo Where to return the page information. This is
3931 * initialized even on failure.
3932 * @param pfnChecker Page in-sync checker callback. Optional.
3933 * @param pvUser User argument to pass to pfnChecker.
3934 */
3935VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fMakeWritable, PPGMPHYSNEMPAGEINFO pInfo,
3936 PFNPGMPHYSNEMCHECKPAGE pfnChecker, void *pvUser)
3937{
3938 PGM_LOCK_VOID(pVM);
3939
3940 PPGMPAGE pPage;
3941 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
3942 if (RT_SUCCESS(rc))
3943 {
3944 /* Try make it writable if requested. */
3945 pInfo->u2OldNemState = PGM_PAGE_GET_NEM_STATE(pPage);
3946 if (fMakeWritable)
3947 switch (PGM_PAGE_GET_STATE(pPage))
3948 {
3949 case PGM_PAGE_STATE_SHARED:
3950 case PGM_PAGE_STATE_WRITE_MONITORED:
3951 case PGM_PAGE_STATE_ZERO:
3952 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
3953 if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
3954 rc = VINF_SUCCESS;
3955 break;
3956 }
3957
3958 /* Fill in the info. */
3959 pInfo->HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
3960 pInfo->u2NemState = PGM_PAGE_GET_NEM_STATE(pPage);
3961 pInfo->fHasHandlers = PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) ? 1 : 0;
3962 PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
3963 pInfo->enmType = enmType;
3964 pInfo->fNemProt = pgmPhysPageCalcNemProtection(pPage, enmType);
3965 switch (PGM_PAGE_GET_STATE(pPage))
3966 {
3967 case PGM_PAGE_STATE_ALLOCATED:
3968 pInfo->fZeroPage = 0;
3969 break;
3970
3971 case PGM_PAGE_STATE_ZERO:
3972 pInfo->fZeroPage = 1;
3973 break;
3974
3975 case PGM_PAGE_STATE_WRITE_MONITORED:
3976 pInfo->fZeroPage = 0;
3977 break;
3978
3979 case PGM_PAGE_STATE_SHARED:
3980 pInfo->fZeroPage = 0;
3981 break;
3982
3983 case PGM_PAGE_STATE_BALLOONED:
3984 pInfo->fZeroPage = 1;
3985 break;
3986
3987 default:
3988 pInfo->fZeroPage = 1;
3989 AssertFailedStmt(rc = VERR_PGM_PHYS_PAGE_GET_IPE);
3990 }
3991
3992 /* Call the checker and update NEM state. */
3993 if (pfnChecker)
3994 {
3995 rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
3996 PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
3997 }
3998
3999 /* Done. */
4000 PGM_UNLOCK(pVM);
4001 }
4002 else
4003 {
4004 PGM_UNLOCK(pVM);
4005
4006 pInfo->HCPhys = NIL_RTHCPHYS;
4007 pInfo->fNemProt = NEM_PAGE_PROT_NONE;
4008 pInfo->u2NemState = 0;
4009 pInfo->fHasHandlers = 0;
4010 pInfo->fZeroPage = 0;
4011 pInfo->enmType = PGMPAGETYPE_INVALID;
4012 }
4013
4014 return rc;
4015}
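/*
 * Editor's illustrative sketch (not part of the original file): how a NEM backend
 * might query page info on a memory access exit.  The checker callback signature
 * mirrors the pfnChecker invocation above; what a backend does with the protection
 * bits is backend specific and only hinted at here.  All names are hypothetical.
 */
#if 0 /* example only */
static DECLCALLBACK(int) nemExamplePageChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
                                               PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
{
    RT_NOREF(pVM, pVCpu, GCPhys, pvUser);
    /* Inspect pInfo->fNemProt / pInfo->u2NemState and adjust the hypervisor mapping here. */
    return VINF_SUCCESS;
}

static int nemExampleHandleMemoryExit(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysExit, bool fWriteExit)
{
    PGMPHYSNEMPAGEINFO PageInfo;
    return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysExit & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK,
                                     fWriteExit /*fMakeWritable*/, &PageInfo, nemExamplePageChecker, NULL /*pvUser*/);
}
#endif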
4016
4017
4018/**
4019 * NEM helper that performs @a pfnCallback on pages with NEM state @a uMinState
4020 * or higher.
4021 *
4022 * @returns VBox status code from callback.
4023 * @param pVM The cross context VM structure.
4024 * @param pVCpu The cross context per CPU structure. This is
4025 * optional as it's only for passing to the callback.
4026 * @param uMinState The minimum NEM state value to call on.
4027 * @param pfnCallback The callback function.
4028 * @param pvUser User argument for the callback.
4029 */
4030VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uMinState,
4031 PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
4032{
4033 /*
4034 * Just brute force this problem.
4035 */
4036 PGM_LOCK_VOID(pVM);
4037 int rc = VINF_SUCCESS;
4038 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
4039 {
4040 uint32_t const cPages = pRam->cb >> X86_PAGE_SHIFT;
4041 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4042 {
4043 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(&pRam->aPages[iPage]);
4044 if (u2State < uMinState)
4045 { /* likely */ }
4046 else
4047 {
4048 rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
4049 if (RT_SUCCESS(rc))
4050 PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
4051 else
4052 break;
4053 }
4054 }
4055 }
4056 PGM_UNLOCK(pVM);
4057
4058 return rc;
4059}
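/*
 * Editor's illustrative sketch (not part of the original file): enumerating pages
 * at or above a given NEM state.  The callback signature matches the pfnCallback
 * invocation above; the state values used here are placeholders since they are
 * defined by the NEM backend, not by PGM.
 */
#if 0 /* example only */
static DECLCALLBACK(int) nemExampleEnumCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint8_t *pu2State, void *pvUser)
{
    RT_NOREF(pVM, pVCpu, GCPhys, pvUser);
    /* e.g. unmap the page from the hypervisor, then record the new state: */
    *pu2State = 0; /* placeholder for the backend's "unmapped" state value */
    return VINF_SUCCESS;
}

static int nemExampleUnmapAllMapped(PVMCC pVM, PVMCPUCC pVCpu)
{
    uint8_t const uMinState = 1; /* placeholder for the backend's lowest "mapped" state value */
    return PGMPhysNemEnumPagesByState(pVM, pVCpu, uMinState, nemExampleEnumCallback, NULL /*pvUser*/);
}
#endif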
4060
4061
4062/**
4063 * Helper for setting the NEM state for a range of pages.
4064 *
4065 * @param paPages Array of pages to modify.
4066 * @param cPages How many pages to modify.
4067 * @param u2State The new state value.
4068 */
4069void pgmPhysSetNemStateForPages(PPGMPAGE paPages, RTGCPHYS cPages, uint8_t u2State)
4070{
4071 PPGMPAGE pPage = paPages;
4072 while (cPages-- > 0)
4073 {
4074 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
4075 pPage++;
4076 }
4077}
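/*
 * Editor's illustrative sketch (not part of the original file): resetting the NEM
 * state of every page in a RAM range, e.g. after (re)registering the range with
 * the hypervisor.  The state value is a placeholder supplied by the NEM backend.
 */
#if 0 /* example only */
static void pgmExampleResetNemStateForRange(PPGMRAMRANGE pRam, uint8_t u2NewState)
{
    pgmPhysSetNemStateForPages(&pRam->aPages[0], pRam->cb >> X86_PAGE_SHIFT, u2NewState);
}
#endif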
4078
4079#endif /* VBOX_WITH_NATIVE_NEM */
4080