VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@96945

Last change on this file since 96945 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 146.5 KB
1/* $Id: PGMAllPhys.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM_PHYS
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#include <VBox/vmm/pgm.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/vmm.h>
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/iom.h>
39#include <VBox/vmm/em.h>
40#include <VBox/vmm/nem.h>
41#include "PGMInternal.h"
42#include <VBox/vmm/vmcc.h>
43#include "PGMInline.h"
44#include <VBox/param.h>
45#include <VBox/err.h>
46#include <iprt/assert.h>
47#include <iprt/string.h>
48#include <VBox/log.h>
49#ifdef IN_RING3
50# include <iprt/thread.h>
51#endif
52
53
54/*********************************************************************************************************************************
55* Defined Constants And Macros *
56*********************************************************************************************************************************/
57/** Enable the physical TLB. */
58#define PGM_WITH_PHYS_TLB
59
60/** @def PGM_HANDLER_PHYS_IS_VALID_STATUS
61 * Checks if valid physical access handler return code (normal handler, not PF).
62 *
63 * Checks if the given strict status code is one of the expected ones for a
64 * physical access handler in the current context.
65 *
66 * @returns true or false.
67 * @param a_rcStrict The status code.
68 * @param a_fWrite Whether it is a write or read being serviced.
69 *
70 * @remarks We wish to keep the list of statuses here as short as possible.
71 * When changing, please make sure to update the PGMPhysRead,
72 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
73 */
74#ifdef IN_RING3
75# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
76 ( (a_rcStrict) == VINF_SUCCESS \
77 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
78#elif defined(IN_RING0)
79# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
80 ( (a_rcStrict) == VINF_SUCCESS \
81 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT \
82 \
83 || (a_rcStrict) == ((a_fWrite) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ) \
84 || (a_rcStrict) == VINF_IOM_R3_MMIO_READ_WRITE \
85 || ((a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE && (a_fWrite)) \
86 \
87 || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR \
88 || (a_rcStrict) == VINF_EM_DBG_STOP \
89 || (a_rcStrict) == VINF_EM_DBG_EVENT \
90 || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
91 || (a_rcStrict) == VINF_EM_OFF \
92 || (a_rcStrict) == VINF_EM_SUSPEND \
93 || (a_rcStrict) == VINF_EM_RESET \
94 )
95#else
96# error "Context?"
97#endif
98
99/** @def PGM_HANDLER_VIRT_IS_VALID_STATUS
100 * Checks if valid virtual access handler return code (normal handler, not PF).
101 *
102 * Checks if the given strict status code is one of the expected ones for a
103 * virtual access handler in the current context.
104 *
105 * @returns true or false.
106 * @param a_rcStrict The status code.
107 * @param a_fWrite Whether it is a write or read being serviced.
108 *
109 * @remarks We wish to keep the list of statuses here as short as possible.
110 * When changing, please make sure to update the PGMPhysRead,
111 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
112 */
113#ifdef IN_RING3
114# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
115 ( (a_rcStrict) == VINF_SUCCESS \
116 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
117#elif defined(IN_RING0)
118# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
119 (false /* no virtual handlers in ring-0! */ )
120#else
121# error "Context?"
122#endif
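/* Illustrative caller-side sketch (assumed variable names, not code from this file):
 * after invoking a physical access handler, the read/write workers are expected to
 * check the status against the context-specific list above, along the lines of:
 *
 *     VBOXSTRICTRC rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf,
 *                                        PGMACCESSTYPE_WRITE, enmOrigin, uUser);
 *     AssertMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true /*fWrite*/),
 *               ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 */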
123
124
125
126/**
127 * Calculate the actual table size.
128 *
129 * The memory is laid out like this:
130 * - PGMPHYSHANDLERTREE (8 bytes)
131 * - Allocation bitmap (8-byte size align)
132 * - Slab of PGMPHYSHANDLER. Start is 64 byte aligned.
133 */
134uint32_t pgmHandlerPhysicalCalcTableSizes(uint32_t *pcEntries, uint32_t *pcbTreeAndBitmap)
135{
136 /*
137 * A minimum of 64 entries and a maximum of ~64K.
138 */
139 uint32_t cEntries = *pcEntries;
140 if (cEntries <= 64)
141 cEntries = 64;
142 else if (cEntries >= _64K)
143 cEntries = _64K;
144 else
145 cEntries = RT_ALIGN_32(cEntries, 16);
146
147 /*
148 * Do the initial calculation.
149 */
150 uint32_t cbBitmap = RT_ALIGN_32(cEntries, 64) / 8;
151 uint32_t cbTreeAndBitmap = RT_ALIGN_32(sizeof(PGMPHYSHANDLERTREE) + cbBitmap, 64);
152 uint32_t cbTable = cEntries * sizeof(PGMPHYSHANDLER);
153 uint32_t cbTotal = cbTreeAndBitmap + cbTable;
154
155 /*
156 * Align the total and try use up extra space from that.
157 */
158 uint32_t cbTotalAligned = RT_ALIGN_32(cbTotal, RT_MAX(HOST_PAGE_SIZE, _16K));
159 uint32_t cAvail = cbTotalAligned - cbTotal;
160 cAvail /= sizeof(PGMPHYSHANDLER);
161 if (cAvail >= 1)
162 for (cEntries += cAvail;;) /* use up the slack; shrink again below if the bitmap/tree growth overflows */
163 {
164 cbBitmap = RT_ALIGN_32(cEntries, 64) / 8;
165 cbTreeAndBitmap = RT_ALIGN_32(sizeof(PGMPHYSHANDLERTREE) + cbBitmap, 64);
166 cbTable = cEntries * sizeof(PGMPHYSHANDLER);
167 cbTotal = cbTreeAndBitmap + cbTable;
168 if (cbTotal <= cbTotalAligned)
169 break;
170 cEntries--;
171 Assert(cEntries >= 16);
172 }
173
174 /*
175 * Return the result.
176 */
177 *pcbTreeAndBitmap = cbTreeAndBitmap;
178 *pcEntries = cEntries;
179 return cbTotalAligned;
180}
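/* Illustrative use (sketch with assumed values): the caller passes a requested entry
 * count and receives the clamped count, the size of the tree+bitmap header, and the
 * aligned total allocation size:
 *
 *     uint32_t cEntries        = 4096;
 *     uint32_t cbTreeAndBitmap = 0;
 *     uint32_t cbTotal         = pgmHandlerPhysicalCalcTableSizes(&cEntries, &cbTreeAndBitmap);
 *     // The PGMPHYSHANDLER slab starts cbTreeAndBitmap bytes into the cbTotal allocation.
 */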
181
182
183/**
184 * Looks up a ROM range by its PGMROMRANGE::GCPhys value.
185 */
186DECLINLINE(PPGMROMRANGE) pgmPhysRomLookupByBase(PVMCC pVM, RTGCPHYS GCPhys)
187{
188 for (PPGMROMRANGE pRom = pVM->pgm.s.CTX_SUFF(pRomRanges); pRom; pRom = pRom->CTX_SUFF(pNext))
189 if (pRom->GCPhys == GCPhys)
190 return pRom;
191 return NULL;
192}
193
194#ifndef IN_RING3
195
196/**
197 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
198 * \#PF access handler callback for guest ROM range write access.}
199 *
200 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
201 */
202DECLCALLBACK(VBOXSTRICTRC) pgmPhysRomWritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
203 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
204
205{
206 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
207 AssertReturn(pRom, VINF_EM_RAW_EMULATE_INSTR);
208 uint32_t const iPage = (GCPhysFault - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
209 int rc;
210 RT_NOREF(uErrorCode, pvFault);
211
212 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
213
214 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));
215 switch (pRom->aPages[iPage].enmProt)
216 {
217 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
218 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
219 {
220 /*
221 * If it's a simple instruction which doesn't change the cpu state
222 * we will simply skip it. Otherwise we'll have to defer it to REM.
223 */
224 uint32_t cbOp;
225 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
226 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
227 if ( RT_SUCCESS(rc)
228 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
229 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
230 {
231 switch (pDis->bOpCode)
232 {
233 /** @todo Find other instructions we can safely skip, possibly
234 * adding this kind of detection to DIS or EM. */
235 case OP_MOV:
236 pRegFrame->rip += cbOp;
237 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteHandled);
238 return VINF_SUCCESS;
239 }
240 }
241 break;
242 }
243
244 case PGMROMPROT_READ_RAM_WRITE_RAM:
245 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
246 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
247 AssertRC(rc);
248 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
249
250 case PGMROMPROT_READ_ROM_WRITE_RAM:
251 /* Handle it in ring-3 because it's *way* easier there. */
252 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
253 break;
254
255 default:
256 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
257 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
258 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
259 }
260
261 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteUnhandled);
262 return VINF_EM_RAW_EMULATE_INSTR;
263}
264
265#endif /* !IN_RING3 */
266
267
268/**
269 * @callback_method_impl{FNPGMPHYSHANDLER,
270 * Access handler callback for ROM write accesses.}
271 *
272 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
273 */
274DECLCALLBACK(VBOXSTRICTRC)
275pgmPhysRomWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
276 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
277{
278 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
279 AssertReturn(pRom, VERR_INTERNAL_ERROR_3);
280 uint32_t const iPage = (GCPhys - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
281 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));
282 PPGMROMPAGE const pRomPage = &pRom->aPages[iPage];
283
284 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
285 RT_NOREF(pVCpu, pvPhys, enmOrigin);
286
287 if (enmAccessType == PGMACCESSTYPE_READ)
288 {
289 switch (pRomPage->enmProt)
290 {
291 /*
292 * Take the default action.
293 */
294 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
295 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
296 case PGMROMPROT_READ_ROM_WRITE_RAM:
297 case PGMROMPROT_READ_RAM_WRITE_RAM:
298 return VINF_PGM_HANDLER_DO_DEFAULT;
299
300 default:
301 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
302 pRom->aPages[iPage].enmProt, iPage, GCPhys),
303 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
304 }
305 }
306 else
307 {
308 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
309 switch (pRomPage->enmProt)
310 {
311 /*
312 * Ignore writes.
313 */
314 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
315 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
316 return VINF_SUCCESS;
317
318 /*
319 * Write to the RAM page.
320 */
321 case PGMROMPROT_READ_ROM_WRITE_RAM:
322 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
323 {
324 /* This should be impossible now, pvPhys doesn't work cross page any longer. */
325 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> GUEST_PAGE_SHIFT) == iPage);
326
327 /*
328 * Take the lock, do lazy allocation, map the page and copy the data.
329 *
330 * Note that we have to bypass the mapping TLB since it works on
331 * guest physical addresses and entering the shadow page would
332 * kind of screw things up...
333 */
334 PGM_LOCK_VOID(pVM);
335
336 PPGMPAGE pShadowPage = &pRomPage->Shadow;
337 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
338 {
339 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
340 AssertLogRelMsgReturnStmt(pShadowPage, ("%RGp\n", GCPhys), PGM_UNLOCK(pVM), VERR_PGM_PHYS_PAGE_GET_IPE);
341 }
342
343 void *pvDstPage;
344 int rc;
345#if defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)
346 if (PGM_IS_IN_NEM_MODE(pVM) && PGMROMPROT_IS_ROM(pRomPage->enmProt))
347 {
348 pvDstPage = &pRom->pbR3Alternate[GCPhys - pRom->GCPhys];
349 rc = VINF_SUCCESS;
350 }
351 else
352#endif
353 {
354 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
355 if (RT_SUCCESS(rc))
356 pvDstPage = (uint8_t *)pvDstPage + (GCPhys & GUEST_PAGE_OFFSET_MASK);
357 }
358 if (RT_SUCCESS(rc))
359 {
360 memcpy(pvDstPage, pvBuf, cbBuf);
361 pRomPage->LiveSave.fWrittenTo = true;
362
363 AssertMsg( rc == VINF_SUCCESS
364 || ( rc == VINF_PGM_SYNC_CR3
365 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
366 , ("%Rrc\n", rc));
367 rc = VINF_SUCCESS;
368 }
369
370 PGM_UNLOCK(pVM);
371 return rc;
372 }
373
374 default:
375 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
376 pRom->aPages[iPage].enmProt, iPage, GCPhys),
377 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
378 }
379 }
380}
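/* Informal recap of how the two ROM write handlers above treat PGMROMPROT (a summary
 * of the switch cases, not an authoritative definition):
 *   READ_ROM_WRITE_IGNORE / READ_RAM_WRITE_IGNORE - the write is dropped; the #PF
 *       handler tries to skip a simple MOV and otherwise defers to the interpreter.
 *   READ_ROM_WRITE_RAM / READ_RAM_WRITE_RAM       - the write lands in the shadow RAM
 *       page, with the ring-0 #PF handler punting to ring-3 where that is easier.
 */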
381
382
383/**
384 * Common worker for pgmPhysMmio2WriteHandler and pgmPhysMmio2WritePfHandler.
385 */
386static VBOXSTRICTRC pgmPhysMmio2WriteHandlerCommon(PVMCC pVM, PVMCPUCC pVCpu, uint64_t hMmio2, RTGCPHYS GCPhys, RTGCPTR GCPtr)
387{
388 /*
389 * Get the MMIO2 range.
390 */
391 AssertReturn(hMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), VERR_INTERNAL_ERROR_3);
392 AssertReturn(hMmio2 != 0, VERR_INTERNAL_ERROR_3);
393 PPGMREGMMIO2RANGE pMmio2 = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2 - 1];
394 Assert(pMmio2->idMmio2 == hMmio2);
395 AssertReturn((pMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES) == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
396 VERR_INTERNAL_ERROR_4);
397
398 /*
399 * Get the page and make sure it's an MMIO2 page.
400 */
401 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
402 AssertReturn(pPage, VINF_EM_RAW_EMULATE_INSTR);
403 AssertReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, VINF_EM_RAW_EMULATE_INSTR);
404
405 /*
406 * Set the dirty flag so we can avoid scanning all the pages when it isn't dirty.
407 * (The PGM_PAGE_HNDL_PHYS_STATE_DISABLED handler state indicates that a single
408 * page is dirty, saving the need for additional storage (bitmap).)
409 */
410 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_DIRTY;
411
412 /*
413 * Disable the handler for this page.
414 */
415 int rc = PGMHandlerPhysicalPageTempOff(pVM, pMmio2->RamRange.GCPhys, GCPhys & X86_PTE_PG_MASK);
416 AssertRC(rc);
417#ifndef IN_RING3
418 if (RT_SUCCESS(rc) && GCPtr != ~(RTGCPTR)0)
419 {
420 rc = PGMShwMakePageWritable(pVCpu, GCPtr, PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
421 AssertMsgReturn(rc == VINF_SUCCESS, ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, rc), rc);
422 }
423#else
424 RT_NOREF(pVCpu, GCPtr);
425#endif
426 return VINF_SUCCESS;
427}
428
429
430#ifndef IN_RING3
431/**
432 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
433 * \#PF access handler callback for guest MMIO2 dirty page tracing.}
434 *
435 * @remarks The @a uUser is the MMIO2 index.
436 */
437DECLCALLBACK(VBOXSTRICTRC) pgmPhysMmio2WritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
438 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
439{
440 RT_NOREF(pVCpu, uErrorCode, pRegFrame);
441 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
442 if (RT_SUCCESS(rcStrict))
443 {
444 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhysFault, pvFault);
445 PGM_UNLOCK(pVM);
446 }
447 return rcStrict;
448}
449#endif /* !IN_RING3 */
450
451
452/**
453 * @callback_method_impl{FNPGMPHYSHANDLER,
454 * Access handler callback for MMIO2 dirty page tracing.}
455 *
456 * @remarks The @a uUser is the MMIO2 index.
457 */
458DECLCALLBACK(VBOXSTRICTRC)
459pgmPhysMmio2WriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
460 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
461{
462 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
463 if (RT_SUCCESS(rcStrict))
464 {
465 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhys, ~(RTGCPTR)0);
466 PGM_UNLOCK(pVM);
467 if (rcStrict == VINF_SUCCESS)
468 rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
469 }
470 RT_NOREF(pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
471 return rcStrict;
472}
473
474
475/**
476 * Invalidates the RAM range TLBs.
477 *
478 * @param pVM The cross context VM structure.
479 */
480void pgmPhysInvalidRamRangeTlbs(PVMCC pVM)
481{
482 PGM_LOCK_VOID(pVM);
483 RT_ZERO(pVM->pgm.s.apRamRangesTlbR3);
484 RT_ZERO(pVM->pgm.s.apRamRangesTlbR0);
485 PGM_UNLOCK(pVM);
486}
487
488
489/**
490 * Tests if a value of type RTGCPHYS is negative if the type had been signed
491 * instead of unsigned.
492 *
493 * @returns @c true if negative, @c false if positive or zero.
494 * @param a_GCPhys The value to test.
495 * @todo Move me to iprt/types.h.
496 */
497#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
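/* Illustrative example, assuming RTGCPHYS is a 64-bit unsigned type: an underflowed
 * range offset wraps around and gets its top bit set, which the tree walkers below
 * use to decide whether to descend left or right:
 *
 *     RTGCPHYS off = UINT64_C(0x1000) - UINT64_C(0x2000);    // wraps to 0xfffffffffffff000
 *     Assert(RTGCPHYS_IS_NEGATIVE(off));                     // GCPhys below the range -> go left
 *     Assert(!RTGCPHYS_IS_NEGATIVE(UINT64_C(0x3000) - UINT64_C(0x2000)));
 */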
498
499
500/**
501 * Slow worker for pgmPhysGetRange.
502 *
503 * @copydoc pgmPhysGetRange
504 */
505PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
506{
507 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
508
509 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
510 while (pRam)
511 {
512 RTGCPHYS off = GCPhys - pRam->GCPhys;
513 if (off < pRam->cb)
514 {
515 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
516 return pRam;
517 }
518 if (RTGCPHYS_IS_NEGATIVE(off))
519 pRam = pRam->CTX_SUFF(pLeft);
520 else
521 pRam = pRam->CTX_SUFF(pRight);
522 }
523 return NULL;
524}
525
526
527/**
528 * Slow worker for pgmPhysGetRangeAtOrAbove.
529 *
530 * @copydoc pgmPhysGetRangeAtOrAbove
531 */
532PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
533{
534 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
535
536 PPGMRAMRANGE pLastLeft = NULL;
537 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
538 while (pRam)
539 {
540 RTGCPHYS off = GCPhys - pRam->GCPhys;
541 if (off < pRam->cb)
542 {
543 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
544 return pRam;
545 }
546 if (RTGCPHYS_IS_NEGATIVE(off))
547 {
548 pLastLeft = pRam;
549 pRam = pRam->CTX_SUFF(pLeft);
550 }
551 else
552 pRam = pRam->CTX_SUFF(pRight);
553 }
554 return pLastLeft;
555}
556
557
558/**
559 * Slow worker for pgmPhysGetPage.
560 *
561 * @copydoc pgmPhysGetPage
562 */
563PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
564{
565 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
566
567 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
568 while (pRam)
569 {
570 RTGCPHYS off = GCPhys - pRam->GCPhys;
571 if (off < pRam->cb)
572 {
573 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
574 return &pRam->aPages[off >> GUEST_PAGE_SHIFT];
575 }
576
577 if (RTGCPHYS_IS_NEGATIVE(off))
578 pRam = pRam->CTX_SUFF(pLeft);
579 else
580 pRam = pRam->CTX_SUFF(pRight);
581 }
582 return NULL;
583}
584
585
586/**
587 * Slow worker for pgmPhysGetPageEx.
588 *
589 * @copydoc pgmPhysGetPageEx
590 */
591int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
592{
593 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
594
595 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
596 while (pRam)
597 {
598 RTGCPHYS off = GCPhys - pRam->GCPhys;
599 if (off < pRam->cb)
600 {
601 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
602 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
603 return VINF_SUCCESS;
604 }
605
606 if (RTGCPHYS_IS_NEGATIVE(off))
607 pRam = pRam->CTX_SUFF(pLeft);
608 else
609 pRam = pRam->CTX_SUFF(pRight);
610 }
611
612 *ppPage = NULL;
613 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
614}
615
616
617/**
618 * Slow worker for pgmPhysGetPageAndRangeEx.
619 *
620 * @copydoc pgmPhysGetPageAndRangeEx
621 */
622int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
623{
624 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
625
626 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
627 while (pRam)
628 {
629 RTGCPHYS off = GCPhys - pRam->GCPhys;
630 if (off < pRam->cb)
631 {
632 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
633 *ppRam = pRam;
634 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
635 return VINF_SUCCESS;
636 }
637
638 if (RTGCPHYS_IS_NEGATIVE(off))
639 pRam = pRam->CTX_SUFF(pLeft);
640 else
641 pRam = pRam->CTX_SUFF(pRight);
642 }
643
644 *ppRam = NULL;
645 *ppPage = NULL;
646 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
647}
648
649
650/**
651 * Checks if Address Gate 20 is enabled or not.
652 *
653 * @returns true if enabled.
654 * @returns false if disabled.
655 * @param pVCpu The cross context virtual CPU structure.
656 */
657VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
658{
659 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
660 return pVCpu->pgm.s.fA20Enabled;
661}
662
663
664/**
665 * Validates a GC physical address.
666 *
667 * @returns true if valid.
668 * @returns false if invalid.
669 * @param pVM The cross context VM structure.
670 * @param GCPhys The physical address to validate.
671 */
672VMMDECL(bool) PGMPhysIsGCPhysValid(PVMCC pVM, RTGCPHYS GCPhys)
673{
674 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
675 return pPage != NULL;
676}
677
678
679/**
680 * Checks if a GC physical address is a normal page,
681 * i.e. not ROM, MMIO or reserved.
682 *
683 * @returns true if normal.
684 * @returns false if invalid, ROM, MMIO or reserved page.
685 * @param pVM The cross context VM structure.
686 * @param GCPhys The physical address to check.
687 */
688VMMDECL(bool) PGMPhysIsGCPhysNormal(PVMCC pVM, RTGCPHYS GCPhys)
689{
690 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
691 return pPage
692 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
693}
694
695
696/**
697 * Converts a GC physical address to a HC physical address.
698 *
699 * @returns VINF_SUCCESS on success.
700 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
701 * page but has no physical backing.
702 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
703 * GC physical address.
704 *
705 * @param pVM The cross context VM structure.
706 * @param GCPhys The GC physical address to convert.
707 * @param pHCPhys Where to store the HC physical address on success.
708 */
709VMM_INT_DECL(int) PGMPhysGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
710{
711 PGM_LOCK_VOID(pVM);
712 PPGMPAGE pPage;
713 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
714 if (RT_SUCCESS(rc))
715 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & GUEST_PAGE_OFFSET_MASK);
716 PGM_UNLOCK(pVM);
717 return rc;
718}
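/* Illustrative use (sketch, assumed variables): translating a guest physical address
 * into the host physical address backing it:
 *
 *     RTHCPHYS HCPhys = NIL_RTHCPHYS;
 *     int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // HCPhys now holds the host physical address, page offset included.
 *     }
 */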
719
720
721/**
722 * Invalidates all page mapping TLBs.
723 *
724 * @param pVM The cross context VM structure.
725 */
726void pgmPhysInvalidatePageMapTLB(PVMCC pVM)
727{
728 PGM_LOCK_VOID(pVM);
729 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushes);
730
731 /* Clear the R3 & R0 TLBs completely. */
732 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
733 {
734 pVM->pgm.s.PhysTlbR0.aEntries[i].GCPhys = NIL_RTGCPHYS;
735 pVM->pgm.s.PhysTlbR0.aEntries[i].pPage = 0;
736 pVM->pgm.s.PhysTlbR0.aEntries[i].pv = 0;
737 }
738
739 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
740 {
741 pVM->pgm.s.PhysTlbR3.aEntries[i].GCPhys = NIL_RTGCPHYS;
742 pVM->pgm.s.PhysTlbR3.aEntries[i].pPage = 0;
743 pVM->pgm.s.PhysTlbR3.aEntries[i].pMap = 0;
744 pVM->pgm.s.PhysTlbR3.aEntries[i].pv = 0;
745 }
746
747 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
748 PGM_UNLOCK(pVM);
749}
750
751
752/**
753 * Invalidates a page mapping TLB entry
754 *
755 * @param pVM The cross context VM structure.
756 * @param GCPhys GCPhys entry to flush
757 *
758 * @note Caller is responsible for calling IEMTlbInvalidateAllPhysicalAllCpus
759 * when needed.
760 */
761void pgmPhysInvalidatePageMapTLBEntry(PVMCC pVM, RTGCPHYS GCPhys)
762{
763 PGM_LOCK_ASSERT_OWNER(pVM);
764
765 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushEntry);
766
767 unsigned const idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
768
769 pVM->pgm.s.PhysTlbR0.aEntries[idx].GCPhys = NIL_RTGCPHYS;
770 pVM->pgm.s.PhysTlbR0.aEntries[idx].pPage = 0;
771 pVM->pgm.s.PhysTlbR0.aEntries[idx].pv = 0;
772
773 pVM->pgm.s.PhysTlbR3.aEntries[idx].GCPhys = NIL_RTGCPHYS;
774 pVM->pgm.s.PhysTlbR3.aEntries[idx].pPage = 0;
775 pVM->pgm.s.PhysTlbR3.aEntries[idx].pMap = 0;
776 pVM->pgm.s.PhysTlbR3.aEntries[idx].pv = 0;
777}
778
779
780/**
781 * Makes sure that there is at least one handy page ready for use.
782 *
783 * This will also take the appropriate actions when reaching water-marks.
784 *
785 * @returns VBox status code.
786 * @retval VINF_SUCCESS on success.
787 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
788 *
789 * @param pVM The cross context VM structure.
790 *
791 * @remarks Must be called from within the PGM critical section. It may
792 * nip back to ring-3/0 in some cases.
793 */
794static int pgmPhysEnsureHandyPage(PVMCC pVM)
795{
796 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
797
798 /*
799 * Do we need to do anything special?
800 */
801#ifdef IN_RING3
802 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
803#else
804 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
805#endif
806 {
807 /*
808 * Allocate pages only if we're out of them, or in ring-3, almost out.
809 */
810#ifdef IN_RING3
811 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
812#else
813 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
814#endif
815 {
816 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
817 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
818#ifdef IN_RING3
819 int rc = PGMR3PhysAllocateHandyPages(pVM);
820#else
821 int rc = pgmR0PhysAllocateHandyPages(pVM, VMMGetCpuId(pVM), false /*fRing3*/);
822#endif
823 if (RT_UNLIKELY(rc != VINF_SUCCESS))
824 {
825 if (RT_FAILURE(rc))
826 return rc;
827 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
828 if (!pVM->pgm.s.cHandyPages)
829 {
830 LogRel(("PGM: no more handy pages!\n"));
831 return VERR_EM_NO_MEMORY;
832 }
833 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
834 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
835#ifndef IN_RING3
836 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
837#endif
838 }
839 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
840 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
841 ("%u\n", pVM->pgm.s.cHandyPages),
842 VERR_PGM_HANDY_PAGE_IPE);
843 }
844 else
845 {
846 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
847 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
848#ifndef IN_RING3
849 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
850 {
851 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
852 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
853 }
854#endif
855 }
856 }
857
858 return VINF_SUCCESS;
859}
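/* Informal recap of the watermarks used above (their values live with the other PGM
 * internals): PGM_HANDY_PAGES_SET_FF raises VM_FF_PGM_NEED_HANDY_PAGES,
 * PGM_HANDY_PAGES_RZ_TO_R3 additionally forces a trip back to ring-3, and
 * PGM_HANDY_PAGES_R3_ALLOC / PGM_HANDY_PAGES_RZ_ALLOC are the points at which an
 * actual allocation is attempted in the respective context. */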
860
861
862/**
863 * Replace a zero or shared page with new page that we can write to.
864 *
865 * @returns The following VBox status codes.
866 * @retval VINF_SUCCESS on success, pPage is modified.
867 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
868 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
869 *
870 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
871 *
872 * @param pVM The cross context VM structure.
873 * @param pPage The physical page tracking structure. This will
874 * be modified on success.
875 * @param GCPhys The address of the page.
876 *
877 * @remarks Must be called from within the PGM critical section. It may
878 * nip back to ring-3/0 in some cases.
879 *
880 * @remarks This function shouldn't really fail, however if it does
881 * it probably means we've screwed up the size of handy pages and/or
882 * the low-water mark. Or, that some device I/O is causing a lot of
883 * pages to be allocated while the host is in a low-memory
884 * condition. This latter should be handled elsewhere and in a more
885 * controlled manner, it's on the @bugref{3170} todo list...
886 */
887int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
888{
889 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
890
891 /*
892 * Prereqs.
893 */
894 PGM_LOCK_ASSERT_OWNER(pVM);
895 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
896 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
897
898# ifdef PGM_WITH_LARGE_PAGES
899 /*
900 * Try allocate a large page if applicable.
901 */
902 if ( PGMIsUsingLargePages(pVM)
903 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
904 && !VM_IS_NEM_ENABLED(pVM)) /** @todo NEM: Implement large pages support. */
905 {
906 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
907 PPGMPAGE pBasePage;
908
909 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
910 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
911 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
912 {
913 rc = pgmPhysAllocLargePage(pVM, GCPhys);
914 if (rc == VINF_SUCCESS)
915 return rc;
916 }
917 /* Mark the base as type page table, so we don't check over and over again. */
918 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
919
920 /* fall back to 4KB pages. */
921 }
922# endif
923
924 /*
925 * Flush any shadow page table mappings of the page.
926 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
927 */
928 bool fFlushTLBs = false;
929 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
930 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
931
932 /*
933 * Ensure that we've got a page handy, take it and use it.
934 */
935 int rc2 = pgmPhysEnsureHandyPage(pVM);
936 if (RT_FAILURE(rc2))
937 {
938 if (fFlushTLBs)
939 PGM_INVL_ALL_VCPU_TLBS(pVM);
940 Assert(rc2 == VERR_EM_NO_MEMORY);
941 return rc2;
942 }
943 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
944 PGM_LOCK_ASSERT_OWNER(pVM);
945 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
946 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
947
948 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
949 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
950 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_GMMPAGEDESC_PHYS);
951 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
952 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
953 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
954
955 /*
956 * There are one or two actions to be taken the next time we allocate handy pages:
957 * - Tell the GMM (global memory manager) what the page is being used for.
958 * (Speeds up replacement operations - sharing and defragmenting.)
959 * - If the current backing is shared, it must be freed.
960 */
961 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
962 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
963
964 void const *pvSharedPage = NULL;
965 if (PGM_PAGE_IS_SHARED(pPage))
966 {
967 /* Mark this shared page for freeing/dereferencing. */
968 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
969 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
970
971 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
972 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
973 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageReplaceShared));
974 pVM->pgm.s.cSharedPages--;
975
976 /* Grab the address of the page so we can make a copy later on. (safe) */
977 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
978 AssertRC(rc);
979 }
980 else
981 {
982 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
983 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatRZPageReplaceZero);
984 pVM->pgm.s.cZeroPages--;
985 }
986
987 /*
988 * Do the PGMPAGE modifications.
989 */
990 pVM->pgm.s.cPrivatePages++;
991 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
992 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
993 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
994 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
995 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
996 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
997
998 /* Copy the shared page contents to the replacement page. */
999 if (pvSharedPage)
1000 {
1001 /* Get the virtual address of the new page. */
1002 PGMPAGEMAPLOCK PgMpLck;
1003 void *pvNewPage;
1004 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
1005 if (RT_SUCCESS(rc))
1006 {
1007 memcpy(pvNewPage, pvSharedPage, GUEST_PAGE_SIZE); /** @todo write ASMMemCopyPage */
1008 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1009 }
1010 }
1011
1012 if ( fFlushTLBs
1013 && rc != VINF_PGM_GCPHYS_ALIASED)
1014 PGM_INVL_ALL_VCPU_TLBS(pVM);
1015
1016 /*
1017 * Notify NEM about the mapping change for this page.
1018 *
1019 * Note! Shadow ROM pages are complicated as they can definitely be
1020 * allocated while not visible, so play safe.
1021 */
1022 if (VM_IS_NEM_ENABLED(pVM))
1023 {
1024 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1025 if ( enmType != PGMPAGETYPE_ROM_SHADOW
1026 || pgmPhysGetPage(pVM, GCPhys) == pPage)
1027 {
1028 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1029 rc2 = NEMHCNotifyPhysPageAllocated(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, HCPhys,
1030 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1031 if (RT_SUCCESS(rc2))
1032 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1033 else
1034 rc = rc2;
1035 }
1036 }
1037
1038 return rc;
1039}
1040
1041#ifdef PGM_WITH_LARGE_PAGES
1042
1043/**
1044 * Replace a 2 MB range of zero pages with new pages that we can write to.
1045 *
1046 * @returns The following VBox status codes.
1047 * @retval VINF_SUCCESS on success, pPage is modified.
1048 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1049 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
1050 *
1051 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
1052 *
1053 * @param pVM The cross context VM structure.
1054 * @param GCPhys The address of the page.
1055 *
1056 * @remarks Must be called from within the PGM critical section. It may block
1057 * on GMM and host mutexes/locks, leaving HM context.
1058 */
1059int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys)
1060{
1061 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
1062 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
1063 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1064
1065 /*
1066 * Check Prereqs.
1067 */
1068 PGM_LOCK_ASSERT_OWNER(pVM);
1069 Assert(PGMIsUsingLargePages(pVM));
1070
1071 /*
1072 * All the pages must be unallocated RAM pages, i.e. mapping the ZERO page.
1073 */
1074 PPGMPAGE pFirstPage;
1075 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
1076 if ( RT_SUCCESS(rc)
1077 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM
1078 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
1079 {
1080 /*
1081 * Further they should have PDE type set to PGM_PAGE_PDE_TYPE_DONTCARE,
1082 * since they are unallocated.
1083 */
1084 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
1085 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
1086 if (uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE)
1087 {
1088 /*
1089 * Now, make sure all the other pages in the 2 MB range are in the same state.
1090 */
1091 GCPhys = GCPhysBase;
1092 unsigned cLeft = _2M / GUEST_PAGE_SIZE;
1093 while (cLeft-- > 0)
1094 {
1095 PPGMPAGE pSubPage = pgmPhysGetPage(pVM, GCPhys);
1096 if ( pSubPage
1097 && PGM_PAGE_GET_TYPE(pSubPage) == PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
1098 && PGM_PAGE_GET_STATE(pSubPage) == PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
1099 {
1100 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
1101 GCPhys += GUEST_PAGE_SIZE;
1102 }
1103 else
1104 {
1105 LogFlow(("pgmPhysAllocLargePage: Found page %RGp with wrong attributes (type=%d; state=%d); cancel check.\n",
1106 GCPhys, pSubPage ? PGM_PAGE_GET_TYPE(pSubPage) : -1, pSubPage ? PGM_PAGE_GET_STATE(pSubPage) : -1));
1107
1108 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
1109 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
1110 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
1111 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1112 }
1113 }
1114
1115 /*
1116 * Do the allocation.
1117 */
1118# ifdef IN_RING3
1119 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE, GCPhysBase, NULL);
1120# elif defined(IN_RING0)
1121 rc = pgmR0PhysAllocateLargePage(pVM, VMMGetCpuId(pVM), GCPhysBase);
1122# else
1123# error "Port me"
1124# endif
1125 if (RT_SUCCESS(rc))
1126 {
1127 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
1128 pVM->pgm.s.cLargePages++;
1129 return VINF_SUCCESS;
1130 }
1131
1132 /* If we fail once, it most likely means the host's memory is too
1133 fragmented; don't bother trying again. */
1134 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
1135 return rc;
1136 }
1137 }
1138 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1139}
1140
1141
1142/**
1143 * Recheck the entire 2 MB range to see if we can use it again as a large page.
1144 *
1145 * @returns The following VBox status codes.
1146 * @retval VINF_SUCCESS on success, the large page can be used again
1147 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
1148 *
1149 * @param pVM The cross context VM structure.
1150 * @param GCPhys The address of the page.
1151 * @param pLargePage Page structure of the base page
1152 */
1153int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
1154{
1155 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
1156
1157 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1158
1159 GCPhys &= X86_PDE2M_PAE_PG_MASK;
1160
1161 /* Check the base page. */
1162 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
1163 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
1164 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
1165 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1166 {
1167 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
1168 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1169 }
1170
1171 STAM_PROFILE_START(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1172 /* Check all remaining pages in the 2 MB range. */
1173 unsigned i;
1174 GCPhys += GUEST_PAGE_SIZE;
1175 for (i = 1; i < _2M / GUEST_PAGE_SIZE; i++)
1176 {
1177 PPGMPAGE pPage;
1178 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1179 AssertRCBreak(rc);
1180
1181 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
1182 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
1183 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
1184 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1185 {
1186 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
1187 break;
1188 }
1189
1190 GCPhys += GUEST_PAGE_SIZE;
1191 }
1192 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1193
1194 if (i == _2M / GUEST_PAGE_SIZE)
1195 {
1196 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
1197 pVM->pgm.s.cLargePagesDisabled--;
1198 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
1199 return VINF_SUCCESS;
1200 }
1201
1202 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1203}
1204
1205#endif /* PGM_WITH_LARGE_PAGES */
1206
1207
1208/**
1209 * Deal with a write monitored page.
1210 *
1211 * @returns VBox strict status code.
1212 *
1213 * @param pVM The cross context VM structure.
1214 * @param pPage The physical page tracking structure.
1215 * @param GCPhys The guest physical address of the page.
1216 * PGMPhysReleasePageMappingLock() passes NIL_RTGCPHYS in a
1217 * very unlikely situation where it is okay that we let NEM
1218 * fix the page access in a lazy fashion.
1219 *
1220 * @remarks Called from within the PGM critical section.
1221 */
1222void pgmPhysPageMakeWriteMonitoredWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1223{
1224 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
1225 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1226 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1227 Assert(pVM->pgm.s.cMonitoredPages > 0);
1228 pVM->pgm.s.cMonitoredPages--;
1229 pVM->pgm.s.cWrittenToPages++;
1230
1231#ifdef VBOX_WITH_NATIVE_NEM
1232 /*
1233 * Notify NEM about the protection change so we won't spin forever.
1234 *
1235 * Note! NEM needs to be able to lazily correct page protection as we cannot
1236 * really get it 100% right here it seems. The page pool does this too.
1237 */
1238 if (VM_IS_NEM_ENABLED(pVM) && GCPhys != NIL_RTGCPHYS)
1239 {
1240 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1241 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1242 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1243 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
1244 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhys) : NULL,
1245 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1246 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1247 }
1248#else
1249 RT_NOREF(GCPhys);
1250#endif
1251}
1252
1253
1254/**
1255 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
1256 *
1257 * @returns VBox strict status code.
1258 * @retval VINF_SUCCESS on success.
1259 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1260 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1261 *
1262 * @param pVM The cross context VM structure.
1263 * @param pPage The physical page tracking structure.
1264 * @param GCPhys The address of the page.
1265 *
1266 * @remarks Called from within the PGM critical section.
1267 */
1268int pgmPhysPageMakeWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1269{
1270 PGM_LOCK_ASSERT_OWNER(pVM);
1271 switch (PGM_PAGE_GET_STATE(pPage))
1272 {
1273 case PGM_PAGE_STATE_WRITE_MONITORED:
1274 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
1275 RT_FALL_THRU();
1276 default: /* to shut up GCC */
1277 case PGM_PAGE_STATE_ALLOCATED:
1278 return VINF_SUCCESS;
1279
1280 /*
1281 * Zero pages can be dummy pages for MMIO or reserved memory,
1282 * so we need to check the flags before joining cause with
1283 * shared page replacement.
1284 */
1285 case PGM_PAGE_STATE_ZERO:
1286 if (PGM_PAGE_IS_MMIO(pPage))
1287 return VERR_PGM_PHYS_PAGE_RESERVED;
1288 RT_FALL_THRU();
1289 case PGM_PAGE_STATE_SHARED:
1290 return pgmPhysAllocPage(pVM, pPage, GCPhys);
1291
1292 /* Not allowed to write to ballooned pages. */
1293 case PGM_PAGE_STATE_BALLOONED:
1294 return VERR_PGM_PHYS_PAGE_BALLOONED;
1295 }
1296}
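/* Illustrative caller-side sketch (assumed variables): the usual pattern for code that
 * is about to write to a guest page and needs it in the ALLOCATED state first, mirroring
 * what pgmPhysGCPhys2CCPtrInternalDepr further down does:
 *
 *     if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
 *     {
 *         rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
 *         if (RT_FAILURE(rc))
 *             return rc;   // e.g. VERR_PGM_PHYS_PAGE_RESERVED or VERR_PGM_PHYS_PAGE_BALLOONED
 *     }
 */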
1297
1298
1299/**
1300 * Internal usage: Map the page specified by its GMM ID.
1301 *
1302 * This is similar to pgmPhysPageMap
1303 *
1304 * @returns VBox status code.
1305 *
1306 * @param pVM The cross context VM structure.
1307 * @param idPage The Page ID.
1308 * @param HCPhys The physical address (for SUPR0HCPhysToVirt).
1309 * @param ppv Where to store the mapping address.
1310 *
1311 * @remarks Called from within the PGM critical section. The mapping is only
1312 * valid while you are inside this section.
1313 */
1314int pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
1315{
1316 /*
1317 * Validation.
1318 */
1319 PGM_LOCK_ASSERT_OWNER(pVM);
1320 AssertReturn(HCPhys && !(HCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1321 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
1322 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
1323
1324#ifdef IN_RING0
1325# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1326 return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)GUEST_PAGE_OFFSET_MASK, ppv);
1327# else
1328 return GMMR0PageIdToVirt(pVM, idPage, ppv);
1329# endif
1330
1331#else
1332 /*
1333 * Find/make Chunk TLB entry for the mapping chunk.
1334 */
1335 PPGMCHUNKR3MAP pMap;
1336 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1337 if (pTlbe->idChunk == idChunk)
1338 {
1339 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1340 pMap = pTlbe->pChunk;
1341 }
1342 else
1343 {
1344 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1345
1346 /*
1347 * Find the chunk, map it if necessary.
1348 */
1349 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1350 if (pMap)
1351 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1352 else
1353 {
1354 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1355 if (RT_FAILURE(rc))
1356 return rc;
1357 }
1358
1359 /*
1360 * Enter it into the Chunk TLB.
1361 */
1362 pTlbe->idChunk = idChunk;
1363 pTlbe->pChunk = pMap;
1364 }
1365
1366 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << GUEST_PAGE_SHIFT);
1367 return VINF_SUCCESS;
1368#endif
1369}
1370
1371
1372/**
1373 * Maps a page into the current virtual address space so it can be accessed.
1374 *
1375 * @returns VBox status code.
1376 * @retval VINF_SUCCESS on success.
1377 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1378 *
1379 * @param pVM The cross context VM structure.
1380 * @param pPage The physical page tracking structure.
1381 * @param GCPhys The address of the page.
1382 * @param ppMap Where to store the address of the mapping tracking structure.
1383 * @param ppv Where to store the mapping address of the page. The page
1384 * offset is masked off!
1385 *
1386 * @remarks Called from within the PGM critical section.
1387 */
1388static int pgmPhysPageMapCommon(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1389{
1390 PGM_LOCK_ASSERT_OWNER(pVM);
1391 NOREF(GCPhys);
1392
1393 /*
1394 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1395 */
1396 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1397 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1398 {
1399 /* Decode the page id to a page in a MMIO2 ram range. */
1400 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1401 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1402 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1403 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
1404 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1405 pPage->s.idPage, pPage->s.uStateY),
1406 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1407 PPGMREGMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1408 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1409 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1410 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> GUEST_PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1411 *ppMap = NULL;
1412# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1413 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1414# elif defined(IN_RING0)
1415 *ppv = (uint8_t *)pMmio2Range->pvR0 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
1416 return VINF_SUCCESS;
1417# else
1418 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
1419 return VINF_SUCCESS;
1420# endif
1421 }
1422
1423# ifdef VBOX_WITH_PGM_NEM_MODE
1424 if (pVM->pgm.s.fNemMode)
1425 {
1426# ifdef IN_RING3
1427 /*
1428 * Find the corresponding RAM range and use that to locate the mapping address.
1429 */
1430 /** @todo Use the page ID for some kind of indexing as we do with MMIO2 above. */
1431 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
1432 AssertLogRelMsgReturn(pRam, ("%RTGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
1433 size_t const idxPage = (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
1434 Assert(pPage == &pRam->aPages[idxPage]);
1435 *ppMap = NULL;
1436 *ppv = (uint8_t *)pRam->pvR3 + (idxPage << GUEST_PAGE_SHIFT);
1437 return VINF_SUCCESS;
1438# else
1439 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1440# endif
1441 }
1442# endif
1443
1444 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1445 if (idChunk == NIL_GMM_CHUNKID)
1446 {
1447 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1448 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1449 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1450 {
1451 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1452 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1453 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage)== pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1454 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1455 *ppv = pVM->pgm.s.abZeroPg;
1456 }
1457 else
1458 *ppv = pVM->pgm.s.abZeroPg;
1459 *ppMap = NULL;
1460 return VINF_SUCCESS;
1461 }
1462
1463# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1464 /*
1465 * Just use the physical address.
1466 */
1467 *ppMap = NULL;
1468 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1469
1470# elif defined(IN_RING0)
1471 /*
1472 * Go by page ID thru GMMR0.
1473 */
1474 *ppMap = NULL;
1475 return GMMR0PageIdToVirt(pVM, PGM_PAGE_GET_PAGEID(pPage), ppv);
1476
1477# else
1478 /*
1479 * Find/make Chunk TLB entry for the mapping chunk.
1480 */
1481 PPGMCHUNKR3MAP pMap;
1482 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1483 if (pTlbe->idChunk == idChunk)
1484 {
1485 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1486 pMap = pTlbe->pChunk;
1487 AssertPtr(pMap->pv);
1488 }
1489 else
1490 {
1491 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1492
1493 /*
1494 * Find the chunk, map it if necessary.
1495 */
1496 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1497 if (pMap)
1498 {
1499 AssertPtr(pMap->pv);
1500 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1501 }
1502 else
1503 {
1504 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1505 if (RT_FAILURE(rc))
1506 return rc;
1507 AssertPtr(pMap->pv);
1508 }
1509
1510 /*
1511 * Enter it into the Chunk TLB.
1512 */
1513 pTlbe->idChunk = idChunk;
1514 pTlbe->pChunk = pMap;
1515 }
1516
1517 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << GUEST_PAGE_SHIFT);
1518 *ppMap = pMap;
1519 return VINF_SUCCESS;
1520# endif /* !IN_RING0 */
1521}
1522
1523
1524/**
1525 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1526 *
1527 * This is typically used in paths where we cannot use the TLB methods (like ROM
1528 * pages) or where there is no point in using them since we won't get many hits.
1529 *
1530 * @returns VBox strict status code.
1531 * @retval VINF_SUCCESS on success.
1532 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1533 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1534 *
1535 * @param pVM The cross context VM structure.
1536 * @param pPage The physical page tracking structure.
1537 * @param GCPhys The address of the page.
1538 * @param ppv Where to store the mapping address of the page. The page
1539 * offset is masked off!
1540 *
1541 * @remarks Called from within the PGM critical section. The mapping is only
1542 * valid while you are inside the section.
1543 */
1544int pgmPhysPageMakeWritableAndMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1545{
1546 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1547 if (RT_SUCCESS(rc))
1548 {
1549 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1550 PPGMPAGEMAP pMapIgnore;
1551 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1552 if (RT_FAILURE(rc2)) /* preserve rc */
1553 rc = rc2;
1554 }
1555 return rc;
1556}
1557
1558
1559/**
1560 * Maps a page into the current virtual address space so it can be accessed for
1561 * both writing and reading.
1562 *
1563 * This is typically used in paths where we cannot use the TLB methods (like ROM
1564 * pages) or where there is no point in using them since we won't get many hits.
1565 *
1566 * @returns VBox status code.
1567 * @retval VINF_SUCCESS on success.
1568 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1569 *
1570 * @param pVM The cross context VM structure.
1571 * @param pPage The physical page tracking structure. Must be in the
1572 * allocated state.
1573 * @param GCPhys The address of the page.
1574 * @param ppv Where to store the mapping address of the page. The page
1575 * offset is masked off!
1576 *
1577 * @remarks Called from within the PGM critical section. The mapping is only
1578 * valid while you are inside the section.
1579 */
1580int pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1581{
1582 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1583 PPGMPAGEMAP pMapIgnore;
1584 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1585}
1586
1587
1588/**
1589 * Maps a page into the current virtual address space so it can be accessed for
1590 * reading.
1591 *
1592 * This is typically used in paths where we cannot use the TLB methods (like ROM
1593 * pages) or where there is no point in using them since we won't get many hits.
1594 *
1595 * @returns VBox status code.
1596 * @retval VINF_SUCCESS on success.
1597 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1598 *
1599 * @param pVM The cross context VM structure.
1600 * @param pPage The physical page tracking structure.
1601 * @param GCPhys The address of the page.
1602 * @param ppv Where to store the mapping address of the page. The page
1603 * offset is masked off!
1604 *
1605 * @remarks Called from within the PGM critical section. The mapping is only
1606 * valid while you are inside this section.
1607 */
1608int pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1609{
1610 PPGMPAGEMAP pMapIgnore;
1611 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1612}
1613
1614
1615/**
1616 * Load a guest page into the ring-3 physical TLB.
1617 *
1618 * @returns VBox status code.
1619 * @retval VINF_SUCCESS on success
1620 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1621 * @param pVM The cross context VM structure.
1622 * @param GCPhys The guest physical address in question.
1623 */
1624int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys)
1625{
1626 PGM_LOCK_ASSERT_OWNER(pVM);
1627
1628 /*
1629 * Find the ram range and page and hand it over to the with-page function.
1630 * 99.8% of requests are expected to be in the first range.
1631 */
1632 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1633 if (!pPage)
1634 {
1635 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1636 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1637 }
1638
1639 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1640}
1641
1642
1643/**
1644 * Load a guest page into the ring-3 physical TLB.
1645 *
1646 * @returns VBox status code.
1647 * @retval VINF_SUCCESS on success
1648 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1649 *
1650 * @param pVM The cross context VM structure.
1651 * @param pPage Pointer to the PGMPAGE structure corresponding to
1652 * GCPhys.
1653 * @param GCPhys The guest physical address in question.
1654 */
1655int pgmPhysPageLoadIntoTlbWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1656{
1657 PGM_LOCK_ASSERT_OWNER(pVM);
1658 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1659
1660 /*
1661 * Map the page.
1662 * Make a special case for the zero page as it is kind of special.
1663 */
1664 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1665 if ( !PGM_PAGE_IS_ZERO(pPage)
1666 && !PGM_PAGE_IS_BALLOONED(pPage))
1667 {
1668 void *pv;
1669 PPGMPAGEMAP pMap;
1670 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1671 if (RT_FAILURE(rc))
1672 return rc;
1673# ifndef IN_RING0
1674 pTlbe->pMap = pMap;
1675# endif
1676 pTlbe->pv = pv;
1677 Assert(!((uintptr_t)pTlbe->pv & GUEST_PAGE_OFFSET_MASK));
1678 }
1679 else
1680 {
1681 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1682# ifndef IN_RING0
1683 pTlbe->pMap = NULL;
1684# endif
1685 pTlbe->pv = pVM->pgm.s.abZeroPg;
1686 }
1687# ifdef PGM_WITH_PHYS_TLB
1688 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1689 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1690 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1691 else
1692 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1693# else
1694 pTlbe->GCPhys = NIL_RTGCPHYS;
1695# endif
1696 pTlbe->pPage = pPage;
1697 return VINF_SUCCESS;
1698}
1699
1700
1701/**
1702 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1703 * own the PGM lock and therefore not need to lock the mapped page.
1704 *
1705 * @returns VBox status code.
1706 * @retval VINF_SUCCESS on success.
1707 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1708 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1709 *
1710 * @param pVM The cross context VM structure.
1711 * @param GCPhys The guest physical address of the page that should be mapped.
1712 * @param pPage Pointer to the PGMPAGE structure for the page.
1713 * @param ppv Where to store the address corresponding to GCPhys.
1714 *
1715 * @internal
1716 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1717 */
1718int pgmPhysGCPhys2CCPtrInternalDepr(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1719{
1720 int rc;
1721 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1722 PGM_LOCK_ASSERT_OWNER(pVM);
1723 pVM->pgm.s.cDeprecatedPageLocks++;
1724
1725 /*
1726 * Make sure the page is writable.
1727 */
1728 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1729 {
1730 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1731 if (RT_FAILURE(rc))
1732 return rc;
1733 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1734 }
1735 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1736
1737 /*
1738 * Get the mapping address.
1739 */
1740 PPGMPAGEMAPTLBE pTlbe;
1741 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1742 if (RT_FAILURE(rc))
1743 return rc;
1744 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1745 return VINF_SUCCESS;
1746}
1747
1748
1749/**
1750 * Locks a page mapping for writing.
1751 *
1752 * @param pVM The cross context VM structure.
1753 * @param pPage The page.
1754 * @param pTlbe The mapping TLB entry for the page.
1755 * @param pLock The lock structure (output).
1756 */
1757DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1758{
1759# ifndef IN_RING0
1760 PPGMPAGEMAP pMap = pTlbe->pMap;
1761 if (pMap)
1762 pMap->cRefs++;
1763# else
1764 RT_NOREF(pTlbe);
1765# endif
1766
1767 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1768 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1769 {
1770 if (cLocks == 0)
1771 pVM->pgm.s.cWriteLockedPages++;
1772 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1773 }
1774 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1775 {
1776 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1777 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1778# ifndef IN_RING0
1779 if (pMap)
1780 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1781# endif
1782 }
1783
1784 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1785# ifndef IN_RING0
1786 pLock->pvMap = pMap;
1787# else
1788 pLock->pvMap = NULL;
1789# endif
1790}
1791
1792/**
1793 * Locks a page mapping for reading.
1794 *
1795 * @param pVM The cross context VM structure.
1796 * @param pPage The page.
1797 * @param pTlbe The mapping TLB entry for the page.
1798 * @param pLock The lock structure (output).
1799 */
1800DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1801{
1802# ifndef IN_RING0
1803 PPGMPAGEMAP pMap = pTlbe->pMap;
1804 if (pMap)
1805 pMap->cRefs++;
1806# else
1807 RT_NOREF(pTlbe);
1808# endif
1809
1810 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1811 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1812 {
1813 if (cLocks == 0)
1814 pVM->pgm.s.cReadLockedPages++;
1815 PGM_PAGE_INC_READ_LOCKS(pPage);
1816 }
1817 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1818 {
1819 PGM_PAGE_INC_READ_LOCKS(pPage);
1820 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1821# ifndef IN_RING0
1822 if (pMap)
1823 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1824# endif
1825 }
1826
1827 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1828# ifndef IN_RING0
1829 pLock->pvMap = pMap;
1830# else
1831 pLock->pvMap = NULL;
1832# endif
1833}
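

/*
 * Informal note on the two locking helpers above: the per-page read/write lock
 * counts are small saturating counters.  Once a count would pass
 * PGM_PAGE_MAX_LOCKS - 1 it is pinned at the maximum and the page is treated as
 * permanently locked (in ring-3 an extra pMap reference is also taken so the
 * chunk cannot be unmapped), which is why the code above asserts loudly instead
 * of failing the request.
 */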
1834
1835
1836/**
1837 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1838 * own the PGM lock and have access to the page structure.
1839 *
1840 * @returns VBox status code.
1841 * @retval VINF_SUCCESS on success.
1842 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1843 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1844 *
1845 * @param pVM The cross context VM structure.
1846 * @param GCPhys The guest physical address of the page that should be mapped.
1847 * @param pPage Pointer to the PGMPAGE structure for the page.
1848 * @param ppv Where to store the address corresponding to GCPhys.
1849 * @param pLock Where to store the lock information that
1850 * pgmPhysReleaseInternalPageMappingLock needs.
1851 *
1852 * @internal
1853 */
1854int pgmPhysGCPhys2CCPtrInternal(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1855{
1856 int rc;
1857 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1858 PGM_LOCK_ASSERT_OWNER(pVM);
1859
1860 /*
1861 * Make sure the page is writable.
1862 */
1863 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1864 {
1865 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1866 if (RT_FAILURE(rc))
1867 return rc;
1868 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1869 }
1870 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1871
1872 /*
1873 * Do the job.
1874 */
1875 PPGMPAGEMAPTLBE pTlbe;
1876 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1877 if (RT_FAILURE(rc))
1878 return rc;
1879 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1880 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1881 return VINF_SUCCESS;
1882}
1883
1884
1885/**
1886 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1887 * own the PGM lock and have access to the page structure.
1888 *
1889 * @returns VBox status code.
1890 * @retval VINF_SUCCESS on success.
1891 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1892 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1893 *
1894 * @param pVM The cross context VM structure.
1895 * @param GCPhys The guest physical address of the page that should be mapped.
1896 * @param pPage Pointer to the PGMPAGE structure for the page.
1897 * @param ppv Where to store the address corresponding to GCPhys.
1898 * @param pLock Where to store the lock information that
1899 * pgmPhysReleaseInternalPageMappingLock needs.
1900 *
1901 * @internal
1902 */
1903int pgmPhysGCPhys2CCPtrInternalReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1904{
1905 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1906 PGM_LOCK_ASSERT_OWNER(pVM);
1907 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1908
1909 /*
1910 * Do the job.
1911 */
1912 PPGMPAGEMAPTLBE pTlbe;
1913 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1914 if (RT_FAILURE(rc))
1915 return rc;
1916 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1917 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1918 return VINF_SUCCESS;
1919}
1920
1921
1922/**
1923 * Requests the mapping of a guest page into the current context.
1924 *
1925 * This API should only be used for very short term mappings, as it will consume scarce
1926 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1927 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1928 *
1929 * This API will assume your intention is to write to the page, and will
1930 * therefore replace shared and zero pages. If you do not intend to modify
1931 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1932 *
1933 * @returns VBox status code.
1934 * @retval VINF_SUCCESS on success.
1935 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1936 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1937 *
1938 * @param pVM The cross context VM structure.
1939 * @param GCPhys The guest physical address of the page that should be
1940 * mapped.
1941 * @param ppv Where to store the address corresponding to GCPhys.
1942 * @param pLock Where to store the lock information that
1943 * PGMPhysReleasePageMappingLock needs.
1944 *
1945 * @remarks The caller is responsible for dealing with access handlers.
1946 * @todo Add an informational return code for pages with access handlers?
1947 *
1948 * @remark Avoid calling this API from within critical sections (other than
1949 * the PGM one) because of the deadlock risk. External threads may
1950 * need to delegate jobs to the EMTs.
1951 * @remarks Only one page is mapped! Make no assumption about what's after or
1952 * before the returned page!
1953 * @thread Any thread.
1954 */
1955VMM_INT_DECL(int) PGMPhysGCPhys2CCPtr(PVMCC pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1956{
1957 int rc = PGM_LOCK(pVM);
1958 AssertRCReturn(rc, rc);
1959
1960 /*
1961 * Query the Physical TLB entry for the page (may fail).
1962 */
1963 PPGMPAGEMAPTLBE pTlbe;
1964 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1965 if (RT_SUCCESS(rc))
1966 {
1967 /*
1968 * If the page is shared, the zero page, or being write monitored
1969 * it must be converted to a page that's writable if possible.
1970 */
1971 PPGMPAGE pPage = pTlbe->pPage;
1972 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1973 {
1974 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1975 if (RT_SUCCESS(rc))
1976 {
1977 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1978 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1979 }
1980 }
1981 if (RT_SUCCESS(rc))
1982 {
1983 /*
1984 * Now, just perform the locking and calculate the return address.
1985 */
1986 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1987 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1988 }
1989 }
1990
1991 PGM_UNLOCK(pVM);
1992 return rc;
1993}
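

/*
 * Illustrative sketch (not part of the build): the typical caller pattern for
 * the write mapping API above.  GCPhys and the byte written are hypothetical.
 *
 *     PGMPAGEMAPLOCK Lock;
 *     void          *pv;
 *     int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         *(uint8_t *)pv = 0x90;                     // modify the mapped page
 *         PGMPhysReleasePageMappingLock(pVM, &Lock); // release ASAP
 *     }
 */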
1994
1995
1996/**
1997 * Requests the mapping of a guest page into the current context.
1998 *
1999 * This API should only be used for very short term mappings, as it will consume scarce
2000 * resources (R0 and GC) in the mapping cache. When you're done with the page,
2001 * call PGMPhysReleasePageMappingLock() ASAP to release it.
2002 *
2003 * @returns VBox status code.
2004 * @retval VINF_SUCCESS on success.
2005 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2006 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2007 *
2008 * @param pVM The cross context VM structure.
2009 * @param GCPhys The guest physical address of the page that should be
2010 * mapped.
2011 * @param ppv Where to store the address corresponding to GCPhys.
2012 * @param pLock Where to store the lock information that
2013 * PGMPhysReleasePageMappingLock needs.
2014 *
2015 * @remarks The caller is responsible for dealing with access handlers.
2016 * @todo Add an informational return code for pages with access handlers?
2017 *
2018 * @remarks Avoid calling this API from within critical sections (other than
2019 * the PGM one) because of the deadlock risk.
2020 * @remarks Only one page is mapped! Make no assumption about what's after or
2021 * before the returned page!
2022 * @thread Any thread.
2023 */
2024VMM_INT_DECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVMCC pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
2025{
2026 int rc = PGM_LOCK(pVM);
2027 AssertRCReturn(rc, rc);
2028
2029 /*
2030 * Query the Physical TLB entry for the page (may fail).
2031 */
2032 PPGMPAGEMAPTLBE pTlbe;
2033 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
2034 if (RT_SUCCESS(rc))
2035 {
2036        /* MMIO pages don't have any readable backing. */
2037 PPGMPAGE pPage = pTlbe->pPage;
2038 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
2039 rc = VERR_PGM_PHYS_PAGE_RESERVED;
2040 else
2041 {
2042 /*
2043 * Now, just perform the locking and calculate the return address.
2044 */
2045 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
2046 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
2047 }
2048 }
2049
2050 PGM_UNLOCK(pVM);
2051 return rc;
2052}
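

/*
 * Illustrative sketch (not part of the build): read-only variant of the pattern
 * above.  The mapping must not be written to and should be released as soon as
 * possible; GCPhys is hypothetical.
 *
 *     PGMPAGEMAPLOCK Lock;
 *     void const    *pv;
 *     int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         uint32_t const uFirstDword = *(uint32_t const *)pv;
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);
 *         RT_NOREF(uFirstDword);
 *     }
 */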
2053
2054
2055/**
2056 * Requests the mapping of a guest page given by virtual address into the current context.
2057 *
2058 * This API should only be used for very short term mappings, as it will consume
2059 * scarce resources (R0 and GC) in the mapping cache. When you're done
2060 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2061 *
2062 * This API will assume your intention is to write to the page, and will
2063 * therefore replace shared and zero pages. If you do not intend to modify
2064 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
2065 *
2066 * @returns VBox status code.
2067 * @retval VINF_SUCCESS on success.
2068 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2069 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2070 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2071 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2072 *
2073 * @param pVCpu The cross context virtual CPU structure.
2074 * @param GCPtr The guest virtual address of the page that should be
2075 * mapped.
2076 * @param ppv Where to store the address corresponding to GCPhys.
2077 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2078 *
2079 * @remark Avoid calling this API from within critical sections (other than
2080 * the PGM one) because of the deadlock risk.
2081 * @thread EMT
2082 */
2083VMM_INT_DECL(int) PGMPhysGCPtr2CCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
2084{
2085 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2086 RTGCPHYS GCPhys;
2087 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2088 if (RT_SUCCESS(rc))
2089 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2090 return rc;
2091}
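

/*
 * Illustrative sketch (not part of the build): mapping by guest virtual address.
 * Must be called on the EMT owning pVCpu; GCPtr, pvPatch and cbPatch are
 * hypothetical.
 *
 *     PGMPAGEMAPLOCK Lock;
 *     void          *pv;
 *     int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtr, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         memcpy(pv, pvPatch, cbPatch);
 *         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
 *     }
 */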
2092
2093
2094/**
2095 * Requests the mapping of a guest page given by virtual address into the current context.
2096 *
2097 * This API should only be used for very short term mappings, as it will consume
2098 * scarce resources (R0 and GC) in the mapping cache. When you're done
2099 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2100 *
2101 * @returns VBox status code.
2102 * @retval VINF_SUCCESS on success.
2103 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2104 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2105 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2106 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2107 *
2108 * @param pVCpu The cross context virtual CPU structure.
2109 * @param GCPtr The guest virtual address of the page that should be
2110 * mapped.
2111 * @param ppv Where to store the address corresponding to GCPtr.
2112 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2113 *
2114 * @remark Avoid calling this API from within critical sections (other than
2115 * the PGM one) because of the deadlock risk.
2116 * @thread EMT
2117 */
2118VMM_INT_DECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
2119{
2120 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2121 RTGCPHYS GCPhys;
2122 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2123 if (RT_SUCCESS(rc))
2124 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2125 return rc;
2126}
2127
2128
2129/**
2130 * Release the mapping of a guest page.
2131 *
2132 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
2133 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
2134 *
2135 * @param pVM The cross context VM structure.
2136 * @param pLock The lock structure initialized by the mapping function.
2137 */
2138VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2139{
2140# ifndef IN_RING0
2141 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
2142# endif
2143 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2144 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2145
2146 pLock->uPageAndType = 0;
2147 pLock->pvMap = NULL;
2148
2149 PGM_LOCK_VOID(pVM);
2150 if (fWriteLock)
2151 {
2152 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2153 Assert(cLocks > 0);
2154 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2155 {
2156 if (cLocks == 1)
2157 {
2158 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2159 pVM->pgm.s.cWriteLockedPages--;
2160 }
2161 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2162 }
2163
2164 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2165 { /* probably extremely likely */ }
2166 else
2167 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2168 }
2169 else
2170 {
2171 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2172 Assert(cLocks > 0);
2173 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2174 {
2175 if (cLocks == 1)
2176 {
2177 Assert(pVM->pgm.s.cReadLockedPages > 0);
2178 pVM->pgm.s.cReadLockedPages--;
2179 }
2180 PGM_PAGE_DEC_READ_LOCKS(pPage);
2181 }
2182 }
2183
2184# ifndef IN_RING0
2185 if (pMap)
2186 {
2187 Assert(pMap->cRefs >= 1);
2188 pMap->cRefs--;
2189 }
2190# endif
2191 PGM_UNLOCK(pVM);
2192}
2193
2194
2195#ifdef IN_RING3
2196/**
2197 * Release the mapping of multiple guest pages.
2198 *
2199 * This is the counterpart to PGMR3PhysBulkGCPhys2CCPtrExternal() and
2200 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal().
2201 *
2202 * @param pVM The cross context VM structure.
2203 * @param cPages Number of pages to unlock.
2204 * @param paLocks Array of lock structures initialized by the mapping
2205 * function.
2206 */
2207VMMDECL(void) PGMPhysBulkReleasePageMappingLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
2208{
2209 Assert(cPages > 0);
2210 bool const fWriteLock = (paLocks[0].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2211#ifdef VBOX_STRICT
2212 for (uint32_t i = 1; i < cPages; i++)
2213 {
2214 Assert(fWriteLock == ((paLocks[i].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE));
2215 AssertPtr(paLocks[i].uPageAndType);
2216 }
2217#endif
2218
2219 PGM_LOCK_VOID(pVM);
2220 if (fWriteLock)
2221 {
2222 /*
2223 * Write locks:
2224 */
2225 for (uint32_t i = 0; i < cPages; i++)
2226 {
2227 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2228 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2229 Assert(cLocks > 0);
2230 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2231 {
2232 if (cLocks == 1)
2233 {
2234 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2235 pVM->pgm.s.cWriteLockedPages--;
2236 }
2237 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2238 }
2239
2240 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2241 { /* probably extremely likely */ }
2242 else
2243 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2244
2245 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2246 if (pMap)
2247 {
2248 Assert(pMap->cRefs >= 1);
2249 pMap->cRefs--;
2250 }
2251
2252 /* Yield the lock: */
2253 if ((i & 1023) == 1023 && i + 1 < cPages)
2254 {
2255 PGM_UNLOCK(pVM);
2256 PGM_LOCK_VOID(pVM);
2257 }
2258 }
2259 }
2260 else
2261 {
2262 /*
2263 * Read locks:
2264 */
2265 for (uint32_t i = 0; i < cPages; i++)
2266 {
2267 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2268 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2269 Assert(cLocks > 0);
2270 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2271 {
2272 if (cLocks == 1)
2273 {
2274 Assert(pVM->pgm.s.cReadLockedPages > 0);
2275 pVM->pgm.s.cReadLockedPages--;
2276 }
2277 PGM_PAGE_DEC_READ_LOCKS(pPage);
2278 }
2279
2280 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2281 if (pMap)
2282 {
2283 Assert(pMap->cRefs >= 1);
2284 pMap->cRefs--;
2285 }
2286
2287 /* Yield the lock: */
2288 if ((i & 1023) == 1023 && i + 1 < cPages)
2289 {
2290 PGM_UNLOCK(pVM);
2291 PGM_LOCK_VOID(pVM);
2292 }
2293 }
2294 }
2295 PGM_UNLOCK(pVM);
2296
2297 RT_BZERO(paLocks, sizeof(paLocks[0]) * cPages);
2298}
2299#endif /* IN_RING3 */
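

/*
 * Illustrative sketch (not part of the build, ring-3 only): releasing a batch of
 * locks taken with one of the bulk mapping APIs named above.  The array size is
 * hypothetical; all entries must be of the same kind (all read or all write).
 *
 *     PGMPAGEMAPLOCK aLocks[16];
 *     // ... fill aLocks via PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal() ...
 *     PGMPhysBulkReleasePageMappingLocks(pVM, RT_ELEMENTS(aLocks), aLocks);
 */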
2300
2301
2302/**
2303 * Release the internal mapping of a guest page.
2304 *
2305 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
2306 * pgmPhysGCPhys2CCPtrInternalReadOnly.
2307 *
2308 * @param pVM The cross context VM structure.
2309 * @param pLock The lock structure initialized by the mapping function.
2310 *
2311 * @remarks Caller must hold the PGM lock.
2312 */
2313void pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2314{
2315 PGM_LOCK_ASSERT_OWNER(pVM);
2316 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
2317}
2318
2319
2320/**
2321 * Converts a GC physical address to a HC ring-3 pointer.
2322 *
2323 * @returns VINF_SUCCESS on success.
2324 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2325 * page but has no physical backing.
2326 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2327 * GC physical address.
2328 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2329 * a dynamic ram chunk boundary
2330 *
2331 * @param pVM The cross context VM structure.
2332 * @param GCPhys The GC physical address to convert.
2333 * @param pR3Ptr Where to store the R3 pointer on success.
2334 *
2335 * @deprecated Avoid when possible!
2336 */
2337int pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2338{
2339/** @todo this is kind of hacky and needs some more work. */
2340#ifndef DEBUG_sandervl
2341 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
2342#endif
2343
2344 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
2345 PGM_LOCK_VOID(pVM);
2346
2347 PPGMRAMRANGE pRam;
2348 PPGMPAGE pPage;
2349 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2350 if (RT_SUCCESS(rc))
2351 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2352
2353 PGM_UNLOCK(pVM);
2354 Assert(rc <= VINF_SUCCESS);
2355 return rc;
2356}
2357
2358
2359/**
2360 * Converts a guest pointer to a GC physical address.
2361 *
2362 * This uses the current CR3/CR0/CR4 of the guest.
2363 *
2364 * @returns VBox status code.
2365 * @param pVCpu The cross context virtual CPU structure.
2366 * @param GCPtr The guest pointer to convert.
2367 * @param pGCPhys Where to store the GC physical address.
2368 */
2369VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2370{
2371 PGMPTWALK Walk;
2372 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2373 if (pGCPhys && RT_SUCCESS(rc))
2374 *pGCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK);
2375 return rc;
2376}
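

/*
 * Illustrative sketch (not part of the build): translating a guest virtual
 * address with the current paging mode; the low GUEST_PAGE_OFFSET_MASK bits are
 * carried over verbatim.  GCPtr is hypothetical.
 *
 *     RTGCPHYS GCPhys;
 *     int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
 *     if (RT_SUCCESS(rc))
 *         Log(("GCPtr %RGv -> GCPhys %RGp\n", GCPtr, GCPhys));
 */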
2377
2378
2379/**
2380 * Converts a guest pointer to a HC physical address.
2381 *
2382 * This uses the current CR3/CR0/CR4 of the guest.
2383 *
2384 * @returns VBox status code.
2385 * @param pVCpu The cross context virtual CPU structure.
2386 * @param GCPtr The guest pointer to convert.
2387 * @param pHCPhys Where to store the HC physical address.
2388 */
2389VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2390{
2391 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2392 PGMPTWALK Walk;
2393 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2394 if (RT_SUCCESS(rc))
2395 rc = PGMPhysGCPhys2HCPhys(pVM, Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK), pHCPhys);
2396 return rc;
2397}
2398
2399
2400
2401#undef LOG_GROUP
2402#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2403
2404
2405#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2406/**
2407 * Cache PGMPhys memory access
2408 *
2409 * @param pVM The cross context VM structure.
2410 * @param pCache Cache structure pointer
2411 * @param GCPhys GC physical address
2412 * @param pbR3 HC pointer corresponding to physical page
2413 *
2414 * @thread EMT.
2415 */
2416static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2417{
2418 uint32_t iCacheIndex;
2419
2420 Assert(VM_IS_EMT(pVM));
2421
2422 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
2423 pbR3 = (uint8_t *)((uintptr_t)pbR3 & ~(uintptr_t)GUEST_PAGE_OFFSET_MASK);
2424
2425 iCacheIndex = ((GCPhys >> GUEST_PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2426
2427 ASMBitSet(&pCache->aEntries, iCacheIndex);
2428
2429 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2430 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2431}
2432#endif /* IN_RING3 */
2433
2434
2435/**
2436 * Deals with reading from a page with one or more ALL access handlers.
2437 *
2438 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2439 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2440 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2441 *
2442 * @param pVM The cross context VM structure.
2443 * @param pPage The page descriptor.
2444 * @param GCPhys The physical address to start reading at.
2445 * @param pvBuf Where to put the bits we read.
2446 * @param cb How much to read - less or equal to a page.
2447 * @param enmOrigin The origin of this call.
2448 */
2449static VBOXSTRICTRC pgmPhysReadHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb,
2450 PGMACCESSORIGIN enmOrigin)
2451{
2452 /*
2453 * The most frequent access here is MMIO and shadowed ROM.
2454     * The current code ASSUMES all these access handlers cover full pages!
2455 */
2456
2457 /*
2458     * Whatever we do, we need the source page, so map it first.
2459 */
2460 PGMPAGEMAPLOCK PgMpLck;
2461 const void *pvSrc = NULL;
2462 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2463/** @todo Check how this can work for MMIO pages? */
2464 if (RT_FAILURE(rc))
2465 {
2466 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2467 GCPhys, pPage, rc));
2468 memset(pvBuf, 0xff, cb);
2469 return VINF_SUCCESS;
2470 }
2471
2472 VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
2473
2474 /*
2475 * Deal with any physical handlers.
2476 */
2477 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2478 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2479 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2480 {
2481 PPGMPHYSHANDLER pCur;
2482 rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2483 if (RT_SUCCESS(rc))
2484 {
2485 Assert(pCur && GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
2486 Assert((pCur->Key & GUEST_PAGE_OFFSET_MASK) == 0);
2487 Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
2488#ifndef IN_RING3
2489 if (enmOrigin != PGMACCESSORIGIN_IEM)
2490 {
2491 /* Cannot reliably handle informational status codes in this context */
2492 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2493 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2494 }
2495#endif
2496 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2497 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler; Assert(pfnHandler);
2498 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
2499 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
2500
2501 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pCur->pszDesc) ));
2502 STAM_PROFILE_START(&pCur->Stat, h);
2503 PGM_LOCK_ASSERT_OWNER(pVM);
2504
2505 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2506 PGM_UNLOCK(pVM);
2507 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, uUser);
2508 PGM_LOCK_VOID(pVM);
2509
2510 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2511 pCur = NULL; /* might not be valid anymore. */
2512 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, false),
2513 ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
2514 if ( rcStrict != VINF_PGM_HANDLER_DO_DEFAULT
2515 && !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2516 {
2517 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2518 return rcStrict;
2519 }
2520 }
2521 else if (rc == VERR_NOT_FOUND)
2522 AssertLogRelMsgFailed(("rc=%Rrc GCPhys=%RGp cb=%#x\n", rc, GCPhys, cb));
2523 else
2524 AssertLogRelMsgFailedReturn(("rc=%Rrc GCPhys=%RGp cb=%#x\n", rc, GCPhys, cb), rc);
2525 }
2526
2527 /*
2528 * Take the default action.
2529 */
2530 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2531 {
2532 memcpy(pvBuf, pvSrc, cb);
2533 rcStrict = VINF_SUCCESS;
2534 }
2535 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2536 return rcStrict;
2537}
2538
2539
2540/**
2541 * Read physical memory.
2542 *
2543 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2544 * want to ignore those.
2545 *
2546 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2547 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2548 * @retval VINF_SUCCESS in all context - read completed.
2549 *
2550 * @retval VINF_EM_OFF in RC and R0 - read completed.
2551 * @retval VINF_EM_SUSPEND in RC and R0 - read completed.
2552 * @retval VINF_EM_RESET in RC and R0 - read completed.
2553 * @retval VINF_EM_HALT in RC and R0 - read completed.
2554 * @retval VINF_SELM_SYNC_GDT in RC only - read completed.
2555 *
2556 * @retval VINF_EM_DBG_STOP in RC and R0 - read completed.
2557 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - read completed.
2558 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2559 *
2560 * @retval VINF_IOM_R3_MMIO_READ in RC and R0.
2561 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2562 *
2563 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2564 *
2565 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2566 * haven't been cleared for strict status codes yet.
2567 *
2568 * @param pVM The cross context VM structure.
2569 * @param GCPhys Physical address start reading from.
2570 * @param pvBuf Where to put the read bits.
2571 * @param cbRead How many bytes to read.
2572 * @param enmOrigin The origin of this call.
2573 */
2574VMMDECL(VBOXSTRICTRC) PGMPhysRead(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2575{
2576 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2577 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2578
2579 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysRead));
2580 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2581
2582 PGM_LOCK_VOID(pVM);
2583
2584 /*
2585 * Copy loop on ram ranges.
2586 */
2587 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2588 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2589 for (;;)
2590 {
2591 /* Inside range or not? */
2592 if (pRam && GCPhys >= pRam->GCPhys)
2593 {
2594 /*
2595             * Must work our way through this range page by page.
2596 */
2597 RTGCPHYS off = GCPhys - pRam->GCPhys;
2598 while (off < pRam->cb)
2599 {
2600 unsigned iPage = off >> GUEST_PAGE_SHIFT;
2601 PPGMPAGE pPage = &pRam->aPages[iPage];
2602 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
2603 if (cb > cbRead)
2604 cb = cbRead;
2605
2606 /*
2607 * Normal page? Get the pointer to it.
2608 */
2609 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2610 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2611 {
2612 /*
2613 * Get the pointer to the page.
2614 */
2615 PGMPAGEMAPLOCK PgMpLck;
2616 const void *pvSrc;
2617 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2618 if (RT_SUCCESS(rc))
2619 {
2620 memcpy(pvBuf, pvSrc, cb);
2621 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2622 }
2623 else
2624 {
2625 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2626 pRam->GCPhys + off, pPage, rc));
2627 memset(pvBuf, 0xff, cb);
2628 }
2629 }
2630 /*
2631 * Have ALL/MMIO access handlers.
2632 */
2633 else
2634 {
2635 VBOXSTRICTRC rcStrict2 = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2636 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2637 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2638 else
2639 {
2640 memset(pvBuf, 0xff, cb);
2641 PGM_UNLOCK(pVM);
2642 return rcStrict2;
2643 }
2644 }
2645
2646 /* next page */
2647 if (cb >= cbRead)
2648 {
2649 PGM_UNLOCK(pVM);
2650 return rcStrict;
2651 }
2652 cbRead -= cb;
2653 off += cb;
2654 pvBuf = (char *)pvBuf + cb;
2655 } /* walk pages in ram range. */
2656
2657 GCPhys = pRam->GCPhysLast + 1;
2658 }
2659 else
2660 {
2661 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2662
2663 /*
2664 * Unassigned address space.
2665 */
2666 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2667 if (cb >= cbRead)
2668 {
2669 memset(pvBuf, 0xff, cbRead);
2670 break;
2671 }
2672 memset(pvBuf, 0xff, cb);
2673
2674 cbRead -= cb;
2675 pvBuf = (char *)pvBuf + cb;
2676 GCPhys += cb;
2677 }
2678
2679 /* Advance range if necessary. */
2680 while (pRam && GCPhys > pRam->GCPhysLast)
2681 pRam = pRam->CTX_SUFF(pNext);
2682 } /* Ram range walk */
2683
2684 PGM_UNLOCK(pVM);
2685 return rcStrict;
2686}
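

/*
 * Illustrative sketch (not part of the build): reading guest memory with access
 * handler and MMIO semantics.  GCPhys and the buffer are hypothetical and the
 * IEM origin is just an example; outside ring-3 any non-success strict status
 * must be propagated to the caller.
 *
 *     uint8_t      abBuf[64];
 *     VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, abBuf, sizeof(abBuf), PGMACCESSORIGIN_IEM);
 *     if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *     {
 *         // abBuf is valid here; rcStrict may still carry an informational status.
 *     }
 *     else
 *         return rcStrict;                           // let the caller deal with it
 */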
2687
2688
2689/**
2690 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2691 *
2692 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2693 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2694 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2695 *
2696 * @param pVM The cross context VM structure.
2697 * @param pPage The page descriptor.
2698 * @param GCPhys The physical address to start writing at.
2699 * @param pvBuf What to write.
2700 * @param cbWrite How much to write - less or equal to a page.
2701 * @param enmOrigin The origin of this call.
2702 */
2703static VBOXSTRICTRC pgmPhysWriteHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2704 PGMACCESSORIGIN enmOrigin)
2705{
2706 PGMPAGEMAPLOCK PgMpLck;
2707 void *pvDst = NULL;
2708 VBOXSTRICTRC rcStrict;
2709
2710 /*
2711 * Give priority to physical handlers (like #PF does).
2712 *
2713 * Hope for a lonely physical handler first that covers the whole write
2714 * area. This should be a pretty frequent case with MMIO and the heavy
2715 * usage of full page handlers in the page pool.
2716 */
2717 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2718 PPGMPHYSHANDLER pCur;
2719 rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2720 if (RT_SUCCESS(rcStrict))
2721 {
2722 Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
2723#ifndef IN_RING3
2724 if (enmOrigin != PGMACCESSORIGIN_IEM)
2725 /* Cannot reliably handle informational status codes in this context */
2726 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2727#endif
2728 size_t cbRange = pCur->KeyLast - GCPhys + 1;
2729 if (cbRange > cbWrite)
2730 cbRange = cbWrite;
2731
2732 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->pfnHandler);
2733 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2734 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2735 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2736 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2737 else
2738 rcStrict = VINF_SUCCESS;
2739 if (RT_SUCCESS(rcStrict))
2740 {
2741 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2742 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
2743 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
2744 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
2745 STAM_PROFILE_START(&pCur->Stat, h);
2746
2747 /* Most handlers will want to release the PGM lock for deadlock prevention
2748 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2749 dirty page trackers will want to keep it for performance reasons. */
2750 PGM_LOCK_ASSERT_OWNER(pVM);
2751 if (pCurType->fKeepPgmLock)
2752 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2753 else
2754 {
2755 PGM_UNLOCK(pVM);
2756 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2757 PGM_LOCK_VOID(pVM);
2758 }
2759
2760 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2761 pCur = NULL; /* might not be valid anymore. */
2762 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2763 {
2764 if (pvDst)
2765 memcpy(pvDst, pvBuf, cbRange);
2766 rcStrict = VINF_SUCCESS;
2767 }
2768 else
2769 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
2770 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2771 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pCur ? R3STRING(pCur->pszDesc) : ""));
2772 }
2773 else
2774 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2775 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
2776 if (RT_LIKELY(cbRange == cbWrite) || !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2777 {
2778 if (pvDst)
2779 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2780 return rcStrict;
2781 }
2782
2783 /* more fun to be had below */
2784 cbWrite -= cbRange;
2785 GCPhys += cbRange;
2786 pvBuf = (uint8_t *)pvBuf + cbRange;
2787 pvDst = (uint8_t *)pvDst + cbRange;
2788 }
2789 else if (rcStrict == VERR_NOT_FOUND) /* The handler is somewhere else in the page, deal with it below. */
2790 rcStrict = VINF_SUCCESS;
2791 else
2792 AssertMsgFailedReturn(("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2793 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all GUEST_PAGE_SIZEed! */
2794
2795 /*
2796 * Deal with all the odd ends (used to be deal with virt+phys).
2797 */
2798 Assert(rcStrict != VINF_PGM_HANDLER_DO_DEFAULT);
2799
2800 /* We need a writable destination page. */
2801 if (!pvDst)
2802 {
2803 int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2804 AssertLogRelMsgReturn(RT_SUCCESS(rc2),
2805 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n", GCPhys, pPage, rc2),
2806 rc2);
2807 }
2808
2809    /** @todo clean up this code some more now that there are no virtual handlers any
2810 * more. */
2811 /* The loop state (big + ugly). */
2812 PPGMPHYSHANDLER pPhys = NULL;
2813 uint32_t offPhys = GUEST_PAGE_SIZE;
2814 uint32_t offPhysLast = GUEST_PAGE_SIZE;
2815 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2816
2817 /* The loop. */
2818 for (;;)
2819 {
2820 if (fMorePhys && !pPhys)
2821 {
2822 rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhys, &pPhys);
2823 if (RT_SUCCESS_NP(rcStrict))
2824 {
2825 offPhys = 0;
2826 offPhysLast = pPhys->KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2827 }
2828 else
2829 {
2830 AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2831
2832 rcStrict = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
2833 GCPhys, &pPhys);
2834 AssertMsgReturn(RT_SUCCESS(rcStrict) || rcStrict == VERR_NOT_FOUND,
2835 ("%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2836
2837 if ( RT_SUCCESS(rcStrict)
2838 && pPhys->Key <= GCPhys + (cbWrite - 1))
2839 {
2840 offPhys = pPhys->Key - GCPhys;
2841 offPhysLast = pPhys->KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2842 Assert(pPhys->KeyLast - pPhys->Key < _4G);
2843 }
2844 else
2845 {
2846 pPhys = NULL;
2847 fMorePhys = false;
2848 offPhys = offPhysLast = GUEST_PAGE_SIZE;
2849 }
2850 }
2851 }
2852
2853 /*
2854 * Handle access to space without handlers (that's easy).
2855 */
2856 VBOXSTRICTRC rcStrict2 = VINF_PGM_HANDLER_DO_DEFAULT;
2857 uint32_t cbRange = (uint32_t)cbWrite;
2858 Assert(cbRange == cbWrite);
2859
2860 /*
2861 * Physical handler.
2862 */
2863 if (!offPhys)
2864 {
2865#ifndef IN_RING3
2866 if (enmOrigin != PGMACCESSORIGIN_IEM)
2867 /* Cannot reliably handle informational status codes in this context */
2868 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2869#endif
2870 if (cbRange > offPhysLast + 1)
2871 cbRange = offPhysLast + 1;
2872
2873 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pPhys);
2874 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
2875 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pPhys->uUser
2876 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pPhys->uUser);
2877
2878 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2879 STAM_PROFILE_START(&pPhys->Stat, h);
2880
2881 /* Most handlers will want to release the PGM lock for deadlock prevention
2882 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2883 dirty page trackers will want to keep it for performance reasons. */
2884 PGM_LOCK_ASSERT_OWNER(pVM);
2885 if (pCurType->fKeepPgmLock)
2886 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2887 else
2888 {
2889 PGM_UNLOCK(pVM);
2890 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2891 PGM_LOCK_VOID(pVM);
2892 }
2893
2894 STAM_PROFILE_STOP(&pPhys->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2895 pPhys = NULL; /* might not be valid anymore. */
2896 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true),
2897 ("rcStrict2=%Rrc (rcStrict=%Rrc) GCPhys=%RGp pPage=%R[pgmpage] %s\n", VBOXSTRICTRC_VAL(rcStrict2),
2898 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pPhys ? R3STRING(pPhys->pszDesc) : ""));
2899 }
2900
2901 /*
2902 * Execute the default action and merge the status codes.
2903 */
2904 if (rcStrict2 == VINF_PGM_HANDLER_DO_DEFAULT)
2905 {
2906 memcpy(pvDst, pvBuf, cbRange);
2907 rcStrict2 = VINF_SUCCESS;
2908 }
2909 else if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2910 {
2911 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2912 return rcStrict2;
2913 }
2914 else
2915 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2916
2917 /*
2918 * Advance if we've got more stuff to do.
2919 */
2920 if (cbRange >= cbWrite)
2921 {
2922 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2923 return rcStrict;
2924 }
2925
2926
2927 cbWrite -= cbRange;
2928 GCPhys += cbRange;
2929 pvBuf = (uint8_t *)pvBuf + cbRange;
2930 pvDst = (uint8_t *)pvDst + cbRange;
2931
2932 offPhys -= cbRange;
2933 offPhysLast -= cbRange;
2934 }
2935}
2936
2937
2938/**
2939 * Write to physical memory.
2940 *
2941 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2942 * want to ignore those.
2943 *
2944 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2945 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2946 * @retval VINF_SUCCESS in all context - write completed.
2947 *
2948 * @retval VINF_EM_OFF in RC and R0 - write completed.
2949 * @retval VINF_EM_SUSPEND in RC and R0 - write completed.
2950 * @retval VINF_EM_RESET in RC and R0 - write completed.
2951 * @retval VINF_EM_HALT in RC and R0 - write completed.
2952 * @retval VINF_SELM_SYNC_GDT in RC only - write completed.
2953 *
2954 * @retval VINF_EM_DBG_STOP in RC and R0 - write completed.
2955 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - write completed.
2956 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2957 *
2958 * @retval VINF_IOM_R3_MMIO_WRITE in RC and R0.
2959 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2960 * @retval VINF_IOM_R3_MMIO_COMMIT_WRITE in RC and R0.
2961 *
2962 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT in RC only - write completed.
2963 * @retval VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT in RC only.
2964 * @retval VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT in RC only.
2965 * @retval VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT in RC only.
2966 * @retval VINF_CSAM_PENDING_ACTION in RC only.
2967 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2968 *
2969 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2970 * haven't been cleared for strict status codes yet.
2971 *
2972 *
2973 * @param pVM The cross context VM structure.
2974 * @param GCPhys Physical address to write to.
2975 * @param pvBuf What to write.
2976 * @param cbWrite How many bytes to write.
2977 * @param enmOrigin Who is calling.
2978 */
2979VMMDECL(VBOXSTRICTRC) PGMPhysWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2980{
2981 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2982 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2983 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2984
2985 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWrite));
2986 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2987
2988 PGM_LOCK_VOID(pVM);
2989
2990 /*
2991 * Copy loop on ram ranges.
2992 */
2993 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2994 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2995 for (;;)
2996 {
2997 /* Inside range or not? */
2998 if (pRam && GCPhys >= pRam->GCPhys)
2999 {
3000 /*
3001             * Must work our way through this range page by page.
3002 */
3003 RTGCPTR off = GCPhys - pRam->GCPhys;
3004 while (off < pRam->cb)
3005 {
3006 RTGCPTR iPage = off >> GUEST_PAGE_SHIFT;
3007 PPGMPAGE pPage = &pRam->aPages[iPage];
3008 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
3009 if (cb > cbWrite)
3010 cb = cbWrite;
3011
3012 /*
3013 * Normal page? Get the pointer to it.
3014 */
3015 if ( !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
3016 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3017 {
3018 PGMPAGEMAPLOCK PgMpLck;
3019 void *pvDst;
3020 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
3021 if (RT_SUCCESS(rc))
3022 {
3023 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
3024 memcpy(pvDst, pvBuf, cb);
3025 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
3026 }
3027 /* Ignore writes to ballooned pages. */
3028 else if (!PGM_PAGE_IS_BALLOONED(pPage))
3029 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
3030 pRam->GCPhys + off, pPage, rc));
3031 }
3032 /*
3033 * Active WRITE or ALL access handlers.
3034 */
3035 else
3036 {
3037 VBOXSTRICTRC rcStrict2 = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
3038 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
3039 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
3040 else
3041 {
3042 PGM_UNLOCK(pVM);
3043 return rcStrict2;
3044 }
3045 }
3046
3047 /* next page */
3048 if (cb >= cbWrite)
3049 {
3050 PGM_UNLOCK(pVM);
3051 return rcStrict;
3052 }
3053
3054 cbWrite -= cb;
3055 off += cb;
3056 pvBuf = (const char *)pvBuf + cb;
3057 } /* walk pages in ram range */
3058
3059 GCPhys = pRam->GCPhysLast + 1;
3060 }
3061 else
3062 {
3063 /*
3064 * Unassigned address space, skip it.
3065 */
3066 if (!pRam)
3067 break;
3068 size_t cb = pRam->GCPhys - GCPhys;
3069 if (cb >= cbWrite)
3070 break;
3071 cbWrite -= cb;
3072 pvBuf = (const char *)pvBuf + cb;
3073 GCPhys += cb;
3074 }
3075
3076 /* Advance range if necessary. */
3077 while (pRam && GCPhys > pRam->GCPhysLast)
3078 pRam = pRam->CTX_SUFF(pNext);
3079 } /* Ram range walk */
3080
3081 PGM_UNLOCK(pVM);
3082 return rcStrict;
3083}
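

/*
 * Illustrative sketch (not part of the build): writing guest memory with access
 * handler and MMIO semantics, the counterpart of the read sketch above.  GCPhys
 * and the payload are hypothetical.
 *
 *     uint32_t const uValue   = UINT32_C(0xdeadbeef);
 *     VBOXSTRICTRC   rcStrict = PGMPhysWrite(pVM, GCPhys, &uValue, sizeof(uValue), PGMACCESSORIGIN_IEM);
 *     if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *         return rcStrict;                           // e.g. the handler deferred to ring-3
 */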
3084
3085
3086/**
3087 * Read from guest physical memory by GC physical address, bypassing
3088 * MMIO and access handlers.
3089 *
3090 * @returns VBox status code.
3091 * @param pVM The cross context VM structure.
3092 * @param pvDst The destination address.
3093 * @param GCPhysSrc The source address (GC physical address).
3094 * @param cb The number of bytes to read.
3095 */
3096VMMDECL(int) PGMPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
3097{
3098 /*
3099 * Treat the first page as a special case.
3100 */
3101 if (!cb)
3102 return VINF_SUCCESS;
3103
3104 /* map the 1st page */
3105 void const *pvSrc;
3106 PGMPAGEMAPLOCK Lock;
3107 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3108 if (RT_FAILURE(rc))
3109 return rc;
3110
3111 /* optimize for the case where access is completely within the first page. */
3112 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysSrc & GUEST_PAGE_OFFSET_MASK);
3113 if (RT_LIKELY(cb <= cbPage))
3114 {
3115 memcpy(pvDst, pvSrc, cb);
3116 PGMPhysReleasePageMappingLock(pVM, &Lock);
3117 return VINF_SUCCESS;
3118 }
3119
3120 /* copy to the end of the page. */
3121 memcpy(pvDst, pvSrc, cbPage);
3122 PGMPhysReleasePageMappingLock(pVM, &Lock);
3123 GCPhysSrc += cbPage;
3124 pvDst = (uint8_t *)pvDst + cbPage;
3125 cb -= cbPage;
3126
3127 /*
3128 * Page by page.
3129 */
3130 for (;;)
3131 {
3132 /* map the page */
3133 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3134 if (RT_FAILURE(rc))
3135 return rc;
3136
3137 /* last page? */
3138 if (cb <= GUEST_PAGE_SIZE)
3139 {
3140 memcpy(pvDst, pvSrc, cb);
3141 PGMPhysReleasePageMappingLock(pVM, &Lock);
3142 return VINF_SUCCESS;
3143 }
3144
3145 /* copy the entire page and advance */
3146 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3147 PGMPhysReleasePageMappingLock(pVM, &Lock);
3148 GCPhysSrc += GUEST_PAGE_SIZE;
3149 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
3150 cb -= GUEST_PAGE_SIZE;
3151 }
3152 /* won't ever get here. */
3153}
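

/*
 * Illustrative sketch (not part of the build): the "simple" variant ignores
 * access handlers and MMIO, so it is only suitable for plain RAM.  GCPhysSrc is
 * hypothetical.
 *
 *     uint8_t abDescriptor[8];                       // e.g. a descriptor table entry
 *     int rc = PGMPhysSimpleReadGCPhys(pVM, abDescriptor, GCPhysSrc, sizeof(abDescriptor));
 *     AssertRCReturn(rc, rc);
 */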
3154
3155
3156/**
3157 * Write to guest physical memory by GC physical address.
3159 *
3160 * This will bypass MMIO and access handlers.
3161 *
3162 * @returns VBox status code.
3163 * @param pVM The cross context VM structure.
3164 * @param GCPhysDst The GC physical address of the destination.
3165 * @param pvSrc The source buffer.
3166 * @param cb The number of bytes to write.
3167 */
3168VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
3169{
3170 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
3171
3172 /*
3173 * Treat the first page as a special case.
3174 */
3175 if (!cb)
3176 return VINF_SUCCESS;
3177
3178 /* map the 1st page */
3179 void *pvDst;
3180 PGMPAGEMAPLOCK Lock;
3181 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3182 if (RT_FAILURE(rc))
3183 return rc;
3184
3185 /* optimize for the case where access is completely within the first page. */
3186 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysDst & GUEST_PAGE_OFFSET_MASK);
3187 if (RT_LIKELY(cb <= cbPage))
3188 {
3189 memcpy(pvDst, pvSrc, cb);
3190 PGMPhysReleasePageMappingLock(pVM, &Lock);
3191 return VINF_SUCCESS;
3192 }
3193
3194 /* copy to the end of the page. */
3195 memcpy(pvDst, pvSrc, cbPage);
3196 PGMPhysReleasePageMappingLock(pVM, &Lock);
3197 GCPhysDst += cbPage;
3198 pvSrc = (const uint8_t *)pvSrc + cbPage;
3199 cb -= cbPage;
3200
3201 /*
3202 * Page by page.
3203 */
3204 for (;;)
3205 {
3206 /* map the page */
3207 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3208 if (RT_FAILURE(rc))
3209 return rc;
3210
3211 /* last page? */
3212 if (cb <= GUEST_PAGE_SIZE)
3213 {
3214 memcpy(pvDst, pvSrc, cb);
3215 PGMPhysReleasePageMappingLock(pVM, &Lock);
3216 return VINF_SUCCESS;
3217 }
3218
3219 /* copy the entire page and advance */
3220 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3221 PGMPhysReleasePageMappingLock(pVM, &Lock);
3222 GCPhysDst += GUEST_PAGE_SIZE;
3223 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3224 cb -= GUEST_PAGE_SIZE;
3225 }
3226 /* won't ever get here. */
3227}
3228
3229
3230/**
3231 * Read from guest physical memory referenced by GC pointer.
3232 *
3233 * This function uses the current CR3/CR0/CR4 of the guest and will
3234 * bypass access handlers and not set any accessed bits.
3235 *
3236 * @returns VBox status code.
3237 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3238 * @param pvDst The destination address.
3239 * @param GCPtrSrc The source address (GC pointer).
3240 * @param cb The number of bytes to read.
3241 */
3242VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3243{
3244 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3245/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3246
3247 /*
3248 * Treat the first page as a special case.
3249 */
3250 if (!cb)
3251 return VINF_SUCCESS;
3252
3253 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleRead));
3254 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3255
3256 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3257 * when many VCPUs are fighting for the lock.
3258 */
3259 PGM_LOCK_VOID(pVM);
3260
3261 /* map the 1st page */
3262 void const *pvSrc;
3263 PGMPAGEMAPLOCK Lock;
3264 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3265 if (RT_FAILURE(rc))
3266 {
3267 PGM_UNLOCK(pVM);
3268 return rc;
3269 }
3270
3271 /* optimize for the case where access is completely within the first page. */
3272 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3273 if (RT_LIKELY(cb <= cbPage))
3274 {
3275 memcpy(pvDst, pvSrc, cb);
3276 PGMPhysReleasePageMappingLock(pVM, &Lock);
3277 PGM_UNLOCK(pVM);
3278 return VINF_SUCCESS;
3279 }
3280
3281 /* copy to the end of the page. */
3282 memcpy(pvDst, pvSrc, cbPage);
3283 PGMPhysReleasePageMappingLock(pVM, &Lock);
3284 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3285 pvDst = (uint8_t *)pvDst + cbPage;
3286 cb -= cbPage;
3287
3288 /*
3289 * Page by page.
3290 */
3291 for (;;)
3292 {
3293 /* map the page */
3294 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3295 if (RT_FAILURE(rc))
3296 {
3297 PGM_UNLOCK(pVM);
3298 return rc;
3299 }
3300
3301 /* last page? */
3302 if (cb <= GUEST_PAGE_SIZE)
3303 {
3304 memcpy(pvDst, pvSrc, cb);
3305 PGMPhysReleasePageMappingLock(pVM, &Lock);
3306 PGM_UNLOCK(pVM);
3307 return VINF_SUCCESS;
3308 }
3309
3310 /* copy the entire page and advance */
3311 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3312 PGMPhysReleasePageMappingLock(pVM, &Lock);
3313 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + GUEST_PAGE_SIZE);
3314 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
3315 cb -= GUEST_PAGE_SIZE;
3316 }
3317 /* won't ever get here. */
3318}
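

/*
 * Illustrative sketch (not part of the build): same as above but addressed by
 * guest virtual address using the current CR3/CR0/CR4.  GCPtrSrc is
 * hypothetical.
 *
 *     uint64_t u64;
 *     int rc = PGMPhysSimpleReadGCPtr(pVCpu, &u64, GCPtrSrc, sizeof(u64));
 *     if (RT_FAILURE(rc))
 *         return rc;                                 // page or page table not present, etc.
 */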
3319
3320
3321/**
3322 * Write to guest physical memory referenced by GC pointer.
3323 *
3324 * This function uses the current CR3/CR0/CR4 of the guest and will
3325 * bypass access handlers and not set dirty or accessed bits.
3326 *
3327 * @returns VBox status code.
3328 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3329 * @param GCPtrDst The destination address (GC pointer).
3330 * @param pvSrc The source address.
3331 * @param cb The number of bytes to write.
3332 */
3333VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3334{
3335 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3336 VMCPU_ASSERT_EMT(pVCpu);
3337
3338 /*
3339 * Treat the first page as a special case.
3340 */
3341 if (!cb)
3342 return VINF_SUCCESS;
3343
3344 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWrite));
3345 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3346
3347 /* map the 1st page */
3348 void *pvDst;
3349 PGMPAGEMAPLOCK Lock;
3350 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3351 if (RT_FAILURE(rc))
3352 return rc;
3353
3354 /* optimize for the case where access is completely within the first page. */
3355 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3356 if (RT_LIKELY(cb <= cbPage))
3357 {
3358 memcpy(pvDst, pvSrc, cb);
3359 PGMPhysReleasePageMappingLock(pVM, &Lock);
3360 return VINF_SUCCESS;
3361 }
3362
3363 /* copy to the end of the page. */
3364 memcpy(pvDst, pvSrc, cbPage);
3365 PGMPhysReleasePageMappingLock(pVM, &Lock);
3366 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3367 pvSrc = (const uint8_t *)pvSrc + cbPage;
3368 cb -= cbPage;
3369
3370 /*
3371 * Page by page.
3372 */
3373 for (;;)
3374 {
3375 /* map the page */
3376 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3377 if (RT_FAILURE(rc))
3378 return rc;
3379
3380 /* last page? */
3381 if (cb <= GUEST_PAGE_SIZE)
3382 {
3383 memcpy(pvDst, pvSrc, cb);
3384 PGMPhysReleasePageMappingLock(pVM, &Lock);
3385 return VINF_SUCCESS;
3386 }
3387
3388 /* copy the entire page and advance */
3389 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3390 PGMPhysReleasePageMappingLock(pVM, &Lock);
3391 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
3392 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3393 cb -= GUEST_PAGE_SIZE;
3394 }
3395 /* won't ever get here. */
3396}
3397
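/*
 * Illustrative usage sketch (compiled out): a hypothetical helper that pokes a
 * small buffer into guest virtual memory, e.g. to plant a breakpoint byte. The
 * helper name is an assumption; the PGMPhysSimpleWriteGCPtr call is the API above.
 */
#if 0 /* example only, not built */
static int examplePokeGuestBytes(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvBytes, size_t cbBytes)
{
    /* Writes through the current guest paging mode without touching A/D bits
       and without invoking any access handlers. */
    return PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrDst, pvBytes, cbBytes);
}
#endif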
3398
3399/**
3400 * Write to guest physical memory referenced by GC pointer and update the PTE.
3401 *
3402 * This function uses the current CR3/CR0/CR4 of the guest and will
3403 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3404 *
3405 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3406 *
3407 * @returns VBox status code.
3408 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3409 * @param GCPtrDst The destination address (GC pointer).
3410 * @param pvSrc The source address.
3411 * @param cb The number of bytes to write.
3412 */
3413VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3414{
3415 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3416 VMCPU_ASSERT_EMT(pVCpu);
3417
3418 /*
3419 * Treat the first page as a special case.
3420 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage calls.
3421 */
3422 if (!cb)
3423 return VINF_SUCCESS;
3424
3425 /* map the 1st page */
3426 void *pvDst;
3427 PGMPAGEMAPLOCK Lock;
3428 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3429 if (RT_FAILURE(rc))
3430 return rc;
3431
3432 /* optimize for the case where access is completely within the first page. */
3433 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3434 if (RT_LIKELY(cb <= cbPage))
3435 {
3436 memcpy(pvDst, pvSrc, cb);
3437 PGMPhysReleasePageMappingLock(pVM, &Lock);
3438 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3439 return VINF_SUCCESS;
3440 }
3441
3442 /* copy to the end of the page. */
3443 memcpy(pvDst, pvSrc, cbPage);
3444 PGMPhysReleasePageMappingLock(pVM, &Lock);
3445 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3446 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3447 pvSrc = (const uint8_t *)pvSrc + cbPage;
3448 cb -= cbPage;
3449
3450 /*
3451 * Page by page.
3452 */
3453 for (;;)
3454 {
3455 /* map the page */
3456 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3457 if (RT_FAILURE(rc))
3458 return rc;
3459
3460 /* last page? */
3461 if (cb <= GUEST_PAGE_SIZE)
3462 {
3463 memcpy(pvDst, pvSrc, cb);
3464 PGMPhysReleasePageMappingLock(pVM, &Lock);
3465 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3466 return VINF_SUCCESS;
3467 }
3468
3469 /* copy the entire page and advance */
3470 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3471 PGMPhysReleasePageMappingLock(pVM, &Lock);
3472 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3473 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
3474 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3475 cb -= GUEST_PAGE_SIZE;
3476 }
3477 /* won't ever get here. */
3478}
3479
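/*
 * Illustrative sketch (compiled out) contrasting the two simple-write variants:
 * when emulating an instruction that architecturally dirties the page, the
 * accessed/dirty bits must be set as well, so this "Dirty" variant is the one to
 * use. The wrapper name is an assumption.
 */
#if 0 /* example only, not built */
static int exampleEmulatedStoreU32(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, uint32_t uValue)
{
    /* Same copy loop as PGMPhysSimpleWriteGCPtr, but also sets X86_PTE_A / X86_PTE_D. */
    return PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, &uValue, sizeof(uValue));
}
#endif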
3480
3481/**
3482 * Read from guest physical memory referenced by GC pointer.
3483 *
3484 * This function uses the current CR3/CR0/CR4 of the guest and will
3485 * respect access handlers and set accessed bits.
3486 *
3487 * @returns Strict VBox status, see PGMPhysRead for details.
3488 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3489 * specified virtual address.
3490 *
3491 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3492 * @param pvDst The destination address.
3493 * @param GCPtrSrc The source address (GC pointer).
3494 * @param cb The number of bytes to read.
3495 * @param enmOrigin Who is calling.
3496 * @thread EMT(pVCpu)
3497 */
3498VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3499{
3500 int rc;
3501 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3502 VMCPU_ASSERT_EMT(pVCpu);
3503
3504 /*
3505 * Anything to do?
3506 */
3507 if (!cb)
3508 return VINF_SUCCESS;
3509
3510 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3511
3512 /*
3513 * Optimize reads within a single page.
3514 */
3515 if (((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
3516 {
3517 /* Convert virtual to physical address + flags */
3518 PGMPTWALK Walk;
3519 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3520 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3521 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3522
3523 /* mark the guest page as accessed. */
3524 if (!(Walk.fEffective & X86_PTE_A))
3525 {
3526 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3527 AssertRC(rc);
3528 }
3529
3530 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3531 }
3532
3533 /*
3534 * Page by page.
3535 */
3536 for (;;)
3537 {
3538 /* Convert virtual to physical address + flags */
3539 PGMPTWALK Walk;
3540 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3541 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3542 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3543
3544 /* mark the guest page as accessed. */
3545 if (!(Walk.fEffective & X86_PTE_A))
3546 {
3547 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3548 AssertRC(rc);
3549 }
3550
3551 /* copy */
3552 size_t cbRead = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3553 if (cbRead < cb)
3554 {
3555 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3556 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3557 { /* likely */ }
3558 else
3559 return rcStrict;
3560 }
3561 else /* Last page (cbRead is GUEST_PAGE_SIZE, we only need cb!) */
3562 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3563
3564 /* next */
3565 Assert(cb > cbRead);
3566 cb -= cbRead;
3567 pvDst = (uint8_t *)pvDst + cbRead;
3568 GCPtrSrc += cbRead;
3569 }
3570}
3571
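/*
 * Illustrative sketch (compiled out): unlike the "simple" variant, PGMPhysReadGCPtr
 * can return informational strict statuses from access handlers, so a caller keeps
 * the VBOXSTRICTRC instead of collapsing it to an int. The helper name and the
 * PGMACCESSORIGIN_DEBUGGER origin are assumptions made for the example.
 */
#if 0 /* example only, not built */
static VBOXSTRICTRC exampleHandlerAwareRead(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
{
    VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, pvDst, GCPtrSrc, cb, PGMACCESSORIGIN_DEBUGGER);
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("exampleHandlerAwareRead: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif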
3572
3573/**
3574 * Write to guest physical memory referenced by GC pointer.
3575 *
3576 * This function uses the current CR3/CR0/CR4 of the guest and will
3577 * respect access handlers and set dirty and accessed bits.
3578 *
3579 * @returns Strict VBox status, see PGMPhysWrite for details.
3580 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3581 * specified virtual address.
3582 *
3583 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3584 * @param GCPtrDst The destination address (GC pointer).
3585 * @param pvSrc The source address.
3586 * @param cb The number of bytes to write.
3587 * @param enmOrigin Who is calling.
3588 */
3589VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3590{
3591 int rc;
3592 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3593 VMCPU_ASSERT_EMT(pVCpu);
3594
3595 /*
3596 * Anything to do?
3597 */
3598 if (!cb)
3599 return VINF_SUCCESS;
3600
3601 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3602
3603 /*
3604 * Optimize writes within a single page.
3605 */
3606 if (((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
3607 {
3608 /* Convert virtual to physical address + flags */
3609 PGMPTWALK Walk;
3610 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3611 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3612 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3613
3614 /* Mention when we ignore X86_PTE_RW... */
3615 if (!(Walk.fEffective & X86_PTE_RW))
3616 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3617
3618 /* Mark the guest page as accessed and dirty if necessary. */
3619 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3620 {
3621 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3622 AssertRC(rc);
3623 }
3624
3625 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3626 }
3627
3628 /*
3629 * Page by page.
3630 */
3631 for (;;)
3632 {
3633 /* Convert virtual to physical address + flags */
3634 PGMPTWALK Walk;
3635 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3636 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3637 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3638
3639 /* Mention when we ignore X86_PTE_RW... */
3640 if (!(Walk.fEffective & X86_PTE_RW))
3641 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3642
3643 /* Mark the guest page as accessed and dirty if necessary. */
3644 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3645 {
3646 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3647 AssertRC(rc);
3648 }
3649
3650 /* copy */
3651 size_t cbWrite = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3652 if (cbWrite < cb)
3653 {
3654 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3655 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3656 { /* likely */ }
3657 else
3658 return rcStrict;
3659 }
3660 else /* Last page (cbWrite is GUEST_PAGE_SIZE, we only need cb!) */
3661 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3662
3663 /* next */
3664 Assert(cb > cbWrite);
3665 cb -= cbWrite;
3666 pvSrc = (uint8_t *)pvSrc + cbWrite;
3667 GCPtrDst += cbWrite;
3668 }
3669}
3670
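/*
 * Illustrative sketch (compiled out): the write counterpart follows the same
 * pattern; informational statuses (e.g. a handler deferring ring-0 work to
 * ring-3) must be propagated, not dropped. The helper name and the origin value
 * are assumptions.
 */
#if 0 /* example only, not built */
static VBOXSTRICTRC exampleHandlerAwareWrite(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
{
    /* Respects access handlers and sets the accessed/dirty bits as documented above. */
    return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb, PGMACCESSORIGIN_DEBUGGER);
}
#endif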
3671
3672/**
3673 * Return the page type of the specified physical address.
3674 *
3675 * @returns The page type.
3676 * @param pVM The cross context VM structure.
3677 * @param GCPhys Guest physical address
3678 */
3679VMM_INT_DECL(PGMPAGETYPE) PGMPhysGetPageType(PVMCC pVM, RTGCPHYS GCPhys)
3680{
3681 PGM_LOCK_VOID(pVM);
3682 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3683 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
3684 PGM_UNLOCK(pVM);
3685
3686 return enmPgType;
3687}
3688
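/*
 * Illustrative sketch (compiled out): a hypothetical check using PGMPhysGetPageType
 * to decide whether an address is backed by RAM. The helper name is an assumption;
 * PGMPAGETYPE_RAM is one of the public PGMPAGETYPE values.
 */
#if 0 /* example only, not built */
static bool exampleIsBackedByRam(PVMCC pVM, RTGCPHYS GCPhys)
{
    PGMPAGETYPE const enmType = PGMPhysGetPageType(pVM, GCPhys);
    return enmType == PGMPAGETYPE_RAM; /* PGMPAGETYPE_INVALID is returned for unassigned space */
}
#endif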
3689
3690/**
3691 * Converts a GC physical address to a HC ring-3 pointer, with some
3692 * additional checks.
3693 *
3694 * @returns VBox status code (no informational statuses).
3695 *
3696 * @param pVM The cross context VM structure.
3697 * @param pVCpu The cross context virtual CPU structure of the
3698 * calling EMT.
3699 * @param GCPhys The GC physical address to convert. This API masks
3700 * the A20 line when necessary.
3701 * @param puTlbPhysRev Where to read the physical TLB revision. Needs to
3702 * be done while holding the PGM lock.
3703 * @param ppb Where to store the pointer corresponding to GCPhys
3704 * on success.
3705 * @param pfTlb The TLB flags and revision. We only add stuff.
3706 *
3707 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr and
3708 * PGMPhysIemGCPhys2Ptr.
3709 *
3710 * @thread EMT(pVCpu).
3711 */
3712VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
3713 R3R0PTRTYPE(uint8_t *) *ppb,
3714 uint64_t *pfTlb)
3715{
3716 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
3717 Assert(!(GCPhys & X86_PAGE_OFFSET_MASK));
3718
3719 PGM_LOCK_VOID(pVM);
3720
3721 PPGMRAMRANGE pRam;
3722 PPGMPAGE pPage;
3723 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3724 if (RT_SUCCESS(rc))
3725 {
3726 if (!PGM_PAGE_IS_BALLOONED(pPage))
3727 {
3728 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3729 {
3730 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
3731 {
3732 /*
3733 * No access handler.
3734 */
3735 switch (PGM_PAGE_GET_STATE(pPage))
3736 {
3737 case PGM_PAGE_STATE_ALLOCATED:
3738 *pfTlb |= *puTlbPhysRev;
3739 break;
3740 case PGM_PAGE_STATE_BALLOONED:
3741 AssertFailed();
3742 RT_FALL_THRU();
3743 case PGM_PAGE_STATE_ZERO:
3744 case PGM_PAGE_STATE_SHARED:
3745 case PGM_PAGE_STATE_WRITE_MONITORED:
3746 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3747 break;
3748 }
3749
3750 PPGMPAGEMAPTLBE pTlbe;
3751 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3752 AssertLogRelRCReturn(rc, rc);
3753 *ppb = (uint8_t *)pTlbe->pv;
3754 }
3755 else if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
3756 {
3757 /*
3758 * MMIO or similar all access handler: Catch all access.
3759 */
3760 *pfTlb |= *puTlbPhysRev
3761 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3762 *ppb = NULL;
3763 }
3764 else
3765 {
3766 /*
3767 * Write access handler: Catch write accesses if active.
3768 */
3769 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
3770 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3771 else
3772 switch (PGM_PAGE_GET_STATE(pPage))
3773 {
3774 case PGM_PAGE_STATE_ALLOCATED:
3775 *pfTlb |= *puTlbPhysRev;
3776 break;
3777 case PGM_PAGE_STATE_BALLOONED:
3778 AssertFailed();
3779 RT_FALL_THRU();
3780 case PGM_PAGE_STATE_ZERO:
3781 case PGM_PAGE_STATE_SHARED:
3782 case PGM_PAGE_STATE_WRITE_MONITORED:
3783 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3784 break;
3785 }
3786
3787 PPGMPAGEMAPTLBE pTlbe;
3788 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3789 AssertLogRelRCReturn(rc, rc);
3790 *ppb = (uint8_t *)pTlbe->pv;
3791 }
3792 }
3793 else
3794 {
3795 /* Alias MMIO: For now, we catch all access. */
3796 *pfTlb |= *puTlbPhysRev
3797 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3798 *ppb = NULL;
3799 }
3800 }
3801 else
3802 {
3803 /* Ballooned: Shouldn't get here, but we read the zero page via PGMPhysRead and writes go to /dev/null. */
3804 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3805 *ppb = NULL;
3806 }
3807 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 pPage=%R[pgmpage]\n", GCPhys, *ppb, *pfTlb, pPage));
3808 }
3809 else
3810 {
3811 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ
3812 | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 | PGMIEMGCPHYS2PTR_F_UNASSIGNED;
3813 *ppb = NULL;
3814 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 (rc=%Rrc)\n", GCPhys, *ppb, *pfTlb, rc));
3815 }
3816
3817 PGM_UNLOCK(pVM);
3818 return VINF_SUCCESS;
3819}
3820
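/*
 * Illustrative sketch (compiled out): how an IEM-style caller might interpret the
 * *pfTlb flags returned above. The revision handling is simplified and the helper
 * name is an assumption; the flag constants are the ones used by this function.
 */
#if 0 /* example only, not built */
static bool exampleTlbEntryAllowsWrite(uint64_t fTlb, uint8_t *pbMapping)
{
    if (fTlb & (PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_UNASSIGNED))
        return false;           /* must go through PGMPhysWrite / access handlers */
    return pbMapping != NULL;   /* direct mapping usable for writes */
}
#endif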
3821
3822/**
3823 * Converts a GC physical address to a HC ring-3 pointer, with some
3824 * additional checks.
3825 *
3826 * @returns VBox status code (no informational statuses).
3827 * @retval VINF_SUCCESS on success.
3828 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3829 * access handler of some kind.
3830 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3831 * accesses or is odd in any way.
3832 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3833 *
3834 * @param pVM The cross context VM structure.
3835 * @param pVCpu The cross context virtual CPU structure of the
3836 * calling EMT.
3837 * @param GCPhys The GC physical address to convert. This API masks
3838 * the A20 line when necessary.
3839 * @param fWritable Whether write access is required.
3840 * @param fByPassHandlers Whether to bypass access handlers.
3841 * @param ppv Where to store the pointer corresponding to GCPhys
3842 * on success.
3843 * @param pLock Where to return the page mapping lock. Pass it to PGMPhysReleasePageMappingLock when done with the mapping.
3844 *
3845 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
3846 * @thread EMT(pVCpu).
3847 */
3848VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
3849 void **ppv, PPGMPAGEMAPLOCK pLock)
3850{
3851 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
3852
3853 PGM_LOCK_VOID(pVM);
3854
3855 PPGMRAMRANGE pRam;
3856 PPGMPAGE pPage;
3857 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3858 if (RT_SUCCESS(rc))
3859 {
3860 if (PGM_PAGE_IS_BALLOONED(pPage))
3861 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3862 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3863 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3864 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3865 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3866 rc = VINF_SUCCESS;
3867 else
3868 {
3869 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3870 {
3871 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3872 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3873 }
3874 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
3875 {
3876 Assert(!fByPassHandlers);
3877 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3878 }
3879 }
3880 if (RT_SUCCESS(rc))
3881 {
3882 int rc2;
3883
3884 /* Make sure what we return is writable. */
3885 if (fWritable)
3886 switch (PGM_PAGE_GET_STATE(pPage))
3887 {
3888 case PGM_PAGE_STATE_ALLOCATED:
3889 break;
3890 case PGM_PAGE_STATE_BALLOONED:
3891 AssertFailed();
3892 break;
3893 case PGM_PAGE_STATE_ZERO:
3894 case PGM_PAGE_STATE_SHARED:
3895 case PGM_PAGE_STATE_WRITE_MONITORED:
3896 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
3897 AssertLogRelRCReturn(rc2, rc2);
3898 break;
3899 }
3900
3901 /* Get a ring-3 mapping of the address. */
3902 PPGMPAGEMAPTLBE pTlbe;
3903 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3904 AssertLogRelRCReturn(rc2, rc2);
3905
3906 /* Lock it and calculate the address. */
3907 if (fWritable)
3908 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
3909 else
3910 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
3911 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3912
3913 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
3914 }
3915 else
3916 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
3917
3918 /* else: handler catching all access, no pointer returned. */
3919 }
3920 else
3921 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
3922
3923 PGM_UNLOCK(pVM);
3924 return rc;
3925}
3926
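/*
 * Illustrative sketch (compiled out): the returned mapping is protected by a page
 * map lock which must be released again, mirroring what the simple read/write
 * helpers above do internally. The helper name is an assumption.
 */
#if 0 /* example only, not built */
static int exampleDirectReadU8(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint8_t *pbValue)
{
    void          *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, false /*fWritable*/, false /*fByPassHandlers*/, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pbValue = *(uint8_t const *)pv;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc; /* VERR_PGM_PHYS_TLB_* codes mean: fall back to PGMPhysRead. */
}
#endif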
3927
3928/**
3929 * Checks if the given GCPhys page requires special handling for the given access
3930 * because it's MMIO or otherwise monitored.
3931 *
3932 * @returns VBox status code (no informational statuses).
3933 * @retval VINF_SUCCESS on success.
3934 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE if the page has a write
3935 * access handler of some kind.
3936 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3937 * accesses or is odd in any way.
3938 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3939 *
3940 * @param pVM The cross context VM structure.
3941 * @param GCPhys The GC physical address to convert. Since this is
3942 * only used for filling the REM TLB, the A20 mask must
3943 * be applied before calling this API.
3944 * @param fWritable Whether write access is required.
3945 * @param fByPassHandlers Whether to bypass access handlers.
3946 *
3947 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
3948 * a stop gap thing that should be removed once there is a better TLB
3949 * for virtual address accesses.
3950 */
3951VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
3952{
3953 PGM_LOCK_VOID(pVM);
3954 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
3955
3956 PPGMRAMRANGE pRam;
3957 PPGMPAGE pPage;
3958 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3959 if (RT_SUCCESS(rc))
3960 {
3961 if (PGM_PAGE_IS_BALLOONED(pPage))
3962 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3963 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3964 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3965 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3966 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3967 rc = VINF_SUCCESS;
3968 else
3969 {
3970 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3971 {
3972 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3973 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3974 }
3975 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
3976 {
3977 Assert(!fByPassHandlers);
3978 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3979 }
3980 }
3981 }
3982
3983 PGM_UNLOCK(pVM);
3984 return rc;
3985}
3986
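/*
 * Illustrative sketch (compiled out): how a caller filling a software TLB might
 * interpret the status codes from PGMPhysIemQueryAccess. The helper name is an
 * assumption.
 */
#if 0 /* example only, not built */
static bool exampleCanAccessDirectly(PVMCC pVM, RTGCPHYS GCPhysPage, bool fWritable)
{
    int const rc = PGMPhysIemQueryAccess(pVM, GCPhysPage, fWritable, false /*fByPassHandlers*/);
    return rc == VINF_SUCCESS; /* VERR_PGM_PHYS_TLB_* means: go through PGMPhysRead/PGMPhysWrite instead. */
}
#endif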
3987#ifdef VBOX_WITH_NATIVE_NEM
3988
3989/**
3990 * Interface used by NEM to check what to do on a memory access exit.
3991 *
3992 * @returns VBox status code.
3993 * @param pVM The cross context VM structure.
3994 * @param pVCpu The cross context per virtual CPU structure.
3995 * Optional.
3996 * @param GCPhys The guest physical address.
3997 * @param fMakeWritable Whether to try to make the page writable or not. If it
3998 * cannot be made writable, NEM_PAGE_PROT_WRITE won't
3999 * be returned and the return code will be unaffected.
4000 * @param pInfo Where to return the page information. This is
4001 * initialized even on failure.
4002 * @param pfnChecker Page in-sync checker callback. Optional.
4003 * @param pvUser User argument to pass to pfnChecker.
4004 */
4005VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fMakeWritable, PPGMPHYSNEMPAGEINFO pInfo,
4006 PFNPGMPHYSNEMCHECKPAGE pfnChecker, void *pvUser)
4007{
4008 PGM_LOCK_VOID(pVM);
4009
4010 PPGMPAGE pPage;
4011 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
4012 if (RT_SUCCESS(rc))
4013 {
4014 /* Try make it writable if requested. */
4015 pInfo->u2OldNemState = PGM_PAGE_GET_NEM_STATE(pPage);
4016 if (fMakeWritable)
4017 switch (PGM_PAGE_GET_STATE(pPage))
4018 {
4019 case PGM_PAGE_STATE_SHARED:
4020 case PGM_PAGE_STATE_WRITE_MONITORED:
4021 case PGM_PAGE_STATE_ZERO:
4022 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
4023 if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
4024 rc = VINF_SUCCESS;
4025 break;
4026 }
4027
4028 /* Fill in the info. */
4029 pInfo->HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
4030 pInfo->u2NemState = PGM_PAGE_GET_NEM_STATE(pPage);
4031 pInfo->fHasHandlers = PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) ? 1 : 0;
4032 PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
4033 pInfo->enmType = enmType;
4034 pInfo->fNemProt = pgmPhysPageCalcNemProtection(pPage, enmType);
4035 switch (PGM_PAGE_GET_STATE(pPage))
4036 {
4037 case PGM_PAGE_STATE_ALLOCATED:
4038 pInfo->fZeroPage = 0;
4039 break;
4040
4041 case PGM_PAGE_STATE_ZERO:
4042 pInfo->fZeroPage = 1;
4043 break;
4044
4045 case PGM_PAGE_STATE_WRITE_MONITORED:
4046 pInfo->fZeroPage = 0;
4047 break;
4048
4049 case PGM_PAGE_STATE_SHARED:
4050 pInfo->fZeroPage = 0;
4051 break;
4052
4053 case PGM_PAGE_STATE_BALLOONED:
4054 pInfo->fZeroPage = 1;
4055 break;
4056
4057 default:
4058 pInfo->fZeroPage = 1;
4059 AssertFailedStmt(rc = VERR_PGM_PHYS_PAGE_GET_IPE);
4060 }
4061
4062 /* Call the checker and update NEM state. */
4063 if (pfnChecker)
4064 {
4065 rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
4066 PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
4067 }
4068
4069 /* Done. */
4070 PGM_UNLOCK(pVM);
4071 }
4072 else
4073 {
4074 PGM_UNLOCK(pVM);
4075
4076 pInfo->HCPhys = NIL_RTHCPHYS;
4077 pInfo->fNemProt = NEM_PAGE_PROT_NONE;
4078 pInfo->u2NemState = 0;
4079 pInfo->fHasHandlers = 0;
4080 pInfo->fZeroPage = 0;
4081 pInfo->enmType = PGMPAGETYPE_INVALID;
4082 }
4083
4084 return rc;
4085}
4086
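/*
 * Illustrative sketch (compiled out): a hypothetical NEM-side caller asking for
 * page info on a memory access exit and checking the permitted protection. The
 * helper name is an assumption; the types and flags are the ones used above.
 */
#if 0 /* example only, not built */
static int exampleQueryPageForNem(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys)
{
    PGMPHYSNEMPAGEINFO Info;
    int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, true /*fMakeWritable*/, &Info,
                                       NULL /*pfnChecker*/, NULL /*pvUser*/);
    if (RT_SUCCESS(rc) && (Info.fNemProt & NEM_PAGE_PROT_WRITE))
    { /* the hypervisor mapping may include write access */ }
    return rc;
}
#endif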
4087
4088/**
4089 * NEM helper that performs @a pfnCallback on pages with NEM state @a uMinState
4090 * or higher.
4091 *
4092 * @returns VBox status code from callback.
4093 * @param pVM The cross context VM structure.
4094 * @param pVCpu The cross context per CPU structure. This is
4095 * optional as it's only for passing to the callback.
4096 * @param uMinState The minimum NEM state value to call on.
4097 * @param pfnCallback The callback function.
4098 * @param pvUser User argument for the callback.
4099 */
4100VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uMinState,
4101 PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
4102{
4103 /*
4104 * Just brute force this problem.
4105 */
4106 PGM_LOCK_VOID(pVM);
4107 int rc = VINF_SUCCESS;
4108 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
4109 {
4110 uint32_t const cPages = pRam->cb >> X86_PAGE_SHIFT;
4111 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4112 {
4113 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(&pRam->aPages[iPage]);
4114 if (u2State < uMinState)
4115 { /* likely */ }
4116 else
4117 {
4118 rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
4119 if (RT_SUCCESS(rc))
4120 PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
4121 else
4122 break;
4123 }
4124 }
4125 }
4126 PGM_UNLOCK(pVM);
4127
4128 return rc;
4129}
4130
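/*
 * Illustrative sketch (compiled out): a hypothetical enumeration callback matching
 * the call made above (pVM, pVCpu, GCPhys, pu2State, pvUser); the new state written
 * back through pu2State is committed by the enumerator on success. The callback
 * name and the state value are assumptions.
 */
#if 0 /* example only, not built */
static DECLCALLBACK(int) exampleResetNemStateCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
                                                      uint8_t *pu2State, void *pvUser)
{
    RT_NOREF(pVM, pVCpu, GCPhys, pvUser);
    *pu2State = 0; /* hypothetical "unmapped" state value */
    return VINF_SUCCESS;
}
#endif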
4131
4132/**
4133 * Helper for setting the NEM state for a range of pages.
4134 *
4135 * @param paPages Array of pages to modify.
4136 * @param cPages How many pages to modify.
4137 * @param u2State The new state value.
4138 */
4139void pgmPhysSetNemStateForPages(PPGMPAGE paPages, RTGCPHYS cPages, uint8_t u2State)
4140{
4141 PPGMPAGE pPage = paPages;
4142 while (cPages-- > 0)
4143 {
4144 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
4145 pPage++;
4146 }
4147}
4148
4149#endif /* VBOX_WITH_NATIVE_NEM */
4150