VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@102727

Last change on this file since 102727 was 102663, checked in by vboxsync, 14 months ago

VMM/IEM: Working on BODY_CHECK_PC_AFTER_BRANCH and side effects of it. Fixed bug in 8-bit register stores (AMD64). Fixed bug in iemNativeEmitBltInCheckOpcodes (AMD64). Added a way to inject state logging between each instruction, currently only really implemented for AMD64. Relaxed the heavy flushing code, no need to set the buffer pointer to NULL. Started looking at avoiding code TLB flushing when allocating memory to replace zero pages. bugref:10371

1/* $Id: PGMAllPhys.cpp 102663 2023-12-21 01:55:07Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM_PHYS
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#include <VBox/vmm/pgm.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/vmm.h>
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/iom.h>
39#include <VBox/vmm/em.h>
40#include <VBox/vmm/nem.h>
41#include "PGMInternal.h"
42#include <VBox/vmm/vmcc.h>
43#include "PGMInline.h"
44#include <VBox/param.h>
45#include <VBox/err.h>
46#include <iprt/assert.h>
47#include <iprt/string.h>
48#include <VBox/log.h>
49#ifdef IN_RING3
50# include <iprt/thread.h>
51#endif
52
53
54/*********************************************************************************************************************************
55* Defined Constants And Macros *
56*********************************************************************************************************************************/
57/** Enable the physical TLB. */
58#define PGM_WITH_PHYS_TLB
59
60/** @def PGM_HANDLER_PHYS_IS_VALID_STATUS
61 * Checks if valid physical access handler return code (normal handler, not PF).
62 *
63 * Checks if the given strict status code is one of the expected ones for a
64 * physical access handler in the current context.
65 *
66 * @returns true or false.
67 * @param a_rcStrict The status code.
68 * @param a_fWrite Whether it is a write or read being serviced.
69 *
70 * @remarks We wish to keep the list of statuses here as short as possible.
71 * When changing, please make sure to update the PGMPhysRead,
72 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
73 */
74#ifdef IN_RING3
75# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
76 ( (a_rcStrict) == VINF_SUCCESS \
77 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
78#elif defined(IN_RING0)
79# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
80 ( (a_rcStrict) == VINF_SUCCESS \
81 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT \
82 \
83 || (a_rcStrict) == ((a_fWrite) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ) \
84 || (a_rcStrict) == VINF_IOM_R3_MMIO_READ_WRITE \
85 || ((a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE && (a_fWrite)) \
86 \
87 || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR \
88 || (a_rcStrict) == VINF_EM_DBG_STOP \
89 || (a_rcStrict) == VINF_EM_DBG_EVENT \
90 || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
91 || (a_rcStrict) == VINF_EM_OFF \
92 || (a_rcStrict) == VINF_EM_SUSPEND \
93 || (a_rcStrict) == VINF_EM_RESET \
94 )
95#else
96# error "Context?"
97#endif
98
99/** @def PGM_HANDLER_VIRT_IS_VALID_STATUS
100 * Checks if valid virtual access handler return code (normal handler, not PF).
101 *
102 * Checks if the given strict status code is one of the expected ones for a
103 * virtual access handler in the current context.
104 *
105 * @returns true or false.
106 * @param a_rcStrict The status code.
107 * @param a_fWrite Whether it is a write or read being serviced.
108 *
109 * @remarks We wish to keep the list of statuses here as short as possible.
110 * When changing, please make sure to update the PGMPhysRead,
111 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
112 */
113#ifdef IN_RING3
114# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
115 ( (a_rcStrict) == VINF_SUCCESS \
116 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
117#elif defined(IN_RING0)
118# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
119 (false /* no virtual handlers in ring-0! */ )
120#else
121# error "Context?"
122#endif
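
/* A minimal usage sketch (hypothetical rcStrict/fWrite values) of how the two
 * macros above are meant to be used: sanity-asserting the status a physical
 * access handler just returned before acting on it. Illustration only. */
#if 0
    VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT; /* e.g. what pfnHandler returned */
    bool const   fWrite   = true;                        /* the access being serviced     */
    AssertMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, fWrite),
              ("Unexpected handler status %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
#endif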
123
124
125
126/**
127 * Calculate the actual table size.
128 *
129 * The memory is laid out like this:
130 * - PGMPHYSHANDLERTREE (8 bytes)
131 * - Allocation bitmap (8-byte size align)
132 * - Slab of PGMPHYSHANDLER. Start is 64 byte aligned.
133 */
134uint32_t pgmHandlerPhysicalCalcTableSizes(uint32_t *pcEntries, uint32_t *pcbTreeAndBitmap)
135{
136 /*
137 * A minimum of 64 entries and a maximum of ~64K.
138 */
139 uint32_t cEntries = *pcEntries;
140 if (cEntries <= 64)
141 cEntries = 64;
142 else if (cEntries >= _64K)
143 cEntries = _64K;
144 else
145 cEntries = RT_ALIGN_32(cEntries, 16);
146
147 /*
148 * Do the initial calculation.
149 */
150 uint32_t cbBitmap = RT_ALIGN_32(cEntries, 64) / 8;
151 uint32_t cbTreeAndBitmap = RT_ALIGN_32(sizeof(PGMPHYSHANDLERTREE) + cbBitmap, 64);
152 uint32_t cbTable = cEntries * sizeof(PGMPHYSHANDLER);
153 uint32_t cbTotal = cbTreeAndBitmap + cbTable;
154
155 /*
156 * Align the total and try to use up extra space from that.
157 */
158 uint32_t cbTotalAligned = RT_ALIGN_32(cbTotal, RT_MAX(HOST_PAGE_SIZE, _16K));
159 uint32_t cAvail = cbTotalAligned - cbTotal;
160 cAvail /= sizeof(PGMPHYSHANDLER);
161 if (cAvail >= 1)
162 for (;;)
163 {
164 cbBitmap = RT_ALIGN_32(cEntries, 64) / 8;
165 cbTreeAndBitmap = RT_ALIGN_32(sizeof(PGMPHYSHANDLERTREE) + cbBitmap, 64);
166 cbTable = cEntries * sizeof(PGMPHYSHANDLER);
167 cbTotal = cbTreeAndBitmap + cbTable;
168 if (cbTotal <= cbTotalAligned)
169 break;
170 cEntries--;
171 Assert(cEntries >= 16);
172 }
173
174 /*
175 * Return the result.
176 */
177 *pcbTreeAndBitmap = cbTreeAndBitmap;
178 *pcEntries = cEntries;
179 return cbTotalAligned;
180}
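
/* Sketch of how a caller could carve one page-aligned allocation into the
 * layout documented above (tree header, allocation bitmap, 64-byte aligned
 * handler slab). Hypothetical caller for illustration only; the real
 * consumers of this helper live elsewhere in PGM. */
#if 0
    uint32_t        cEntries        = 256;
    uint32_t        cbTreeAndBitmap = 0;
    uint32_t const  cbTotal         = pgmHandlerPhysicalCalcTableSizes(&cEntries, &cbTreeAndBitmap);
    uint8_t * const pbAlloc         = (uint8_t *)RTMemPageAllocZ(cbTotal);
    if (pbAlloc)
    {
        PGMPHYSHANDLERTREE *pTree    = (PGMPHYSHANDLERTREE *)pbAlloc;                      /* tree header       */
        uint64_t           *pbmAlloc = (uint64_t *)(pbAlloc + sizeof(PGMPHYSHANDLERTREE)); /* allocation bitmap */
        PGMPHYSHANDLER     *paSlab   = (PGMPHYSHANDLER *)(pbAlloc + cbTreeAndBitmap);      /* cEntries entries  */
        RT_NOREF(pTree, pbmAlloc, paSlab);
    }
#endif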
181
182
183/**
184 * Looks up a ROM range by its PGMROMRANGE::GCPhys value.
185 */
186DECLINLINE(PPGMROMRANGE) pgmPhysRomLookupByBase(PVMCC pVM, RTGCPHYS GCPhys)
187{
188 for (PPGMROMRANGE pRom = pVM->pgm.s.CTX_SUFF(pRomRanges); pRom; pRom = pRom->CTX_SUFF(pNext))
189 if (pRom->GCPhys == GCPhys)
190 return pRom;
191 return NULL;
192}
193
194#ifndef IN_RING3
195
196/**
197 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
198 * \#PF access handler callback for guest ROM range write access.}
199 *
200 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
201 */
202DECLCALLBACK(VBOXSTRICTRC) pgmPhysRomWritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
203 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
204
205{
206 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
207 AssertReturn(pRom, VINF_EM_RAW_EMULATE_INSTR);
208 uint32_t const iPage = (GCPhysFault - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
209 int rc;
210 RT_NOREF(uErrorCode, pvFault);
211
212 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
213
214 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));
215 switch (pRom->aPages[iPage].enmProt)
216 {
217 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
218 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
219 {
220 /*
221 * If it's a simple instruction which doesn't change the cpu state
222 * we will simply skip it. Otherwise we'll have to defer it to REM.
223 */
224 uint32_t cbOp;
225 PDISSTATE pDis = &pVCpu->pgm.s.Dis;
226 rc = EMInterpretDisasCurrent(pVCpu, pDis, &cbOp);
227 if ( RT_SUCCESS(rc)
228 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
229 && !(pDis->x86.fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
230 {
231 switch (pDis->x86.bOpCode)
232 {
233 /** @todo Find other instructions we can safely skip, possibly
234 * adding this kind of detection to DIS or EM. */
235 case OP_MOV:
236 pCtx->rip += cbOp;
237 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteHandled);
238 return VINF_SUCCESS;
239 }
240 }
241 break;
242 }
243
244 case PGMROMPROT_READ_RAM_WRITE_RAM:
245 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
246 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
247 AssertRC(rc);
248 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
249
250 case PGMROMPROT_READ_ROM_WRITE_RAM:
251 /* Handle it in ring-3 because it's *way* easier there. */
252 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
253 break;
254
255 default:
256 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
257 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
258 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
259 }
260
261 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteUnhandled);
262 return VINF_EM_RAW_EMULATE_INSTR;
263}
264
265#endif /* !IN_RING3 */
266
267
268/**
269 * @callback_method_impl{FNPGMPHYSHANDLER,
270 * Access handler callback for ROM write accesses.}
271 *
272 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
273 */
274DECLCALLBACK(VBOXSTRICTRC)
275pgmPhysRomWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
276 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
277{
278 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
279 AssertReturn(pRom, VERR_INTERNAL_ERROR_3);
280 uint32_t const iPage = (GCPhys - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
281 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));
282 PPGMROMPAGE const pRomPage = &pRom->aPages[iPage];
283
284 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
285 RT_NOREF(pVCpu, pvPhys, enmOrigin);
286
287 if (enmAccessType == PGMACCESSTYPE_READ)
288 {
289 switch (pRomPage->enmProt)
290 {
291 /*
292 * Take the default action.
293 */
294 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
295 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
296 case PGMROMPROT_READ_ROM_WRITE_RAM:
297 case PGMROMPROT_READ_RAM_WRITE_RAM:
298 return VINF_PGM_HANDLER_DO_DEFAULT;
299
300 default:
301 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
302 pRom->aPages[iPage].enmProt, iPage, GCPhys),
303 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
304 }
305 }
306 else
307 {
308 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
309 switch (pRomPage->enmProt)
310 {
311 /*
312 * Ignore writes.
313 */
314 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
315 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
316 return VINF_SUCCESS;
317
318 /*
319 * Write to the RAM page.
320 */
321 case PGMROMPROT_READ_ROM_WRITE_RAM:
322 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
323 {
324 /* This should be impossible now, pvPhys doesn't work cross page any longer. */
325 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> GUEST_PAGE_SHIFT) == iPage);
326
327 /*
328 * Take the lock, do lazy allocation, map the page and copy the data.
329 *
330 * Note that we have to bypass the mapping TLB since it works on
331 * guest physical addresses and entering the shadow page would
332 * kind of screw things up...
333 */
334 PGM_LOCK_VOID(pVM);
335
336 PPGMPAGE pShadowPage = &pRomPage->Shadow;
337 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
338 {
339 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
340 AssertLogRelMsgReturnStmt(pShadowPage, ("%RGp\n", GCPhys), PGM_UNLOCK(pVM), VERR_PGM_PHYS_PAGE_GET_IPE);
341 }
342
343 void *pvDstPage;
344 int rc;
345#if defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)
346 if (PGM_IS_IN_NEM_MODE(pVM) && PGMROMPROT_IS_ROM(pRomPage->enmProt))
347 {
348 pvDstPage = &pRom->pbR3Alternate[GCPhys - pRom->GCPhys];
349 rc = VINF_SUCCESS;
350 }
351 else
352#endif
353 {
354 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
355 if (RT_SUCCESS(rc))
356 pvDstPage = (uint8_t *)pvDstPage + (GCPhys & GUEST_PAGE_OFFSET_MASK);
357 }
358 if (RT_SUCCESS(rc))
359 {
360 memcpy(pvDstPage, pvBuf, cbBuf); /* Both branches above leave pvDstPage pointing at the exact destination (in-page offset included). */
361 pRomPage->LiveSave.fWrittenTo = true;
362
363 AssertMsg( rc == VINF_SUCCESS
364 || ( rc == VINF_PGM_SYNC_CR3
365 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
366 , ("%Rrc\n", rc));
367 rc = VINF_SUCCESS;
368 }
369
370 PGM_UNLOCK(pVM);
371 return rc;
372 }
373
374 default:
375 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
376 pRom->aPages[iPage].enmProt, iPage, GCPhys),
377 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
378 }
379 }
380}
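
/* Quick reference, derived from the two ROM write handlers above, for how
 * each PGMROMPROT mode is treated here (the authoritative semantics live with
 * the enum definition):
 *
 *      mode                        reads served from   guest writes
 *      READ_ROM_WRITE_IGNORE       ROM                 dropped
 *      READ_RAM_WRITE_IGNORE       shadow RAM          dropped
 *      READ_ROM_WRITE_RAM          ROM                 copied into the shadow page
 *      READ_RAM_WRITE_RAM          shadow RAM          copied into the shadow page
 */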
381
382
383/**
384 * Common worker for pgmPhysMmio2WriteHandler and pgmPhysMmio2WritePfHandler.
385 */
386static VBOXSTRICTRC pgmPhysMmio2WriteHandlerCommon(PVMCC pVM, PVMCPUCC pVCpu, uint64_t hMmio2, RTGCPHYS GCPhys, RTGCPTR GCPtr)
387{
388 /*
389 * Get the MMIO2 range.
390 */
391 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), VERR_INTERNAL_ERROR_3);
392 AssertReturn(hMmio2 != 0, VERR_INTERNAL_ERROR_3);
393 PPGMREGMMIO2RANGE pMmio2 = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2 - 1];
394 Assert(pMmio2->idMmio2 == hMmio2);
395 AssertReturn((pMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES) == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
396 VERR_INTERNAL_ERROR_4);
397
398 /*
399 * Get the page and make sure it's an MMIO2 page.
400 */
401 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
402 AssertReturn(pPage, VINF_EM_RAW_EMULATE_INSTR);
403 AssertReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, VINF_EM_RAW_EMULATE_INSTR);
404
405 /*
406 * Set the dirty flag so we can avoid scanning all the pages when it isn't dirty.
407 * (The PGM_PAGE_HNDL_PHYS_STATE_DISABLED handler state indicates that a single
408 * page is dirty, saving the need for additional storage (bitmap).)
409 */
410 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_DIRTY;
411
412 /*
413 * Disable the handler for this page.
414 */
415 int rc = PGMHandlerPhysicalPageTempOff(pVM, pMmio2->RamRange.GCPhys, GCPhys & X86_PTE_PG_MASK);
416 AssertRC(rc);
417#ifndef IN_RING3
418 if (RT_SUCCESS(rc) && GCPtr != ~(RTGCPTR)0)
419 {
420 rc = PGMShwMakePageWritable(pVCpu, GCPtr, PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
421 AssertMsgReturn(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT,
422 ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, rc), rc);
423 }
424#else
425 RT_NOREF(pVCpu, GCPtr);
426#endif
427 return VINF_SUCCESS;
428}
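
/* Recap of the dirty-page tracking scheme implemented above: the first write
 * to a tracked MMIO2 page takes the handler once, marks the whole range dirty
 * (PGMREGMMIO2RANGE_F_IS_DIRTY) and then disables the per-page handler, so
 * subsequent writes to that page run at full speed with no extra bookkeeping.
 * The disabled handler state itself is what marks the page as dirty;
 * presumably ring-3 re-arms the handlers when it queries/resets dirty state. */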
429
430
431#ifndef IN_RING3
432/**
433 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
434 * \#PF access handler callback for guest MMIO2 dirty page tracing.}
435 *
436 * @remarks The @a uUser is the MMIO2 index.
437 */
438DECLCALLBACK(VBOXSTRICTRC) pgmPhysMmio2WritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
439 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
440{
441 RT_NOREF(pVCpu, uErrorCode, pCtx);
442 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
443 if (RT_SUCCESS(rcStrict))
444 {
445 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhysFault, pvFault);
446 PGM_UNLOCK(pVM);
447 }
448 return rcStrict;
449}
450#endif /* !IN_RING3 */
451
452
453/**
454 * @callback_method_impl{FNPGMPHYSHANDLER,
455 * Access handler callback for MMIO2 dirty page tracing.}
456 *
457 * @remarks The @a uUser is the MMIO2 index.
458 */
459DECLCALLBACK(VBOXSTRICTRC)
460pgmPhysMmio2WriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
461 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
462{
463 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
464 if (RT_SUCCESS(rcStrict))
465 {
466 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhys, ~(RTGCPTR)0);
467 PGM_UNLOCK(pVM);
468 if (rcStrict == VINF_SUCCESS)
469 rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
470 }
471 RT_NOREF(pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
472 return rcStrict;
473}
474
475
476/**
477 * Invalidates the RAM range TLBs.
478 *
479 * @param pVM The cross context VM structure.
480 */
481void pgmPhysInvalidRamRangeTlbs(PVMCC pVM)
482{
483 PGM_LOCK_VOID(pVM);
484 RT_ZERO(pVM->pgm.s.apRamRangesTlbR3);
485 RT_ZERO(pVM->pgm.s.apRamRangesTlbR0);
486 PGM_UNLOCK(pVM);
487}
488
489
490/**
491 * Tests if a value of type RTGCPHYS is negative if the type had been signed
492 * instead of unsigned.
493 *
494 * @returns @c true if negative, @c false if positive or zero.
495 * @param a_GCPhys The value to test.
496 * @todo Move me to iprt/types.h.
497 */
498#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
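
/* Worked example (hypothetical addresses) of why this sign test drives the
 * AVL descent in the lookup workers below: when the lookup key is below the
 * node's base, the unsigned subtraction wraps around and sets the top bit. */
#if 0
    RTGCPHYS const GCPhysNode = UINT64_C(0x100000000);  /* node base address      */
    RTGCPHYS const GCPhysKey  = UINT64_C(0x0000f00000); /* key below the node     */
    RTGCPHYS const off        = GCPhysKey - GCPhysNode; /* wraps: top bit is set  */
    Assert(RTGCPHYS_IS_NEGATIVE(off));                  /* -> descend to the left */
#endif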
499
500
501/**
502 * Slow worker for pgmPhysGetRange.
503 *
504 * @copydoc pgmPhysGetRange
505 */
506PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
507{
508 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
509
510 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
511 while (pRam)
512 {
513 RTGCPHYS off = GCPhys - pRam->GCPhys;
514 if (off < pRam->cb)
515 {
516 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
517 return pRam;
518 }
519 if (RTGCPHYS_IS_NEGATIVE(off))
520 pRam = pRam->CTX_SUFF(pLeft);
521 else
522 pRam = pRam->CTX_SUFF(pRight);
523 }
524 return NULL;
525}
526
527
528/**
529 * Slow worker for pgmPhysGetRangeAtOrAbove.
530 *
531 * @copydoc pgmPhysGetRangeAtOrAbove
532 */
533PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
534{
535 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
536
537 PPGMRAMRANGE pLastLeft = NULL;
538 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
539 while (pRam)
540 {
541 RTGCPHYS off = GCPhys - pRam->GCPhys;
542 if (off < pRam->cb)
543 {
544 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
545 return pRam;
546 }
547 if (RTGCPHYS_IS_NEGATIVE(off))
548 {
549 pLastLeft = pRam;
550 pRam = pRam->CTX_SUFF(pLeft);
551 }
552 else
553 pRam = pRam->CTX_SUFF(pRight);
554 }
555 return pLastLeft;
556}
557
558
559/**
560 * Slow worker for pgmPhysGetPage.
561 *
562 * @copydoc pgmPhysGetPage
563 */
564PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
565{
566 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
567
568 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
569 while (pRam)
570 {
571 RTGCPHYS off = GCPhys - pRam->GCPhys;
572 if (off < pRam->cb)
573 {
574 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
575 return &pRam->aPages[off >> GUEST_PAGE_SHIFT];
576 }
577
578 if (RTGCPHYS_IS_NEGATIVE(off))
579 pRam = pRam->CTX_SUFF(pLeft);
580 else
581 pRam = pRam->CTX_SUFF(pRight);
582 }
583 return NULL;
584}
585
586
587/**
588 * Slow worker for pgmPhysGetPageEx.
589 *
590 * @copydoc pgmPhysGetPageEx
591 */
592int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
593{
594 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
595
596 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
597 while (pRam)
598 {
599 RTGCPHYS off = GCPhys - pRam->GCPhys;
600 if (off < pRam->cb)
601 {
602 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
603 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
604 return VINF_SUCCESS;
605 }
606
607 if (RTGCPHYS_IS_NEGATIVE(off))
608 pRam = pRam->CTX_SUFF(pLeft);
609 else
610 pRam = pRam->CTX_SUFF(pRight);
611 }
612
613 *ppPage = NULL;
614 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
615}
616
617
618/**
619 * Slow worker for pgmPhysGetPageAndRangeEx.
620 *
621 * @copydoc pgmPhysGetPageAndRangeEx
622 */
623int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
624{
625 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
626
627 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
628 while (pRam)
629 {
630 RTGCPHYS off = GCPhys - pRam->GCPhys;
631 if (off < pRam->cb)
632 {
633 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
634 *ppRam = pRam;
635 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
636 return VINF_SUCCESS;
637 }
638
639 if (RTGCPHYS_IS_NEGATIVE(off))
640 pRam = pRam->CTX_SUFF(pLeft);
641 else
642 pRam = pRam->CTX_SUFF(pRight);
643 }
644
645 *ppRam = NULL;
646 *ppPage = NULL;
647 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
648}
649
650
651/**
652 * Checks if Address Gate 20 is enabled or not.
653 *
654 * @returns true if enabled.
655 * @returns false if disabled.
656 * @param pVCpu The cross context virtual CPU structure.
657 */
658VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
659{
660 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
661 return pVCpu->pgm.s.fA20Enabled;
662}
663
664
665/**
666 * Validates a GC physical address.
667 *
668 * @returns true if valid.
669 * @returns false if invalid.
670 * @param pVM The cross context VM structure.
671 * @param GCPhys The physical address to validate.
672 */
673VMMDECL(bool) PGMPhysIsGCPhysValid(PVMCC pVM, RTGCPHYS GCPhys)
674{
675 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
676 return pPage != NULL;
677}
678
679
680/**
681 * Checks if a GC physical address is a normal page,
682 * i.e. not ROM, MMIO or reserved.
683 *
684 * @returns true if normal.
685 * @returns false if invalid, ROM, MMIO or reserved page.
686 * @param pVM The cross context VM structure.
687 * @param GCPhys The physical address to check.
688 */
689VMMDECL(bool) PGMPhysIsGCPhysNormal(PVMCC pVM, RTGCPHYS GCPhys)
690{
691 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
692 return pPage
693 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
694}
695
696
697/**
698 * Converts a GC physical address to a HC physical address.
699 *
700 * @returns VINF_SUCCESS on success.
701 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
702 * page but has no physical backing.
703 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
704 * GC physical address.
705 *
706 * @param pVM The cross context VM structure.
707 * @param GCPhys The GC physical address to convert.
708 * @param pHCPhys Where to store the HC physical address on success.
709 */
710VMM_INT_DECL(int) PGMPhysGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
711{
712 PGM_LOCK_VOID(pVM);
713 PPGMPAGE pPage;
714 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
715 if (RT_SUCCESS(rc))
716 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & GUEST_PAGE_OFFSET_MASK);
717 PGM_UNLOCK(pVM);
718 return rc;
719}
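
/* Minimal usage sketch (hypothetical guest address) for the conversion above;
 * illustration only. */
#if 0
    RTHCPHYS HCPhys = NIL_RTHCPHYS;
    int const rcConv = PGMPhysGCPhys2HCPhys(pVM, UINT64_C(0x1000), &HCPhys);
    if (RT_SUCCESS(rcConv))
        Log(("guest %RGp is backed by host %RHp\n", (RTGCPHYS)UINT64_C(0x1000), HCPhys));
#endif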
720
721
722/**
723 * Invalidates all page mapping TLBs.
724 *
725 * @param pVM The cross context VM structure.
726 */
727void pgmPhysInvalidatePageMapTLB(PVMCC pVM)
728{
729 PGM_LOCK_VOID(pVM);
730 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushes);
731
732 /* Clear the R3 & R0 TLBs completely. */
733 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
734 {
735 pVM->pgm.s.PhysTlbR0.aEntries[i].GCPhys = NIL_RTGCPHYS;
736 pVM->pgm.s.PhysTlbR0.aEntries[i].pPage = 0;
737 pVM->pgm.s.PhysTlbR0.aEntries[i].pv = 0;
738 }
739
740 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
741 {
742 pVM->pgm.s.PhysTlbR3.aEntries[i].GCPhys = NIL_RTGCPHYS;
743 pVM->pgm.s.PhysTlbR3.aEntries[i].pPage = 0;
744 pVM->pgm.s.PhysTlbR3.aEntries[i].pMap = 0;
745 pVM->pgm.s.PhysTlbR3.aEntries[i].pv = 0;
746 }
747
748 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_MISC);
749 PGM_UNLOCK(pVM);
750}
751
752
753/**
754 * Invalidates a page mapping TLB entry
755 *
756 * @param pVM The cross context VM structure.
757 * @param GCPhys GCPhys entry to flush
758 *
759 * @note Caller is responsible for calling IEMTlbInvalidateAllPhysicalAllCpus
760 * when needed.
761 */
762void pgmPhysInvalidatePageMapTLBEntry(PVMCC pVM, RTGCPHYS GCPhys)
763{
764 PGM_LOCK_ASSERT_OWNER(pVM);
765
766 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushEntry);
767
768 unsigned const idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
769
770 pVM->pgm.s.PhysTlbR0.aEntries[idx].GCPhys = NIL_RTGCPHYS;
771 pVM->pgm.s.PhysTlbR0.aEntries[idx].pPage = 0;
772 pVM->pgm.s.PhysTlbR0.aEntries[idx].pv = 0;
773
774 pVM->pgm.s.PhysTlbR3.aEntries[idx].GCPhys = NIL_RTGCPHYS;
775 pVM->pgm.s.PhysTlbR3.aEntries[idx].pPage = 0;
776 pVM->pgm.s.PhysTlbR3.aEntries[idx].pMap = 0;
777 pVM->pgm.s.PhysTlbR3.aEntries[idx].pv = 0;
778}
779
780
781/**
782 * Makes sure that there is at least one handy page ready for use.
783 *
784 * This will also take the appropriate actions when reaching water-marks.
785 *
786 * @returns VBox status code.
787 * @retval VINF_SUCCESS on success.
788 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
789 *
790 * @param pVM The cross context VM structure.
791 *
792 * @remarks Must be called from within the PGM critical section. It may
793 * nip back to ring-3/0 in some cases.
794 */
795static int pgmPhysEnsureHandyPage(PVMCC pVM)
796{
797 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
798
799 /*
800 * Do we need to do anything special?
801 */
802#ifdef IN_RING3
803 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
804#else
805 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
806#endif
807 {
808 /*
809 * Allocate pages only if we're out of them, or in ring-3, almost out.
810 */
811#ifdef IN_RING3
812 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
813#else
814 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
815#endif
816 {
817 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
818 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
819#ifdef IN_RING3
820 int rc = PGMR3PhysAllocateHandyPages(pVM);
821#else
822 int rc = pgmR0PhysAllocateHandyPages(pVM, VMMGetCpuId(pVM), false /*fRing3*/);
823#endif
824 if (RT_UNLIKELY(rc != VINF_SUCCESS))
825 {
826 if (RT_FAILURE(rc))
827 return rc;
828 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
829 if (!pVM->pgm.s.cHandyPages)
830 {
831 LogRel(("PGM: no more handy pages!\n"));
832 return VERR_EM_NO_MEMORY;
833 }
834 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
835 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
836#ifndef IN_RING3
837 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
838#endif
839 }
840 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
841 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
842 ("%u\n", pVM->pgm.s.cHandyPages),
843 VERR_PGM_HANDY_PAGE_IPE);
844 }
845 else
846 {
847 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
848 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
849#ifndef IN_RING3
850 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
851 {
852 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
853 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
854 }
855#endif
856 }
857 }
858
859 return VINF_SUCCESS;
860}
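
/* Rough summary of the watermarks used above (names as referenced in the
 * code): PGM_HANDY_PAGES_SET_FF raises VM_FF_PGM_NEED_HANDY_PAGES,
 * PGM_HANDY_PAGES_RZ_TO_R3 additionally requests a trip back to ring-3 from
 * raw/ring-0 context, and PGM_HANDY_PAGES_R3_ALLOC / PGM_HANDY_PAGES_RZ_ALLOC
 * are the points at which we actually go and allocate more handy pages. */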
861
862
863/**
864 * Replace a zero or shared page with new page that we can write to.
865 *
866 * @returns The following VBox status codes.
867 * @retval VINF_SUCCESS on success, pPage is modified.
868 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
869 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
870 *
871 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
872 *
873 * @param pVM The cross context VM structure.
874 * @param pPage The physical page tracking structure. This will
875 * be modified on success.
876 * @param GCPhys The address of the page.
877 *
878 * @remarks Must be called from within the PGM critical section. It may
879 * nip back to ring-3/0 in some cases.
880 *
881 * @remarks This function shouldn't really fail, however if it does
882 * it probably means we've screwed up the size of handy pages and/or
883 * the low-water mark. Or, that some device I/O is causing a lot of
884 * pages to be allocated while the host is in a low-memory
885 * condition. This latter should be handled elsewhere and in a more
886 * controlled manner, it's on the @bugref{3170} todo list...
887 */
888int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
889{
890 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
891
892 /*
893 * Prereqs.
894 */
895 PGM_LOCK_ASSERT_OWNER(pVM);
896 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
897 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
898
899# ifdef PGM_WITH_LARGE_PAGES
900 /*
901 * Try allocate a large page if applicable.
902 */
903 if ( PGMIsUsingLargePages(pVM)
904 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
905 && !VM_IS_NEM_ENABLED(pVM)) /** @todo NEM: Implement large pages support. */
906 {
907 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
908 PPGMPAGE pBasePage;
909
910 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
911 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
912 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
913 {
914 rc = pgmPhysAllocLargePage(pVM, GCPhys);
915 if (rc == VINF_SUCCESS)
916 return rc;
917 }
918 /* Mark the base as type page table, so we don't check over and over again. */
919 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
920
921 /* fall back to 4KB pages. */
922 }
923# endif
924
925 /*
926 * Flush any shadow page table mappings of the page.
927 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
928 */
929 bool fFlushTLBs = false;
930 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
931 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
932
933 /*
934 * Ensure that we've got a page handy, take it and use it.
935 */
936 int rc2 = pgmPhysEnsureHandyPage(pVM);
937 if (RT_FAILURE(rc2))
938 {
939 if (fFlushTLBs)
940 PGM_INVL_ALL_VCPU_TLBS(pVM);
941 Assert(rc2 == VERR_EM_NO_MEMORY);
942 return rc2;
943 }
944 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
945 PGM_LOCK_ASSERT_OWNER(pVM);
946 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
947 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
948
949 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
950 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
951 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_GMMPAGEDESC_PHYS);
952 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
953 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
954 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
955
956 /*
957 * There are one or two actions to be taken the next time we allocate handy pages:
958 * - Tell the GMM (global memory manager) what the page is being used for.
959 * (Speeds up replacement operations - sharing and defragmenting.)
960 * - If the current backing is shared, it must be freed.
961 */
962 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
963 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
964
965 void const *pvSharedPage = NULL;
966 if (!PGM_PAGE_IS_SHARED(pPage))
967 {
968 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
969 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatRZPageReplaceZero);
970 pVM->pgm.s.cZeroPages--;
971 }
972 else
973 {
974 /* Mark this shared page for freeing/dereferencing. */
975 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
976 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
977
978 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
979 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
980 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageReplaceShared));
981 pVM->pgm.s.cSharedPages--;
982
983 /* Grab the address of the page so we can make a copy later on. (safe) */
984 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
985 AssertRC(rc);
986 }
987
988 /*
989 * Do the PGMPAGE modifications.
990 */
991 pVM->pgm.s.cPrivatePages++;
992 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
993 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
994 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
995 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
996 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
997 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID,
998 !pvSharedPage
999 ? IEMTLBPHYSFLUSHREASON_ALLOCATED : IEMTLBPHYSFLUSHREASON_ALLOCATED_FROM_SHARED);
1000
1001 /* Copy the shared page contents to the replacement page. */
1002 if (!pvSharedPage)
1003 { /* likely */ }
1004 else
1005 {
1006 /* Get the virtual address of the new page. */
1007 PGMPAGEMAPLOCK PgMpLck;
1008 void *pvNewPage;
1009 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
1010 if (RT_SUCCESS(rc))
1011 {
1012 memcpy(pvNewPage, pvSharedPage, GUEST_PAGE_SIZE); /** @todo write ASMMemCopyPage */
1013 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1014 }
1015 }
1016
1017 if ( fFlushTLBs
1018 && rc != VINF_PGM_GCPHYS_ALIASED)
1019 PGM_INVL_ALL_VCPU_TLBS(pVM);
1020
1021 /*
1022 * Notify NEM about the mapping change for this page.
1023 *
1024 * Note! Shadow ROM pages are complicated as they can definitely be
1025 * allocated while not visible, so play safe.
1026 */
1027 if (VM_IS_NEM_ENABLED(pVM))
1028 {
1029 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1030 if ( enmType != PGMPAGETYPE_ROM_SHADOW
1031 || pgmPhysGetPage(pVM, GCPhys) == pPage)
1032 {
1033 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1034 rc2 = NEMHCNotifyPhysPageAllocated(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, HCPhys,
1035 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1036 if (RT_SUCCESS(rc2))
1037 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1038 else
1039 rc = rc2;
1040 }
1041 }
1042
1043 return rc;
1044}
1045
1046#ifdef PGM_WITH_LARGE_PAGES
1047
1048/**
1049 * Replace a 2 MB range of zero pages with new pages that we can write to.
1050 *
1051 * @returns The following VBox status codes.
1052 * @retval VINF_SUCCESS on success, pPage is modified.
1053 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1054 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
1055 *
1056 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
1057 *
1058 * @param pVM The cross context VM structure.
1059 * @param GCPhys The address of the page.
1060 *
1061 * @remarks Must be called from within the PGM critical section. It may block
1062 * on GMM and host mutexes/locks, leaving HM context.
1063 */
1064int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys)
1065{
1066 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
1067 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
1068 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1069
1070 /*
1071 * Check Prereqs.
1072 */
1073 PGM_LOCK_ASSERT_OWNER(pVM);
1074 Assert(PGMIsUsingLargePages(pVM));
1075
1076 /*
1077 * All the pages must be unallocated RAM pages, i.e. mapping the ZERO page.
1078 */
1079 PPGMPAGE pFirstPage;
1080 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
1081 if ( RT_SUCCESS(rc)
1082 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM
1083 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
1084 {
1085 /*
1086 * Further they should have PDE type set to PGM_PAGE_PDE_TYPE_DONTCARE,
1087 * since they are unallocated.
1088 */
1089 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
1090 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
1091 if (uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE)
1092 {
1093 /*
1094 * Now, make sure all the other pages in the 2 MB range are in the same state.
1095 */
1096 GCPhys = GCPhysBase;
1097 unsigned cLeft = _2M / GUEST_PAGE_SIZE;
1098 while (cLeft-- > 0)
1099 {
1100 PPGMPAGE pSubPage = pgmPhysGetPage(pVM, GCPhys);
1101 if ( pSubPage
1102 && PGM_PAGE_GET_TYPE(pSubPage) == PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
1103 && PGM_PAGE_GET_STATE(pSubPage) == PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
1104 {
1105 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
1106 GCPhys += GUEST_PAGE_SIZE;
1107 }
1108 else
1109 {
1110 LogFlow(("pgmPhysAllocLargePage: Found page %RGp with wrong attributes (type=%d; state=%d); cancel check.\n",
1111 GCPhys, pSubPage ? PGM_PAGE_GET_TYPE(pSubPage) : -1, pSubPage ? PGM_PAGE_GET_STATE(pSubPage) : -1));
1112
1113 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
1114 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
1115 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
1116 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1117 }
1118 }
1119
1120 /*
1121 * Do the allocation.
1122 */
1123# ifdef IN_RING3
1124 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE, GCPhysBase, NULL);
1125# elif defined(IN_RING0)
1126 rc = pgmR0PhysAllocateLargePage(pVM, VMMGetCpuId(pVM), GCPhysBase);
1127# else
1128# error "Port me"
1129# endif
1130 if (RT_SUCCESS(rc))
1131 {
1132 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
1133 pVM->pgm.s.cLargePages++;
1134 return VINF_SUCCESS;
1135 }
1136
1137 /* If we fail once, it most likely means the host's memory is too
1138 fragmented; don't bother trying again. */
1139 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
1140 return rc;
1141 }
1142 }
1143 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1144}
1145
1146
1147/**
1148 * Recheck the entire 2 MB range to see if we can use it again as a large page.
1149 *
1150 * @returns The following VBox status codes.
1151 * @retval VINF_SUCCESS on success, the large page can be used again
1152 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
1153 *
1154 * @param pVM The cross context VM structure.
1155 * @param GCPhys The address of the page.
1156 * @param pLargePage Page structure of the base page
1157 */
1158int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
1159{
1160 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
1161
1162 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1163
1164 AssertCompile(X86_PDE2M_PAE_PG_MASK == EPT_PDE2M_PG_MASK); /* Paranoia: Caller uses this for guest EPT tables as well. */
1165 GCPhys &= X86_PDE2M_PAE_PG_MASK;
1166
1167 /* Check the base page. */
1168 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
1169 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
1170 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
1171 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1172 {
1173 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
1174 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1175 }
1176
1177 STAM_PROFILE_START(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1178 /* Check all remaining pages in the 2 MB range. */
1179 unsigned i;
1180 GCPhys += GUEST_PAGE_SIZE;
1181 for (i = 1; i < _2M / GUEST_PAGE_SIZE; i++)
1182 {
1183 PPGMPAGE pPage;
1184 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1185 AssertRCBreak(rc);
1186
1187 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
1188 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
1189 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
1190 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1191 {
1192 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
1193 break;
1194 }
1195
1196 GCPhys += GUEST_PAGE_SIZE;
1197 }
1198 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1199
1200 if (i == _2M / GUEST_PAGE_SIZE)
1201 {
1202 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
1203 pVM->pgm.s.cLargePagesDisabled--;
1204 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
1205 return VINF_SUCCESS;
1206 }
1207
1208 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1209}
1210
1211#endif /* PGM_WITH_LARGE_PAGES */
1212
1213
1214/**
1215 * Deal with a write monitored page.
1216 *
1217 * @param pVM The cross context VM structure.
1218 * @param pPage The physical page tracking structure.
1219 * @param GCPhys The guest physical address of the page.
1220 * PGMPhysReleasePageMappingLock() passes NIL_RTGCPHYS in a
1221 * very unlikely situation where it is okay that we let NEM
1222 * fix the page access in a lazy fashion.
1223 *
1224 * @remarks Called from within the PGM critical section.
1225 */
1226void pgmPhysPageMakeWriteMonitoredWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1227{
1228 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
1229 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1230 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1231 if (PGM_PAGE_IS_CODE_PAGE(pPage))
1232 {
1233 PGM_PAGE_CLEAR_CODE_PAGE(pVM, pPage);
1234 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_MADE_WRITABLE);
1235 }
1236
1237 Assert(pVM->pgm.s.cMonitoredPages > 0);
1238 pVM->pgm.s.cMonitoredPages--;
1239 pVM->pgm.s.cWrittenToPages++;
1240
1241#ifdef VBOX_WITH_NATIVE_NEM
1242 /*
1243 * Notify NEM about the protection change so we won't spin forever.
1244 *
1245 * Note! NEM needs to be able to lazily correct page protection as we cannot
1246 * really get it 100% right here, it seems. The page pool does this too.
1247 */
1248 if (VM_IS_NEM_ENABLED(pVM) && GCPhys != NIL_RTGCPHYS)
1249 {
1250 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1251 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1252 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1253 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
1254 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhys) : NULL,
1255 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1256 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1257 }
1258#else
1259 RT_NOREF(GCPhys);
1260#endif
1261}
1262
1263
1264/**
1265 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
1266 *
1267 * @returns VBox strict status code.
1268 * @retval VINF_SUCCESS on success.
1269 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1270 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1271 *
1272 * @param pVM The cross context VM structure.
1273 * @param pPage The physical page tracking structure.
1274 * @param GCPhys The address of the page.
1275 *
1276 * @remarks Called from within the PGM critical section.
1277 */
1278int pgmPhysPageMakeWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1279{
1280 PGM_LOCK_ASSERT_OWNER(pVM);
1281 switch (PGM_PAGE_GET_STATE(pPage))
1282 {
1283 case PGM_PAGE_STATE_WRITE_MONITORED:
1284 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
1285 RT_FALL_THRU();
1286 default: /* to shut up GCC */
1287 case PGM_PAGE_STATE_ALLOCATED:
1288 return VINF_SUCCESS;
1289
1290 /*
1291 * Zero pages can be dummy pages for MMIO or reserved memory,
1292 * so we need to check the flags before joining cause with
1293 * shared page replacement.
1294 */
1295 case PGM_PAGE_STATE_ZERO:
1296 if (PGM_PAGE_IS_MMIO(pPage))
1297 return VERR_PGM_PHYS_PAGE_RESERVED;
1298 RT_FALL_THRU();
1299 case PGM_PAGE_STATE_SHARED:
1300 return pgmPhysAllocPage(pVM, pPage, GCPhys);
1301
1302 /* Not allowed to write to ballooned pages. */
1303 case PGM_PAGE_STATE_BALLOONED:
1304 return VERR_PGM_PHYS_PAGE_BALLOONED;
1305 }
1306}
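
/* Summary of the state handling above: ALLOCATED pages are already writable;
 * WRITE_MONITORED pages are first flipped back to ALLOCATED (clearing the
 * monitoring accounting); ZERO pages are rejected if they are MMIO dummies
 * and otherwise replaced via pgmPhysAllocPage, just like SHARED pages
 * (copy-on-write); writes to BALLOONED pages are refused outright. */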
1307
1308
1309/**
1310 * Internal usage: Map the page specified by its GMM ID.
1311 *
1312 * This is similar to pgmPhysPageMap
1313 *
1314 * @returns VBox status code.
1315 *
1316 * @param pVM The cross context VM structure.
1317 * @param idPage The Page ID.
1318 * @param HCPhys The physical address (for SUPR0HCPhysToVirt).
1319 * @param ppv Where to store the mapping address.
1320 *
1321 * @remarks Called from within the PGM critical section. The mapping is only
1322 * valid while you are inside this section.
1323 */
1324int pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
1325{
1326 /*
1327 * Validation.
1328 */
1329 PGM_LOCK_ASSERT_OWNER(pVM);
1330 AssertReturn(HCPhys && !(HCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1331 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
1332 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
1333
1334#ifdef IN_RING0
1335# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1336 return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)GUEST_PAGE_OFFSET_MASK, ppv);
1337# else
1338 return GMMR0PageIdToVirt(pVM, idPage, ppv);
1339# endif
1340
1341#else
1342 /*
1343 * Find/make Chunk TLB entry for the mapping chunk.
1344 */
1345 PPGMCHUNKR3MAP pMap;
1346 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1347 if (pTlbe->idChunk == idChunk)
1348 {
1349 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1350 pMap = pTlbe->pChunk;
1351 }
1352 else
1353 {
1354 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1355
1356 /*
1357 * Find the chunk, map it if necessary.
1358 */
1359 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1360 if (pMap)
1361 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1362 else
1363 {
1364 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1365 if (RT_FAILURE(rc))
1366 return rc;
1367 }
1368
1369 /*
1370 * Enter it into the Chunk TLB.
1371 */
1372 pTlbe->idChunk = idChunk;
1373 pTlbe->pChunk = pMap;
1374 }
1375
1376 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << GUEST_PAGE_SHIFT);
1377 return VINF_SUCCESS;
1378#endif
1379}
1380
1381
1382/**
1383 * Maps a page into the current virtual address space so it can be accessed.
1384 *
1385 * @returns VBox status code.
1386 * @retval VINF_SUCCESS on success.
1387 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1388 *
1389 * @param pVM The cross context VM structure.
1390 * @param pPage The physical page tracking structure.
1391 * @param GCPhys The address of the page.
1392 * @param ppMap Where to store the address of the mapping tracking structure.
1393 * @param ppv Where to store the mapping address of the page. The page
1394 * offset is masked off!
1395 *
1396 * @remarks Called from within the PGM critical section.
1397 */
1398static int pgmPhysPageMapCommon(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1399{
1400 PGM_LOCK_ASSERT_OWNER(pVM);
1401 NOREF(GCPhys);
1402
1403 /*
1404 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1405 */
1406 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1407 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1408 {
1409 /* Decode the page id to a page in a MMIO2 ram range. */
1410 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1411 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1412 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1413 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
1414 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1415 pPage->s.idPage, pPage->s.uStateY),
1416 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1417 PPGMREGMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1418 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1419 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1420 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> GUEST_PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1421 *ppMap = NULL;
1422# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1423 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1424# elif defined(IN_RING0)
1425 *ppv = (uint8_t *)pMmio2Range->pvR0 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
1426 return VINF_SUCCESS;
1427# else
1428 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
1429 return VINF_SUCCESS;
1430# endif
1431 }
1432
1433# ifdef VBOX_WITH_PGM_NEM_MODE
1434 if (pVM->pgm.s.fNemMode)
1435 {
1436# ifdef IN_RING3
1437 /*
1438 * Find the corresponding RAM range and use that to locate the mapping address.
1439 */
1440 /** @todo Use the page ID for some kind of indexing as we do with MMIO2 above. */
1441 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
1442 AssertLogRelMsgReturn(pRam, ("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
1443 size_t const idxPage = (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
1444 Assert(pPage == &pRam->aPages[idxPage]);
1445 *ppMap = NULL;
1446 *ppv = (uint8_t *)pRam->pvR3 + (idxPage << GUEST_PAGE_SHIFT);
1447 return VINF_SUCCESS;
1448# else
1449 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1450# endif
1451 }
1452# endif
1453
1454 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1455 if (idChunk == NIL_GMM_CHUNKID)
1456 {
1457 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1458 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1459 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1460 {
1461 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1462 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1463 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1464 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1465 *ppv = pVM->pgm.s.abZeroPg;
1466 }
1467 else
1468 *ppv = pVM->pgm.s.abZeroPg;
1469 *ppMap = NULL;
1470 return VINF_SUCCESS;
1471 }
1472
1473# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1474 /*
1475 * Just use the physical address.
1476 */
1477 *ppMap = NULL;
1478 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1479
1480# elif defined(IN_RING0)
1481 /*
1482 * Go by page ID thru GMMR0.
1483 */
1484 *ppMap = NULL;
1485 return GMMR0PageIdToVirt(pVM, PGM_PAGE_GET_PAGEID(pPage), ppv);
1486
1487# else
1488 /*
1489 * Find/make Chunk TLB entry for the mapping chunk.
1490 */
1491 PPGMCHUNKR3MAP pMap;
1492 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1493 if (pTlbe->idChunk == idChunk)
1494 {
1495 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1496 pMap = pTlbe->pChunk;
1497 AssertPtr(pMap->pv);
1498 }
1499 else
1500 {
1501 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1502
1503 /*
1504 * Find the chunk, map it if necessary.
1505 */
1506 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1507 if (pMap)
1508 {
1509 AssertPtr(pMap->pv);
1510 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1511 }
1512 else
1513 {
1514 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1515 if (RT_FAILURE(rc))
1516 return rc;
1517 AssertPtr(pMap->pv);
1518 }
1519
1520 /*
1521 * Enter it into the Chunk TLB.
1522 */
1523 pTlbe->idChunk = idChunk;
1524 pTlbe->pChunk = pMap;
1525 }
1526
1527 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << GUEST_PAGE_SHIFT);
1528 *ppMap = pMap;
1529 return VINF_SUCCESS;
1530# endif /* !IN_RING0 */
1531}
1532
1533
1534/**
1535 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1536 *
1537 * This is typically used in paths where we cannot use the TLB methods (like ROM
1538 * pages) or where there is no point in using them since we won't get many hits.
1539 *
1540 * @returns VBox strict status code.
1541 * @retval VINF_SUCCESS on success.
1542 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1543 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1544 *
1545 * @param pVM The cross context VM structure.
1546 * @param pPage The physical page tracking structure.
1547 * @param GCPhys The address of the page.
1548 * @param ppv Where to store the mapping address of the page. The page
1549 * offset is masked off!
1550 *
1551 * @remarks Called from within the PGM critical section. The mapping is only
1552 * valid while you are inside this section.
1553 */
1554int pgmPhysPageMakeWritableAndMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1555{
1556 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1557 if (RT_SUCCESS(rc))
1558 {
1559 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1560 PPGMPAGEMAP pMapIgnore;
1561 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1562 if (RT_FAILURE(rc2)) /* preserve rc */
1563 rc = rc2;
1564 }
1565 return rc;
1566}
1567
1568
1569/**
1570 * Maps a page into the current virtual address space so it can be accessed for
1571 * both writing and reading.
1572 *
1573 * This is typically used in paths where we cannot use the TLB methods (like ROM
1574 * pages) or where there is no point in using them since we won't get many hits.
1575 *
1576 * @returns VBox status code.
1577 * @retval VINF_SUCCESS on success.
1578 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1579 *
1580 * @param pVM The cross context VM structure.
1581 * @param pPage The physical page tracking structure. Must be in the
1582 * allocated state.
1583 * @param GCPhys The address of the page.
1584 * @param ppv Where to store the mapping address of the page. The page
1585 * offset is masked off!
1586 *
1587 * @remarks Called from within the PGM critical section. The mapping is only
1588 * valid while you are inside this section.
1589 */
1590int pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1591{
1592 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1593 PPGMPAGEMAP pMapIgnore;
1594 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1595}
1596
1597
1598/**
1599 * Maps a page into the current virtual address space so it can be accessed for
1600 * reading.
1601 *
1602 * This is typically used in paths where we cannot use the TLB methods (like ROM
1603 * pages) or where there is no point in using them since we won't get many hits.
1604 *
1605 * @returns VBox status code.
1606 * @retval VINF_SUCCESS on success.
1607 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1608 *
1609 * @param pVM The cross context VM structure.
1610 * @param pPage The physical page tracking structure.
1611 * @param GCPhys The address of the page.
1612 * @param ppv Where to store the mapping address of the page. The page
1613 * offset is masked off!
1614 *
1615 * @remarks Called from within the PGM critical section. The mapping is only
1616 * valid while you are inside this section.
1617 */
1618int pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1619{
1620 PPGMPAGEMAP pMapIgnore;
1621 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1622}
1623
1624
1625/**
1626 * Load a guest page into the ring-3 physical TLB.
1627 *
1628 * @returns VBox status code.
1629 * @retval VINF_SUCCESS on success
1630 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1631 * @param pVM The cross context VM structure.
1632 * @param GCPhys The guest physical address in question.
1633 */
1634int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys)
1635{
1636 PGM_LOCK_ASSERT_OWNER(pVM);
1637
1638 /*
1639 * Find the ram range and page and hand it over to the with-page function.
1640 * 99.8% of requests are expected to be in the first range.
1641 */
1642 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1643 if (!pPage)
1644 {
1645 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1646 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1647 }
1648
1649 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1650}
1651
1652
1653/**
1654 * Load a guest page into the ring-3 physical TLB.
1655 *
1656 * @returns VBox status code.
1657 * @retval VINF_SUCCESS on success
1658 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1659 *
1660 * @param pVM The cross context VM structure.
1661 * @param pPage Pointer to the PGMPAGE structure corresponding to
1662 * GCPhys.
1663 * @param GCPhys The guest physical address in question.
1664 */
1665int pgmPhysPageLoadIntoTlbWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1666{
1667 PGM_LOCK_ASSERT_OWNER(pVM);
1668 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1669
1670 /*
1671 * Map the page.
1672 * Make a special case for the zero page as it is kind of special.
1673 */
1674 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1675 if ( !PGM_PAGE_IS_ZERO(pPage)
1676 && !PGM_PAGE_IS_BALLOONED(pPage))
1677 {
1678 void *pv;
1679 PPGMPAGEMAP pMap;
1680 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1681 if (RT_FAILURE(rc))
1682 return rc;
1683# ifndef IN_RING0
1684 pTlbe->pMap = pMap;
1685# endif
1686 pTlbe->pv = pv;
1687 Assert(!((uintptr_t)pTlbe->pv & GUEST_PAGE_OFFSET_MASK));
1688 }
1689 else
1690 {
1691 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1692# ifndef IN_RING0
1693 pTlbe->pMap = NULL;
1694# endif
1695 pTlbe->pv = pVM->pgm.s.abZeroPg;
1696 }
1697# ifdef PGM_WITH_PHYS_TLB
1698 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1699 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1700 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1701 else
1702 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1703# else
1704 pTlbe->GCPhys = NIL_RTGCPHYS;
1705# endif
1706 pTlbe->pPage = pPage;
1707 return VINF_SUCCESS;
1708}
1709
1710
1711/**
1712 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1713 * own the PGM lock and therefore not need to lock the mapped page.
1714 *
1715 * @returns VBox status code.
1716 * @retval VINF_SUCCESS on success.
1717 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1718 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1719 *
1720 * @param pVM The cross context VM structure.
1721 * @param GCPhys The guest physical address of the page that should be mapped.
1722 * @param pPage Pointer to the PGMPAGE structure for the page.
1723 * @param ppv Where to store the address corresponding to GCPhys.
1724 *
1725 * @internal
1726 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1727 */
1728int pgmPhysGCPhys2CCPtrInternalDepr(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1729{
1730 int rc;
1731 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1732 PGM_LOCK_ASSERT_OWNER(pVM);
1733 pVM->pgm.s.cDeprecatedPageLocks++;
1734
1735 /*
1736 * Make sure the page is writable.
1737 */
1738 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1739 {
1740 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1741 if (RT_FAILURE(rc))
1742 return rc;
1743 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1744 }
1745 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1746
1747 /*
1748 * Get the mapping address.
1749 */
1750 PPGMPAGEMAPTLBE pTlbe;
1751 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1752 if (RT_FAILURE(rc))
1753 return rc;
1754 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1755 return VINF_SUCCESS;
1756}
1757
1758
1759/**
1760 * Locks a page mapping for writing.
1761 *
1762 * @param pVM The cross context VM structure.
1763 * @param pPage The page.
1764 * @param pTlbe The mapping TLB entry for the page.
1765 * @param pLock The lock structure (output).
1766 */
1767DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1768{
1769# ifndef IN_RING0
1770 PPGMPAGEMAP pMap = pTlbe->pMap;
1771 if (pMap)
1772 pMap->cRefs++;
1773# else
1774 RT_NOREF(pTlbe);
1775# endif
1776
1777 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1778 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1779 {
1780 if (cLocks == 0)
1781 pVM->pgm.s.cWriteLockedPages++;
1782 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1783 }
1784 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1785 {
1786 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1787 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1788# ifndef IN_RING0
1789 if (pMap)
1790 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1791# endif
1792 }
1793
1794 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1795# ifndef IN_RING0
1796 pLock->pvMap = pMap;
1797# else
1798 pLock->pvMap = NULL;
1799# endif
1800}
1801
1802/**
1803 * Locks a page mapping for reading.
1804 *
1805 * @param pVM The cross context VM structure.
1806 * @param pPage The page.
1807 * @param pTlbe The mapping TLB entry for the page.
1808 * @param pLock The lock structure (output).
1809 */
1810DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1811{
1812# ifndef IN_RING0
1813 PPGMPAGEMAP pMap = pTlbe->pMap;
1814 if (pMap)
1815 pMap->cRefs++;
1816# else
1817 RT_NOREF(pTlbe);
1818# endif
1819
1820 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1821 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1822 {
1823 if (cLocks == 0)
1824 pVM->pgm.s.cReadLockedPages++;
1825 PGM_PAGE_INC_READ_LOCKS(pPage);
1826 }
1827 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1828 {
1829 PGM_PAGE_INC_READ_LOCKS(pPage);
1830 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1831# ifndef IN_RING0
1832 if (pMap)
1833 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1834# endif
1835 }
1836
1837 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1838# ifndef IN_RING0
1839 pLock->pvMap = pMap;
1840# else
1841 pLock->pvMap = NULL;
1842# endif
1843}
1844
1845
1846/**
1847 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1848 * own the PGM lock and have access to the page structure.
1849 *
1850 * @returns VBox status code.
1851 * @retval VINF_SUCCESS on success.
1852 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1853 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1854 *
1855 * @param pVM The cross context VM structure.
1856 * @param GCPhys The guest physical address of the page that should be mapped.
1857 * @param pPage Pointer to the PGMPAGE structure for the page.
1858 * @param ppv Where to store the address corresponding to GCPhys.
1859 * @param pLock Where to store the lock information that
1860 * pgmPhysReleaseInternalPageMappingLock needs.
1861 *
1862 * @internal
1863 */
1864int pgmPhysGCPhys2CCPtrInternal(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1865{
1866 int rc;
1867 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1868 PGM_LOCK_ASSERT_OWNER(pVM);
1869
1870 /*
1871 * Make sure the page is writable.
1872 */
1873 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1874 {
1875 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1876 if (RT_FAILURE(rc))
1877 return rc;
1878 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1879 }
1880 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1881
1882 /*
1883 * Do the job.
1884 */
1885 PPGMPAGEMAPTLBE pTlbe;
1886 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1887 if (RT_FAILURE(rc))
1888 return rc;
1889 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1890 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1891 return VINF_SUCCESS;
1892}
1893
1894
1895/**
1896 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1897 * own the PGM lock and have access to the page structure.
1898 *
1899 * @returns VBox status code.
1900 * @retval VINF_SUCCESS on success.
1901 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1902 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1903 *
1904 * @param pVM The cross context VM structure.
1905 * @param GCPhys The guest physical address of the page that should be mapped.
1906 * @param pPage Pointer to the PGMPAGE structure for the page.
1907 * @param ppv Where to store the address corresponding to GCPhys.
1908 * @param pLock Where to store the lock information that
1909 * pgmPhysReleaseInternalPageMappingLock needs.
1910 *
1911 * @internal
1912 */
1913int pgmPhysGCPhys2CCPtrInternalReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1914{
1915 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1916 PGM_LOCK_ASSERT_OWNER(pVM);
1917 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1918
1919 /*
1920 * Do the job.
1921 */
1922 PPGMPAGEMAPTLBE pTlbe;
1923 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1924 if (RT_FAILURE(rc))
1925 return rc;
1926 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1927 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1928 return VINF_SUCCESS;
1929}
1930
1931
1932/**
1933 * Requests the mapping of a guest page into the current context.
1934 *
1935 * This API should only be used for very short periods, as it will consume scarce
1936 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1937 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1938 *
1939 * This API will assume your intention is to write to the page, and will
1940 * therefore replace shared and zero pages. If you do not intend to modify
1941 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1942 *
1943 * @returns VBox status code.
1944 * @retval VINF_SUCCESS on success.
1945 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1946 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1947 *
1948 * @param pVM The cross context VM structure.
1949 * @param GCPhys The guest physical address of the page that should be
1950 * mapped.
1951 * @param ppv Where to store the address corresponding to GCPhys.
1952 * @param pLock Where to store the lock information that
1953 * PGMPhysReleasePageMappingLock needs.
1954 *
1955 * @remarks The caller is responsible for dealing with access handlers.
1956 * @todo Add an informational return code for pages with access handlers?
1957 *
1958 * @remark Avoid calling this API from within critical sections (other than
1959 * the PGM one) because of the deadlock risk. External threads may
1960 * need to delegate jobs to the EMTs.
1961 * @remarks Only one page is mapped! Make no assumption about what's after or
1962 * before the returned page!
1963 * @thread Any thread.
1964 */
1965VMM_INT_DECL(int) PGMPhysGCPhys2CCPtr(PVMCC pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1966{
1967 int rc = PGM_LOCK(pVM);
1968 AssertRCReturn(rc, rc);
1969
1970 /*
1971 * Query the Physical TLB entry for the page (may fail).
1972 */
1973 PPGMPAGEMAPTLBE pTlbe;
1974 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1975 if (RT_SUCCESS(rc))
1976 {
1977 /*
1978 * If the page is shared, the zero page, or being write monitored
1979 * it must be converted to a page that's writable if possible.
1980 */
1981 PPGMPAGE pPage = pTlbe->pPage;
1982 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1983 {
1984 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1985 if (RT_SUCCESS(rc))
1986 {
1987 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1988 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1989 }
1990 }
1991 if (RT_SUCCESS(rc))
1992 {
1993 /*
1994 * Now, just perform the locking and calculate the return address.
1995 */
1996 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1997 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1998 }
1999 }
2000
2001 PGM_UNLOCK(pVM);
2002 return rc;
2003}
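
/* Usage sketch for the above (illustration only; the helper name, its caller
   context and the value written are assumptions, not part of the VMM sources):

    static int examplePokeGuestByte(PVMCC pVM, RTGCPHYS GCPhys, uint8_t bValue)
    {
        void           *pv;
        PGMPAGEMAPLOCK  Lock;
        int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);  // maps writable and takes a write lock
        if (RT_SUCCESS(rc))
        {
            *(uint8_t *)pv = bValue;                    // pv already includes the page offset of GCPhys
            PGMPhysReleasePageMappingLock(pVM, &Lock);  // release ASAP, the mapping cache is scarce
        }
        return rc;
    }
 */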
2004
2005
2006/**
2007 * Requests the mapping of a guest page into the current context.
2008 *
2009 * This API should only be used for very short periods, as it will consume scarce
2010 * resources (R0 and GC) in the mapping cache. When you're done with the page,
2011 * call PGMPhysReleasePageMappingLock() ASAP to release it.
2012 *
2013 * @returns VBox status code.
2014 * @retval VINF_SUCCESS on success.
2015 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2016 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2017 *
2018 * @param pVM The cross context VM structure.
2019 * @param GCPhys The guest physical address of the page that should be
2020 * mapped.
2021 * @param ppv Where to store the address corresponding to GCPhys.
2022 * @param pLock Where to store the lock information that
2023 * PGMPhysReleasePageMappingLock needs.
2024 *
2025 * @remarks The caller is responsible for dealing with access handlers.
2026 * @todo Add an informational return code for pages with access handlers?
2027 *
2028 * @remarks Avoid calling this API from within critical sections (other than
2029 * the PGM one) because of the deadlock risk.
2030 * @remarks Only one page is mapped! Make no assumption about what's after or
2031 * before the returned page!
2032 * @thread Any thread.
2033 */
2034VMM_INT_DECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVMCC pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
2035{
2036 int rc = PGM_LOCK(pVM);
2037 AssertRCReturn(rc, rc);
2038
2039 /*
2040 * Query the Physical TLB entry for the page (may fail).
2041 */
2042 PPGMPAGEMAPTLBE pTlbe;
2043 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
2044 if (RT_SUCCESS(rc))
2045 {
2046 /* MMIO pages don't have any readable backing. */
2047 PPGMPAGE pPage = pTlbe->pPage;
2048 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
2049 rc = VERR_PGM_PHYS_PAGE_RESERVED;
2050 else
2051 {
2052 /*
2053 * Now, just perform the locking and calculate the return address.
2054 */
2055 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
2056 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
2057 }
2058 }
2059
2060 PGM_UNLOCK(pVM);
2061 return rc;
2062}
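
/* Usage sketch for the read-only variant (illustration only; the helper name is
   an assumption).  MMIO and special alias pages have no readable backing and
   make this fail with VERR_PGM_PHYS_PAGE_RESERVED:

    static int examplePeekGuestU32(PVMCC pVM, RTGCPHYS GCPhys, uint32_t *pu32)
    {
        void const     *pv;
        PGMPAGEMAPLOCK  Lock;
        int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
        if (RT_SUCCESS(rc))
        {
            *pu32 = *(uint32_t const *)pv;              // assumes the value doesn't cross the page boundary
            PGMPhysReleasePageMappingLock(pVM, &Lock);
        }
        return rc;
    }
 */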
2063
2064
2065/**
2066 * Requests the mapping of a guest page given by virtual address into the current context.
2067 *
2068 * This API should only be used for very short periods, as it will consume
2069 * scarce resources (R0 and GC) in the mapping cache. When you're done
2070 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2071 *
2072 * This API will assume your intention is to write to the page, and will
2073 * therefore replace shared and zero pages. If you do not intend to modify
2074 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
2075 *
2076 * @returns VBox status code.
2077 * @retval VINF_SUCCESS on success.
2078 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2079 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2080 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2081 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2082 *
2083 * @param pVCpu The cross context virtual CPU structure.
2084 * @param GCPtr The guest virtual address of the page that should be
2085 * mapped.
2086 * @param ppv Where to store the address corresponding to GCPtr.
2087 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2088 *
2089 * @remark Avoid calling this API from within critical sections (other than
2090 * the PGM one) because of the deadlock risk.
2091 * @thread EMT
2092 */
2093VMM_INT_DECL(int) PGMPhysGCPtr2CCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
2094{
2095 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2096 RTGCPHYS GCPhys;
2097 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2098 if (RT_SUCCESS(rc))
2099 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2100 return rc;
2101}
2102
2103
2104/**
2105 * Requests the mapping of a guest page given by virtual address into the current context.
2106 *
2107 * This API should only be used for very short periods, as it will consume
2108 * scarce resources (R0 and GC) in the mapping cache. When you're done
2109 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2110 *
2111 * @returns VBox status code.
2112 * @retval VINF_SUCCESS on success.
2113 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2114 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2115 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2116 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2117 *
2118 * @param pVCpu The cross context virtual CPU structure.
2119 * @param GCPtr The guest virtual address of the page that should be
2120 * mapped.
2121 * @param ppv Where to store the address corresponding to GCPtr.
2122 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2123 *
2124 * @remark Avoid calling this API from within critical sections (other than
2125 * the PGM one) because of the deadlock risk.
2126 * @thread EMT
2127 */
2128VMM_INT_DECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
2129{
2130 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2131 RTGCPHYS GCPhys;
2132 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2133 if (RT_SUCCESS(rc))
2134 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2135 return rc;
2136}
2137
2138
2139/**
2140 * Release the mapping of a guest page.
2141 *
2142 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
2143 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
2144 *
2145 * @param pVM The cross context VM structure.
2146 * @param pLock The lock structure initialized by the mapping function.
2147 */
2148VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2149{
2150# ifndef IN_RING0
2151 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
2152# endif
2153 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2154 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2155
2156 pLock->uPageAndType = 0;
2157 pLock->pvMap = NULL;
2158
2159 PGM_LOCK_VOID(pVM);
2160 if (fWriteLock)
2161 {
2162 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2163 Assert(cLocks > 0);
2164 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2165 {
2166 if (cLocks == 1)
2167 {
2168 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2169 pVM->pgm.s.cWriteLockedPages--;
2170 }
2171 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2172 }
2173
2174 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2175 { /* probably extremely likely */ }
2176 else
2177 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2178 }
2179 else
2180 {
2181 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2182 Assert(cLocks > 0);
2183 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2184 {
2185 if (cLocks == 1)
2186 {
2187 Assert(pVM->pgm.s.cReadLockedPages > 0);
2188 pVM->pgm.s.cReadLockedPages--;
2189 }
2190 PGM_PAGE_DEC_READ_LOCKS(pPage);
2191 }
2192 }
2193
2194# ifndef IN_RING0
2195 if (pMap)
2196 {
2197 Assert(pMap->cRefs >= 1);
2198 pMap->cRefs--;
2199 }
2200# endif
2201 PGM_UNLOCK(pVM);
2202}
2203
2204
2205#ifdef IN_RING3
2206/**
2207 * Release the mapping of multiple guest pages.
2208 *
2209 * This is the counterpart to PGMR3PhysBulkGCPhys2CCPtrExternal() and
2210 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal().
2211 *
2212 * @param pVM The cross context VM structure.
2213 * @param cPages Number of pages to unlock.
2214 * @param paLocks Array of lock structures initialized by the mapping
2215 * function.
2216 */
2217VMMDECL(void) PGMPhysBulkReleasePageMappingLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
2218{
2219 Assert(cPages > 0);
2220 bool const fWriteLock = (paLocks[0].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2221#ifdef VBOX_STRICT
2222 for (uint32_t i = 1; i < cPages; i++)
2223 {
2224 Assert(fWriteLock == ((paLocks[i].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE));
2225 AssertPtr(paLocks[i].uPageAndType);
2226 }
2227#endif
2228
2229 PGM_LOCK_VOID(pVM);
2230 if (fWriteLock)
2231 {
2232 /*
2233 * Write locks:
2234 */
2235 for (uint32_t i = 0; i < cPages; i++)
2236 {
2237 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2238 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2239 Assert(cLocks > 0);
2240 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2241 {
2242 if (cLocks == 1)
2243 {
2244 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2245 pVM->pgm.s.cWriteLockedPages--;
2246 }
2247 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2248 }
2249
2250 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2251 { /* probably extremely likely */ }
2252 else
2253 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2254
2255 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2256 if (pMap)
2257 {
2258 Assert(pMap->cRefs >= 1);
2259 pMap->cRefs--;
2260 }
2261
2262 /* Yield the lock: */
2263 if ((i & 1023) == 1023 && i + 1 < cPages)
2264 {
2265 PGM_UNLOCK(pVM);
2266 PGM_LOCK_VOID(pVM);
2267 }
2268 }
2269 }
2270 else
2271 {
2272 /*
2273 * Read locks:
2274 */
2275 for (uint32_t i = 0; i < cPages; i++)
2276 {
2277 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2278 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2279 Assert(cLocks > 0);
2280 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2281 {
2282 if (cLocks == 1)
2283 {
2284 Assert(pVM->pgm.s.cReadLockedPages > 0);
2285 pVM->pgm.s.cReadLockedPages--;
2286 }
2287 PGM_PAGE_DEC_READ_LOCKS(pPage);
2288 }
2289
2290 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2291 if (pMap)
2292 {
2293 Assert(pMap->cRefs >= 1);
2294 pMap->cRefs--;
2295 }
2296
2297 /* Yield the lock: */
2298 if ((i & 1023) == 1023 && i + 1 < cPages)
2299 {
2300 PGM_UNLOCK(pVM);
2301 PGM_LOCK_VOID(pVM);
2302 }
2303 }
2304 }
2305 PGM_UNLOCK(pVM);
2306
2307 RT_BZERO(paLocks, sizeof(paLocks[0]) * cPages);
2308}
2309#endif /* IN_RING3 */
2310
2311
2312/**
2313 * Release the internal mapping of a guest page.
2314 *
2315 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
2316 * pgmPhysGCPhys2CCPtrInternalReadOnly.
2317 *
2318 * @param pVM The cross context VM structure.
2319 * @param pLock The lock structure initialized by the mapping function.
2320 *
2321 * @remarks Caller must hold the PGM lock.
2322 */
2323void pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2324{
2325 PGM_LOCK_ASSERT_OWNER(pVM);
2326 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
2327}
2328
2329
2330/**
2331 * Converts a GC physical address to a HC ring-3 pointer.
2332 *
2333 * @returns VINF_SUCCESS on success.
2334 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2335 * page but has no physical backing.
2336 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2337 * GC physical address.
2338 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2339 * a dynamic ram chunk boundary
2340 *
2341 * @param pVM The cross context VM structure.
2342 * @param GCPhys The GC physical address to convert.
2343 * @param pR3Ptr Where to store the R3 pointer on success.
2344 *
2345 * @deprecated Avoid when possible!
2346 */
2347int pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2348{
2349/** @todo this is kind of hacky and needs some more work. */
2350#ifndef DEBUG_sandervl
2351 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
2352#endif
2353
2354 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
2355 PGM_LOCK_VOID(pVM);
2356
2357 PPGMRAMRANGE pRam;
2358 PPGMPAGE pPage;
2359 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2360 if (RT_SUCCESS(rc))
2361 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2362
2363 PGM_UNLOCK(pVM);
2364 Assert(rc <= VINF_SUCCESS);
2365 return rc;
2366}
2367
2368
2369/**
2370 * Converts a guest pointer to a GC physical address.
2371 *
2372 * This uses the current CR3/CR0/CR4 of the guest.
2373 *
2374 * @returns VBox status code.
2375 * @param pVCpu The cross context virtual CPU structure.
2376 * @param GCPtr The guest pointer to convert.
2377 * @param pGCPhys Where to store the GC physical address.
2378 */
2379VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2380{
2381 PGMPTWALK Walk;
2382 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2383 if (pGCPhys && RT_SUCCESS(rc))
2384 *pGCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK);
2385 return rc;
2386}
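
/* Usage sketch (illustration only; the helper name is an assumption): resolving
   where a guest-virtual pointer lands in guest-physical address space under the
   current CR3/CR0/CR4:

    static int exampleResolveGuestPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
    {
        int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, pGCPhys);    // walks the guest page tables
        if (RT_SUCCESS(rc))
            Log(("example: %RGv -> %RGp\n", GCPtr, *pGCPhys));  // *pGCPhys includes the page offset
        return rc;
    }
 */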
2387
2388
2389/**
2390 * Converts a guest pointer to a HC physical address.
2391 *
2392 * This uses the current CR3/CR0/CR4 of the guest.
2393 *
2394 * @returns VBox status code.
2395 * @param pVCpu The cross context virtual CPU structure.
2396 * @param GCPtr The guest pointer to convert.
2397 * @param pHCPhys Where to store the HC physical address.
2398 */
2399VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2400{
2401 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2402 PGMPTWALK Walk;
2403 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2404 if (RT_SUCCESS(rc))
2405 rc = PGMPhysGCPhys2HCPhys(pVM, Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK), pHCPhys);
2406 return rc;
2407}
2408
2409
2410
2411#undef LOG_GROUP
2412#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2413
2414
2415#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2416/**
2417 * Cache PGMPhys memory access
2418 *
2419 * @param pVM The cross context VM structure.
2420 * @param pCache Cache structure pointer
2421 * @param GCPhys GC physical address
2422 * @param pbR3 HC pointer corresponding to physical page
2423 *
2424 * @thread EMT.
2425 */
2426static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2427{
2428 uint32_t iCacheIndex;
2429
2430 Assert(VM_IS_EMT(pVM));
2431
2432 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
2433 pbR3 = (uint8_t *)((uintptr_t)pbR3 & ~(uintptr_t)GUEST_PAGE_OFFSET_MASK);
2434
2435 iCacheIndex = ((GCPhys >> GUEST_PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2436
2437 ASMBitSet(&pCache->aEntries, iCacheIndex);
2438
2439 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2440 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2441}
2442#endif /* IN_RING3 */
2443
2444
2445/**
2446 * Deals with reading from a page with one or more ALL access handlers.
2447 *
2448 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2449 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2450 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2451 *
2452 * @param pVM The cross context VM structure.
2453 * @param pPage The page descriptor.
2454 * @param GCPhys The physical address to start reading at.
2455 * @param pvBuf Where to put the bits we read.
2456 * @param cb How much to read - less or equal to a page.
2457 * @param enmOrigin The origin of this call.
2458 */
2459static VBOXSTRICTRC pgmPhysReadHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb,
2460 PGMACCESSORIGIN enmOrigin)
2461{
2462 /*
2463 * The most frequent access here is MMIO and shadowed ROM.
2464 * The current code ASSUMES all these access handlers cover full pages!
2465 */
2466
2467 /*
2468 * Whatever we do we need the source page, map it first.
2469 */
2470 PGMPAGEMAPLOCK PgMpLck;
2471 const void *pvSrc = NULL;
2472 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2473/** @todo Check how this can work for MMIO pages? */
2474 if (RT_FAILURE(rc))
2475 {
2476 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2477 GCPhys, pPage, rc));
2478 memset(pvBuf, 0xff, cb);
2479 return VINF_SUCCESS;
2480 }
2481
2482 VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
2483
2484 /*
2485 * Deal with any physical handlers.
2486 */
2487 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2488 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2489 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2490 {
2491 PPGMPHYSHANDLER pCur;
2492 rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2493 if (RT_SUCCESS(rc))
2494 {
2495 Assert(pCur && GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
2496 Assert((pCur->Key & GUEST_PAGE_OFFSET_MASK) == 0);
2497 Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
2498#ifndef IN_RING3
2499 if (enmOrigin != PGMACCESSORIGIN_IEM)
2500 {
2501 /* Cannot reliably handle informational status codes in this context */
2502 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2503 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2504 }
2505#endif
2506 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2507 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler; Assert(pfnHandler);
2508 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
2509 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
2510
2511 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pCur->pszDesc) ));
2512 STAM_PROFILE_START(&pCur->Stat, h);
2513 PGM_LOCK_ASSERT_OWNER(pVM);
2514
2515 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2516 PGM_UNLOCK(pVM);
2517 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, uUser);
2518 PGM_LOCK_VOID(pVM);
2519
2520 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2521 pCur = NULL; /* might not be valid anymore. */
2522 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, false),
2523 ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
2524 if ( rcStrict != VINF_PGM_HANDLER_DO_DEFAULT
2525 && !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2526 {
2527 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2528 return rcStrict;
2529 }
2530 }
2531 else if (rc == VERR_NOT_FOUND)
2532 AssertLogRelMsgFailed(("rc=%Rrc GCPhys=%RGp cb=%#x\n", rc, GCPhys, cb));
2533 else
2534 AssertLogRelMsgFailedReturn(("rc=%Rrc GCPhys=%RGp cb=%#x\n", rc, GCPhys, cb), rc);
2535 }
2536
2537 /*
2538 * Take the default action.
2539 */
2540 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2541 {
2542 memcpy(pvBuf, pvSrc, cb);
2543 rcStrict = VINF_SUCCESS;
2544 }
2545 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2546 return rcStrict;
2547}
2548
2549
2550/**
2551 * Read physical memory.
2552 *
2553 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2554 * want to ignore those.
2555 *
2556 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2557 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2558 * @retval VINF_SUCCESS in all context - read completed.
2559 *
2560 * @retval VINF_EM_OFF in RC and R0 - read completed.
2561 * @retval VINF_EM_SUSPEND in RC and R0 - read completed.
2562 * @retval VINF_EM_RESET in RC and R0 - read completed.
2563 * @retval VINF_EM_HALT in RC and R0 - read completed.
2564 * @retval VINF_SELM_SYNC_GDT in RC only - read completed.
2565 *
2566 * @retval VINF_EM_DBG_STOP in RC and R0 - read completed.
2567 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - read completed.
2568 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2569 *
2570 * @retval VINF_IOM_R3_MMIO_READ in RC and R0.
2571 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2572 *
2573 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2574 *
2575 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2576 * haven't been cleared for strict status codes yet.
2577 *
2578 * @param pVM The cross context VM structure.
2579 * @param GCPhys Physical address start reading from.
2580 * @param pvBuf Where to put the read bits.
2581 * @param cbRead How many bytes to read.
2582 * @param enmOrigin The origin of this call.
2583 */
2584VMMDECL(VBOXSTRICTRC) PGMPhysRead(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2585{
2586 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2587 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2588
2589 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysRead));
2590 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2591
2592 PGM_LOCK_VOID(pVM);
2593
2594 /*
2595 * Copy loop on ram ranges.
2596 */
2597 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2598 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2599 for (;;)
2600 {
2601 /* Inside range or not? */
2602 if (pRam && GCPhys >= pRam->GCPhys)
2603 {
2604 /*
2605 * Must work our way thru this page by page.
2606 */
2607 RTGCPHYS off = GCPhys - pRam->GCPhys;
2608 while (off < pRam->cb)
2609 {
2610 unsigned iPage = off >> GUEST_PAGE_SHIFT;
2611 PPGMPAGE pPage = &pRam->aPages[iPage];
2612 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
2613 if (cb > cbRead)
2614 cb = cbRead;
2615
2616 /*
2617 * Normal page? Get the pointer to it.
2618 */
2619 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2620 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2621 {
2622 /*
2623 * Get the pointer to the page.
2624 */
2625 PGMPAGEMAPLOCK PgMpLck;
2626 const void *pvSrc;
2627 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2628 if (RT_SUCCESS(rc))
2629 {
2630 memcpy(pvBuf, pvSrc, cb);
2631 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2632 }
2633 else
2634 {
2635 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2636 pRam->GCPhys + off, pPage, rc));
2637 memset(pvBuf, 0xff, cb);
2638 }
2639 }
2640 /*
2641 * Have ALL/MMIO access handlers.
2642 */
2643 else
2644 {
2645 VBOXSTRICTRC rcStrict2 = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2646 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2647 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2648 else
2649 {
2650 /* Set the remaining buffer to a known value. */
2651 memset(pvBuf, 0xff, cbRead);
2652 PGM_UNLOCK(pVM);
2653 return rcStrict2;
2654 }
2655 }
2656
2657 /* next page */
2658 if (cb >= cbRead)
2659 {
2660 PGM_UNLOCK(pVM);
2661 return rcStrict;
2662 }
2663 cbRead -= cb;
2664 off += cb;
2665 pvBuf = (char *)pvBuf + cb;
2666 } /* walk pages in ram range. */
2667
2668 GCPhys = pRam->GCPhysLast + 1;
2669 }
2670 else
2671 {
2672 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2673
2674 /*
2675 * Unassigned address space.
2676 */
2677 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2678 if (cb >= cbRead)
2679 {
2680 memset(pvBuf, 0xff, cbRead);
2681 break;
2682 }
2683 memset(pvBuf, 0xff, cb);
2684
2685 cbRead -= cb;
2686 pvBuf = (char *)pvBuf + cb;
2687 GCPhys += cb;
2688 }
2689
2690 /* Advance range if necessary. */
2691 while (pRam && GCPhys > pRam->GCPhysLast)
2692 pRam = pRam->CTX_SUFF(pNext);
2693 } /* Ram range walk */
2694
2695 PGM_UNLOCK(pVM);
2696 return rcStrict;
2697}
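
/* Usage sketch (illustration only; the helper, the structure and the use of
   PGMACCESSORIGIN_DEVICE are assumptions): a handler-respecting read of a small
   guest structure, with the strict status checked via PGM_PHYS_RW_IS_SUCCESS:

    typedef struct EXAMPLEDESC { uint64_t u64Addr; uint32_t cb; uint32_t fFlags; } EXAMPLEDESC;

    static VBOXSTRICTRC exampleReadDesc(PVMCC pVM, RTGCPHYS GCPhys, EXAMPLEDESC *pDesc)
    {
        VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pDesc, sizeof(*pDesc), PGMACCESSORIGIN_DEVICE);
        if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict))
            RT_ZERO(*pDesc);    // read didn't complete (e.g. must be redone in ring-3); don't trust the buffer
        return rcStrict;        // on success rcStrict may still carry an informational status to propagate
    }
 */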
2698
2699
2700/**
2701 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2702 *
2703 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2704 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2705 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2706 *
2707 * @param pVM The cross context VM structure.
2708 * @param pPage The page descriptor.
2709 * @param GCPhys The physical address to start writing at.
2710 * @param pvBuf What to write.
2711 * @param cbWrite How much to write - less than or equal to a page.
2712 * @param enmOrigin The origin of this call.
2713 */
2714static VBOXSTRICTRC pgmPhysWriteHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2715 PGMACCESSORIGIN enmOrigin)
2716{
2717 PGMPAGEMAPLOCK PgMpLck;
2718 void *pvDst = NULL;
2719 VBOXSTRICTRC rcStrict;
2720
2721 /*
2722 * Give priority to physical handlers (like #PF does).
2723 *
2724 * Hope for a lonely physical handler first that covers the whole write
2725 * area. This should be a pretty frequent case with MMIO and the heavy
2726 * usage of full page handlers in the page pool.
2727 */
2728 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2729 PPGMPHYSHANDLER pCur;
2730 rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2731 if (RT_SUCCESS(rcStrict))
2732 {
2733 Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
2734#ifndef IN_RING3
2735 if (enmOrigin != PGMACCESSORIGIN_IEM)
2736 /* Cannot reliably handle informational status codes in this context */
2737 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2738#endif
2739 size_t cbRange = pCur->KeyLast - GCPhys + 1;
2740 if (cbRange > cbWrite)
2741 cbRange = cbWrite;
2742
2743 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->pfnHandler);
2744 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2745 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2746 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2747 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2748 else
2749 rcStrict = VINF_SUCCESS;
2750 if (RT_SUCCESS(rcStrict))
2751 {
2752 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2753 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
2754 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
2755 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
2756 STAM_PROFILE_START(&pCur->Stat, h);
2757
2758 /* Most handlers will want to release the PGM lock for deadlock prevention
2759 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2760 dirty page trackers will want to keep it for performance reasons. */
2761 PGM_LOCK_ASSERT_OWNER(pVM);
2762 if (pCurType->fKeepPgmLock)
2763 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2764 else
2765 {
2766 PGM_UNLOCK(pVM);
2767 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2768 PGM_LOCK_VOID(pVM);
2769 }
2770
2771 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2772 pCur = NULL; /* might not be valid anymore. */
2773 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2774 {
2775 if (pvDst)
2776 memcpy(pvDst, pvBuf, cbRange);
2777 rcStrict = VINF_SUCCESS;
2778 }
2779 else
2780 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
2781 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2782 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pCur ? R3STRING(pCur->pszDesc) : ""));
2783 }
2784 else
2785 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2786 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
2787 if (RT_LIKELY(cbRange == cbWrite) || !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2788 {
2789 if (pvDst)
2790 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2791 return rcStrict;
2792 }
2793
2794 /* more fun to be had below */
2795 cbWrite -= cbRange;
2796 GCPhys += cbRange;
2797 pvBuf = (uint8_t *)pvBuf + cbRange;
2798 pvDst = (uint8_t *)pvDst + cbRange;
2799 }
2800 else if (rcStrict == VERR_NOT_FOUND) /* The handler is somewhere else in the page, deal with it below. */
2801 rcStrict = VINF_SUCCESS;
2802 else
2803 AssertMsgFailedReturn(("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2804 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all GUEST_PAGE_SIZEed! */
2805
2806 /*
2807 * Deal with all the odd ends (used to be deal with virt+phys).
2808 */
2809 Assert(rcStrict != VINF_PGM_HANDLER_DO_DEFAULT);
2810
2811 /* We need a writable destination page. */
2812 if (!pvDst)
2813 {
2814 int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2815 AssertLogRelMsgReturn(RT_SUCCESS(rc2),
2816 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n", GCPhys, pPage, rc2),
2817 rc2);
2818 }
2819
2820 /** @todo clean up this code some more now there are no virtual handlers any
2821 * more. */
2822 /* The loop state (big + ugly). */
2823 PPGMPHYSHANDLER pPhys = NULL;
2824 uint32_t offPhys = GUEST_PAGE_SIZE;
2825 uint32_t offPhysLast = GUEST_PAGE_SIZE;
2826 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2827
2828 /* The loop. */
2829 for (;;)
2830 {
2831 if (fMorePhys && !pPhys)
2832 {
2833 rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhys, &pPhys);
2834 if (RT_SUCCESS_NP(rcStrict))
2835 {
2836 offPhys = 0;
2837 offPhysLast = pPhys->KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2838 }
2839 else
2840 {
2841 AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2842
2843 rcStrict = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
2844 GCPhys, &pPhys);
2845 AssertMsgReturn(RT_SUCCESS(rcStrict) || rcStrict == VERR_NOT_FOUND,
2846 ("%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2847
2848 if ( RT_SUCCESS(rcStrict)
2849 && pPhys->Key <= GCPhys + (cbWrite - 1))
2850 {
2851 offPhys = pPhys->Key - GCPhys;
2852 offPhysLast = pPhys->KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2853 Assert(pPhys->KeyLast - pPhys->Key < _4G);
2854 }
2855 else
2856 {
2857 pPhys = NULL;
2858 fMorePhys = false;
2859 offPhys = offPhysLast = GUEST_PAGE_SIZE;
2860 }
2861 }
2862 }
2863
2864 /*
2865 * Handle access to space without handlers (that's easy).
2866 */
2867 VBOXSTRICTRC rcStrict2 = VINF_PGM_HANDLER_DO_DEFAULT;
2868 uint32_t cbRange = (uint32_t)cbWrite;
2869 Assert(cbRange == cbWrite);
2870
2871 /*
2872 * Physical handler.
2873 */
2874 if (!offPhys)
2875 {
2876#ifndef IN_RING3
2877 if (enmOrigin != PGMACCESSORIGIN_IEM)
2878 /* Cannot reliably handle informational status codes in this context */
2879 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2880#endif
2881 if (cbRange > offPhysLast + 1)
2882 cbRange = offPhysLast + 1;
2883
2884 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pPhys);
2885 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
2886 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pPhys->uUser
2887 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pPhys->uUser);
2888
2889 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2890 STAM_PROFILE_START(&pPhys->Stat, h);
2891
2892 /* Most handlers will want to release the PGM lock for deadlock prevention
2893 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2894 dirty page trackers will want to keep it for performance reasons. */
2895 PGM_LOCK_ASSERT_OWNER(pVM);
2896 if (pCurType->fKeepPgmLock)
2897 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2898 else
2899 {
2900 PGM_UNLOCK(pVM);
2901 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2902 PGM_LOCK_VOID(pVM);
2903 }
2904
2905 STAM_PROFILE_STOP(&pPhys->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2906 pPhys = NULL; /* might not be valid anymore. */
2907 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true),
2908 ("rcStrict2=%Rrc (rcStrict=%Rrc) GCPhys=%RGp pPage=%R[pgmpage] %s\n", VBOXSTRICTRC_VAL(rcStrict2),
2909 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pPhys ? R3STRING(pPhys->pszDesc) : ""));
2910 }
2911
2912 /*
2913 * Execute the default action and merge the status codes.
2914 */
2915 if (rcStrict2 == VINF_PGM_HANDLER_DO_DEFAULT)
2916 {
2917 memcpy(pvDst, pvBuf, cbRange);
2918 rcStrict2 = VINF_SUCCESS;
2919 }
2920 else if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2921 {
2922 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2923 return rcStrict2;
2924 }
2925 else
2926 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2927
2928 /*
2929 * Advance if we've got more stuff to do.
2930 */
2931 if (cbRange >= cbWrite)
2932 {
2933 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2934 return rcStrict;
2935 }
2936
2937
2938 cbWrite -= cbRange;
2939 GCPhys += cbRange;
2940 pvBuf = (uint8_t *)pvBuf + cbRange;
2941 pvDst = (uint8_t *)pvDst + cbRange;
2942
2943 offPhys -= cbRange;
2944 offPhysLast -= cbRange;
2945 }
2946}
2947
2948
2949/**
2950 * Write to physical memory.
2951 *
2952 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2953 * want to ignore those.
2954 *
2955 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2956 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2957 * @retval VINF_SUCCESS in all context - write completed.
2958 *
2959 * @retval VINF_EM_OFF in RC and R0 - write completed.
2960 * @retval VINF_EM_SUSPEND in RC and R0 - write completed.
2961 * @retval VINF_EM_RESET in RC and R0 - write completed.
2962 * @retval VINF_EM_HALT in RC and R0 - write completed.
2963 * @retval VINF_SELM_SYNC_GDT in RC only - write completed.
2964 *
2965 * @retval VINF_EM_DBG_STOP in RC and R0 - write completed.
2966 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - write completed.
2967 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2968 *
2969 * @retval VINF_IOM_R3_MMIO_WRITE in RC and R0.
2970 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2971 * @retval VINF_IOM_R3_MMIO_COMMIT_WRITE in RC and R0.
2972 *
2973 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT in RC only - write completed.
2974 * @retval VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT in RC only.
2975 * @retval VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT in RC only.
2976 * @retval VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT in RC only.
2977 * @retval VINF_CSAM_PENDING_ACTION in RC only.
2978 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2979 *
2980 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2981 * haven't been cleared for strict status codes yet.
2982 *
2983 *
2984 * @param pVM The cross context VM structure.
2985 * @param GCPhys Physical address to write to.
2986 * @param pvBuf What to write.
2987 * @param cbWrite How many bytes to write.
2988 * @param enmOrigin Who is calling.
2989 */
2990VMMDECL(VBOXSTRICTRC) PGMPhysWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2991{
2992 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2993 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2994 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2995
2996 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWrite));
2997 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2998
2999 PGM_LOCK_VOID(pVM);
3000
3001 /*
3002 * Copy loop on ram ranges.
3003 */
3004 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
3005 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
3006 for (;;)
3007 {
3008 /* Inside range or not? */
3009 if (pRam && GCPhys >= pRam->GCPhys)
3010 {
3011 /*
3012 * Must work our way thru this page by page.
3013 */
3014 RTGCPTR off = GCPhys - pRam->GCPhys;
3015 while (off < pRam->cb)
3016 {
3017 RTGCPTR iPage = off >> GUEST_PAGE_SHIFT;
3018 PPGMPAGE pPage = &pRam->aPages[iPage];
3019 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
3020 if (cb > cbWrite)
3021 cb = cbWrite;
3022
3023 /*
3024 * Normal page? Get the pointer to it.
3025 */
3026 if ( !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
3027 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3028 {
3029 PGMPAGEMAPLOCK PgMpLck;
3030 void *pvDst;
3031 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
3032 if (RT_SUCCESS(rc))
3033 {
3034 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
3035 memcpy(pvDst, pvBuf, cb);
3036 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
3037 }
3038 /* Ignore writes to ballooned pages. */
3039 else if (!PGM_PAGE_IS_BALLOONED(pPage))
3040 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
3041 pRam->GCPhys + off, pPage, rc));
3042 }
3043 /*
3044 * Active WRITE or ALL access handlers.
3045 */
3046 else
3047 {
3048 VBOXSTRICTRC rcStrict2 = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
3049 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
3050 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
3051 else
3052 {
3053 PGM_UNLOCK(pVM);
3054 return rcStrict2;
3055 }
3056 }
3057
3058 /* next page */
3059 if (cb >= cbWrite)
3060 {
3061 PGM_UNLOCK(pVM);
3062 return rcStrict;
3063 }
3064
3065 cbWrite -= cb;
3066 off += cb;
3067 pvBuf = (const char *)pvBuf + cb;
3068 } /* walk pages in ram range */
3069
3070 GCPhys = pRam->GCPhysLast + 1;
3071 }
3072 else
3073 {
3074 /*
3075 * Unassigned address space, skip it.
3076 */
3077 if (!pRam)
3078 break;
3079 size_t cb = pRam->GCPhys - GCPhys;
3080 if (cb >= cbWrite)
3081 break;
3082 cbWrite -= cb;
3083 pvBuf = (const char *)pvBuf + cb;
3084 GCPhys += cb;
3085 }
3086
3087 /* Advance range if necessary. */
3088 while (pRam && GCPhys > pRam->GCPhysLast)
3089 pRam = pRam->CTX_SUFF(pNext);
3090 } /* Ram range walk */
3091
3092 PGM_UNLOCK(pVM);
3093 return rcStrict;
3094}
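
/* Usage sketch (illustration only; the helper and the use of
   PGMACCESSORIGIN_DEVICE are assumptions): two handler-respecting writes whose
   strict statuses are merged the same way the copy loop above does it:

    static VBOXSTRICTRC exampleWritePair(PVMCC pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2, uint32_t u32)
    {
        VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys1, &u32, sizeof(u32), PGMACCESSORIGIN_DEVICE);
        if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict))
            return rcStrict;                                       // first write didn't complete
        VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM, GCPhys2, &u32, sizeof(u32), PGMACCESSORIGIN_DEVICE);
        if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
            PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);  // keep the more significant status
        else
            rcStrict = rcStrict2;                                  // second write didn't complete
        return rcStrict;
    }
 */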
3095
3096
3097/**
3098 * Read from guest physical memory by GC physical address, bypassing
3099 * MMIO and access handlers.
3100 *
3101 * @returns VBox status code.
3102 * @param pVM The cross context VM structure.
3103 * @param pvDst The destination address.
3104 * @param GCPhysSrc The source address (GC physical address).
3105 * @param cb The number of bytes to read.
3106 */
3107VMMDECL(int) PGMPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
3108{
3109 /*
3110 * Treat the first page as a special case.
3111 */
3112 if (!cb)
3113 return VINF_SUCCESS;
3114
3115 /* map the 1st page */
3116 void const *pvSrc;
3117 PGMPAGEMAPLOCK Lock;
3118 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3119 if (RT_FAILURE(rc))
3120 return rc;
3121
3122 /* optimize for the case where access is completely within the first page. */
3123 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysSrc & GUEST_PAGE_OFFSET_MASK);
3124 if (RT_LIKELY(cb <= cbPage))
3125 {
3126 memcpy(pvDst, pvSrc, cb);
3127 PGMPhysReleasePageMappingLock(pVM, &Lock);
3128 return VINF_SUCCESS;
3129 }
3130
3131 /* copy to the end of the page. */
3132 memcpy(pvDst, pvSrc, cbPage);
3133 PGMPhysReleasePageMappingLock(pVM, &Lock);
3134 GCPhysSrc += cbPage;
3135 pvDst = (uint8_t *)pvDst + cbPage;
3136 cb -= cbPage;
3137
3138 /*
3139 * Page by page.
3140 */
3141 for (;;)
3142 {
3143 /* map the page */
3144 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3145 if (RT_FAILURE(rc))
3146 return rc;
3147
3148 /* last page? */
3149 if (cb <= GUEST_PAGE_SIZE)
3150 {
3151 memcpy(pvDst, pvSrc, cb);
3152 PGMPhysReleasePageMappingLock(pVM, &Lock);
3153 return VINF_SUCCESS;
3154 }
3155
3156 /* copy the entire page and advance */
3157 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3158 PGMPhysReleasePageMappingLock(pVM, &Lock);
3159 GCPhysSrc += GUEST_PAGE_SIZE;
3160 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
3161 cb -= GUEST_PAGE_SIZE;
3162 }
3163 /* won't ever get here. */
3164}
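
/* Usage sketch (illustration only; the helper and the 0xff fill convention are
   assumptions): snapshotting a small block of guest RAM while deliberately
   bypassing access handlers and MMIO, e.g. for diagnostic dumps:

    static int exampleSnapshotGuestRam(PVMCC pVM, RTGCPHYS GCPhys, void *pvDst, size_t cb)
    {
        int rc = PGMPhysSimpleReadGCPhys(pVM, pvDst, GCPhys, cb);  // no handlers are triggered
        if (RT_FAILURE(rc))
            memset(pvDst, 0xff, cb);                               // mark unreadable areas, like PGMPhysRead does
        return rc;
    }
 */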
3165
3166
3167/**
3168 * Write to guest physical memory referenced by a GC physical address.
3170 *
3171 * This will bypass MMIO and access handlers.
3172 *
3173 * @returns VBox status code.
3174 * @param pVM The cross context VM structure.
3175 * @param GCPhysDst The GC physical address of the destination.
3176 * @param pvSrc The source buffer.
3177 * @param cb The number of bytes to write.
3178 */
3179VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
3180{
3181 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
3182
3183 /*
3184 * Treat the first page as a special case.
3185 */
3186 if (!cb)
3187 return VINF_SUCCESS;
3188
3189 /* map the 1st page */
3190 void *pvDst;
3191 PGMPAGEMAPLOCK Lock;
3192 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3193 if (RT_FAILURE(rc))
3194 return rc;
3195
3196 /* optimize for the case where access is completely within the first page. */
3197 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysDst & GUEST_PAGE_OFFSET_MASK);
3198 if (RT_LIKELY(cb <= cbPage))
3199 {
3200 memcpy(pvDst, pvSrc, cb);
3201 PGMPhysReleasePageMappingLock(pVM, &Lock);
3202 return VINF_SUCCESS;
3203 }
3204
3205 /* copy to the end of the page. */
3206 memcpy(pvDst, pvSrc, cbPage);
3207 PGMPhysReleasePageMappingLock(pVM, &Lock);
3208 GCPhysDst += cbPage;
3209 pvSrc = (const uint8_t *)pvSrc + cbPage;
3210 cb -= cbPage;
3211
3212 /*
3213 * Page by page.
3214 */
3215 for (;;)
3216 {
3217 /* map the page */
3218 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3219 if (RT_FAILURE(rc))
3220 return rc;
3221
3222 /* last page? */
3223 if (cb <= GUEST_PAGE_SIZE)
3224 {
3225 memcpy(pvDst, pvSrc, cb);
3226 PGMPhysReleasePageMappingLock(pVM, &Lock);
3227 return VINF_SUCCESS;
3228 }
3229
3230 /* copy the entire page and advance */
3231 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3232 PGMPhysReleasePageMappingLock(pVM, &Lock);
3233 GCPhysDst += GUEST_PAGE_SIZE;
3234 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3235 cb -= GUEST_PAGE_SIZE;
3236 }
3237 /* won't ever get here. */
3238}
3239
3240
3241/**
3242 * Read from guest physical memory referenced by GC pointer.
3243 *
3244 * This function uses the current CR3/CR0/CR4 of the guest and will
3245 * bypass access handlers and not set any accessed bits.
3246 *
3247 * @returns VBox status code.
3248 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3249 * @param pvDst The destination address.
3250 * @param GCPtrSrc The source address (GC pointer).
3251 * @param cb The number of bytes to read.
3252 */
3253VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3254{
3255 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3256/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3257
3258 /*
3259 * Treat the first page as a special case.
3260 */
3261 if (!cb)
3262 return VINF_SUCCESS;
3263
3264 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleRead));
3265 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3266
3267 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3268 * when many VCPUs are fighting for the lock.
3269 */
3270 PGM_LOCK_VOID(pVM);
3271
3272 /* map the 1st page */
3273 void const *pvSrc;
3274 PGMPAGEMAPLOCK Lock;
3275 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3276 if (RT_FAILURE(rc))
3277 {
3278 PGM_UNLOCK(pVM);
3279 return rc;
3280 }
3281
3282 /* optimize for the case where access is completely within the first page. */
3283 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3284 if (RT_LIKELY(cb <= cbPage))
3285 {
3286 memcpy(pvDst, pvSrc, cb);
3287 PGMPhysReleasePageMappingLock(pVM, &Lock);
3288 PGM_UNLOCK(pVM);
3289 return VINF_SUCCESS;
3290 }
3291
3292 /* copy to the end of the page. */
3293 memcpy(pvDst, pvSrc, cbPage);
3294 PGMPhysReleasePageMappingLock(pVM, &Lock);
3295 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3296 pvDst = (uint8_t *)pvDst + cbPage;
3297 cb -= cbPage;
3298
3299 /*
3300 * Page by page.
3301 */
3302 for (;;)
3303 {
3304 /* map the page */
3305 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3306 if (RT_FAILURE(rc))
3307 {
3308 PGM_UNLOCK(pVM);
3309 return rc;
3310 }
3311
3312 /* last page? */
3313 if (cb <= GUEST_PAGE_SIZE)
3314 {
3315 memcpy(pvDst, pvSrc, cb);
3316 PGMPhysReleasePageMappingLock(pVM, &Lock);
3317 PGM_UNLOCK(pVM);
3318 return VINF_SUCCESS;
3319 }
3320
3321 /* copy the entire page and advance */
3322 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3323 PGMPhysReleasePageMappingLock(pVM, &Lock);
3324 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + GUEST_PAGE_SIZE);
3325 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
3326 cb -= GUEST_PAGE_SIZE;
3327 }
3328 /* won't ever get here. */
3329}
3330
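/*
 * Illustrative sketch (not compiled): how an EMT-context caller might use
 * PGMPhysSimpleReadGCPtr to peek at guest memory without triggering access
 * handlers or touching the accessed bits.  The helper name and the fixed
 * buffer size are hypothetical.
 */
#if 0
static int pgmSketchPeekGuestBytes(PVMCPUCC pVCpu, RTGCPTR GCPtrSrc)
{
    uint8_t abBuf[64];
    int rc = PGMPhysSimpleReadGCPtr(pVCpu, abBuf, GCPtrSrc, sizeof(abBuf));
    if (RT_FAILURE(rc))
        return rc; /* e.g. the guest mapping is absent; the caller decides how to react. */
    Log(("pgmSketchPeekGuestBytes: first byte at %RGv is %#x\n", GCPtrSrc, abBuf[0]));
    return VINF_SUCCESS;
}
#endif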
3331
3332/**
3333 * Write to guest physical memory referenced by GC pointer.
3334 *
3335 * This function uses the current CR3/CR0/CR4 of the guest and will
3336 * bypass access handlers and not set dirty or accessed bits.
3337 *
3338 * @returns VBox status code.
3339 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3340 * @param GCPtrDst The destination address (GC pointer).
3341 * @param pvSrc The source address.
3342 * @param cb The number of bytes to write.
3343 */
3344VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3345{
3346 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3347 VMCPU_ASSERT_EMT(pVCpu);
3348
3349 /*
3350 * Treat the first page as a special case.
3351 */
3352 if (!cb)
3353 return VINF_SUCCESS;
3354
3355 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWrite));
3356 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3357
3358 /* map the 1st page */
3359 void *pvDst;
3360 PGMPAGEMAPLOCK Lock;
3361 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3362 if (RT_FAILURE(rc))
3363 return rc;
3364
3365 /* optimize for the case where access is completely within the first page. */
3366 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3367 if (RT_LIKELY(cb <= cbPage))
3368 {
3369 memcpy(pvDst, pvSrc, cb);
3370 PGMPhysReleasePageMappingLock(pVM, &Lock);
3371 return VINF_SUCCESS;
3372 }
3373
3374 /* copy to the end of the page. */
3375 memcpy(pvDst, pvSrc, cbPage);
3376 PGMPhysReleasePageMappingLock(pVM, &Lock);
3377 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3378 pvSrc = (const uint8_t *)pvSrc + cbPage;
3379 cb -= cbPage;
3380
3381 /*
3382 * Page by page.
3383 */
3384 for (;;)
3385 {
3386 /* map the page */
3387 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3388 if (RT_FAILURE(rc))
3389 return rc;
3390
3391 /* last page? */
3392 if (cb <= GUEST_PAGE_SIZE)
3393 {
3394 memcpy(pvDst, pvSrc, cb);
3395 PGMPhysReleasePageMappingLock(pVM, &Lock);
3396 return VINF_SUCCESS;
3397 }
3398
3399 /* copy the entire page and advance */
3400 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3401 PGMPhysReleasePageMappingLock(pVM, &Lock);
3402 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
3403 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3404 cb -= GUEST_PAGE_SIZE;
3405 }
3406 /* won't ever get here. */
3407}
3408
3409
3410/**
3411 * Write to guest physical memory referenced by GC pointer and update the PTE.
3412 *
3413 * This function uses the current CR3/CR0/CR4 of the guest and will
3414 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3415 *
3416 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3417 *
3418 * @returns VBox status code.
3419 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3420 * @param GCPtrDst The destination address (GC pointer).
3421 * @param pvSrc The source address.
3422 * @param cb The number of bytes to write.
3423 */
3424VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3425{
3426 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3427 VMCPU_ASSERT_EMT(pVCpu);
3428
3429 /*
3430 * Treat the first page as a special case.
3431 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3432 */
3433 if (!cb)
3434 return VINF_SUCCESS;
3435
3436 /* map the 1st page */
3437 void *pvDst;
3438 PGMPAGEMAPLOCK Lock;
3439 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3440 if (RT_FAILURE(rc))
3441 return rc;
3442
3443 /* optimize for the case where access is completely within the first page. */
3444 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3445 if (RT_LIKELY(cb <= cbPage))
3446 {
3447 memcpy(pvDst, pvSrc, cb);
3448 PGMPhysReleasePageMappingLock(pVM, &Lock);
3449 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3450 return VINF_SUCCESS;
3451 }
3452
3453 /* copy to the end of the page. */
3454 memcpy(pvDst, pvSrc, cbPage);
3455 PGMPhysReleasePageMappingLock(pVM, &Lock);
3456 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3457 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3458 pvSrc = (const uint8_t *)pvSrc + cbPage;
3459 cb -= cbPage;
3460
3461 /*
3462 * Page by page.
3463 */
3464 for (;;)
3465 {
3466 /* map the page */
3467 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3468 if (RT_FAILURE(rc))
3469 return rc;
3470
3471 /* last page? */
3472 if (cb <= GUEST_PAGE_SIZE)
3473 {
3474 memcpy(pvDst, pvSrc, cb);
3475 PGMPhysReleasePageMappingLock(pVM, &Lock);
3476 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3477 return VINF_SUCCESS;
3478 }
3479
3480 /* copy the entire page and advance */
3481 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3482 PGMPhysReleasePageMappingLock(pVM, &Lock);
3483 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3484 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
3485 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3486 cb -= GUEST_PAGE_SIZE;
3487 }
3488 /* won't ever get here. */
3489}
3490
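/*
 * Illustrative sketch (not compiled): writing guest memory on the guest's
 * behalf while also marking the PTE accessed and dirty.  When the dirty bit
 * must not be touched, PGMPhysSimpleWriteGCPtr above is the one to use.  The
 * helper name and payload are hypothetical.
 */
#if 0
static int pgmSketchStoreU32Dirty(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, uint32_t uValue)
{
    /* Bypasses access handlers; sets X86_PTE_A and X86_PTE_D on the pages touched. */
    return PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, &uValue, sizeof(uValue));
}
#endif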
3491
3492/**
3493 * Read from guest physical memory referenced by GC pointer.
3494 *
3495 * This function uses the current CR3/CR0/CR4 of the guest and will
3496 * respect access handlers and set accessed bits.
3497 *
3498 * @returns Strict VBox status, see PGMPhysRead for details.
3499 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3500 * specified virtual address.
3501 *
3502 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3503 * @param pvDst The destination address.
3504 * @param GCPtrSrc The source address (GC pointer).
3505 * @param cb The number of bytes to read.
3506 * @param enmOrigin Who is calling.
3507 * @thread EMT(pVCpu)
3508 */
3509VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3510{
3511 int rc;
3512 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3513 VMCPU_ASSERT_EMT(pVCpu);
3514
3515 /*
3516 * Anything to do?
3517 */
3518 if (!cb)
3519 return VINF_SUCCESS;
3520
3521 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3522
3523 /*
3524 * Optimize reads within a single page.
3525 */
3526 if (((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
3527 {
3528 /* Convert virtual to physical address + flags */
3529 PGMPTWALK Walk;
3530 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3531 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3532 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3533
3534 /* mark the guest page as accessed. */
3535 if (!(Walk.fEffective & X86_PTE_A))
3536 {
3537 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3538 AssertRC(rc);
3539 }
3540
3541 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3542 }
3543
3544 /*
3545 * Page by page.
3546 */
3547 for (;;)
3548 {
3549 /* Convert virtual to physical address + flags */
3550 PGMPTWALK Walk;
3551 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3552 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3553 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3554
3555 /* mark the guest page as accessed. */
3556 if (!(Walk.fEffective & X86_PTE_A))
3557 {
3558 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3559 AssertRC(rc);
3560 }
3561
3562 /* copy */
3563 size_t cbRead = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3564 if (cbRead < cb)
3565 {
3566 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3567 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3568 { /* likely */ }
3569 else
3570 return rcStrict;
3571 }
3572 else /* Last page (cbRead is GUEST_PAGE_SIZE, we only need cb!) */
3573 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3574
3575 /* next */
3576 Assert(cb > cbRead);
3577 cb -= cbRead;
3578 pvDst = (uint8_t *)pvDst + cbRead;
3579 GCPtrSrc += cbRead;
3580 }
3581}
3582
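/*
 * Illustrative sketch (not compiled): a handler-respecting read.  Unlike the
 * PGMPhysSimple* variants, the strict status code may carry an informational
 * value from an access handler, so it is compared against VINF_SUCCESS rather
 * than treated as a plain int.  The helper name and the use of
 * PGMACCESSORIGIN_DEBUGGER as the origin are assumptions for illustration.
 */
#if 0
static VBOXSTRICTRC pgmSketchReadWithHandlers(PVMCPUCC pVCpu, RTGCPTR GCPtrSrc, void *pvDst, size_t cb)
{
    VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, pvDst, GCPtrSrc, cb, PGMACCESSORIGIN_DEBUGGER);
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("pgmSketchReadWithHandlers: %RGv/%zu -> %Rrc\n", GCPtrSrc, cb, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif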
3583
3584/**
3585 * Write to guest physical memory referenced by GC pointer.
3586 *
3587 * This function uses the current CR3/CR0/CR4 of the guest and will
3588 * respect access handlers and set dirty and accessed bits.
3589 *
3590 * @returns Strict VBox status, see PGMPhysWrite for details.
3591 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3592 * specified virtual address.
3593 *
3594 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3595 * @param GCPtrDst The destination address (GC pointer).
3596 * @param pvSrc The source address.
3597 * @param cb The number of bytes to write.
3598 * @param enmOrigin Who is calling.
3599 */
3600VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3601{
3602 int rc;
3603 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3604 VMCPU_ASSERT_EMT(pVCpu);
3605
3606 /*
3607 * Anything to do?
3608 */
3609 if (!cb)
3610 return VINF_SUCCESS;
3611
3612 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3613
3614 /*
3615 * Optimize writes within a single page.
3616 */
3617 if (((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
3618 {
3619 /* Convert virtual to physical address + flags */
3620 PGMPTWALK Walk;
3621 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3622 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3623 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3624
3625 /* Mention when we ignore X86_PTE_RW... */
3626 if (!(Walk.fEffective & X86_PTE_RW))
3627 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3628
3629 /* Mark the guest page as accessed and dirty if necessary. */
3630 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3631 {
3632 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3633 AssertRC(rc);
3634 }
3635
3636 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3637 }
3638
3639 /*
3640 * Page by page.
3641 */
3642 for (;;)
3643 {
3644 /* Convert virtual to physical address + flags */
3645 PGMPTWALK Walk;
3646 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3647 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3648 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3649
3650 /* Mention when we ignore X86_PTE_RW... */
3651 if (!(Walk.fEffective & X86_PTE_RW))
3652 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3653
3654 /* Mark the guest page as accessed and dirty if necessary. */
3655 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3656 {
3657 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3658 AssertRC(rc);
3659 }
3660
3661 /* copy */
3662 size_t cbWrite = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3663 if (cbWrite < cb)
3664 {
3665 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3666 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3667 { /* likely */ }
3668 else
3669 return rcStrict;
3670 }
3671 else /* Last page (cbWrite is GUEST_PAGE_SIZE, we only need cb!) */
3672 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3673
3674 /* next */
3675 Assert(cb > cbWrite);
3676 cb -= cbWrite;
3677 pvSrc = (uint8_t *)pvSrc + cbWrite;
3678 GCPtrDst += cbWrite;
3679 }
3680}
3681
3682
3683/**
3684 * Return the page type of the specified physical address.
3685 *
3686 * @returns The page type.
3687 * @param pVM The cross context VM structure.
3688 * @param GCPhys Guest physical address
3689 */
3690VMM_INT_DECL(PGMPAGETYPE) PGMPhysGetPageType(PVMCC pVM, RTGCPHYS GCPhys)
3691{
3692 PGM_LOCK_VOID(pVM);
3693 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3694 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
3695 PGM_UNLOCK(pVM);
3696
3697 return enmPgType;
3698}
3699
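/*
 * Illustrative sketch (not compiled): using the page type query to special
 * case MMIO before attempting direct access.  The helper name is hypothetical
 * and PGMPAGETYPE_MMIO is assumed to be the MMIO enum value.
 */
#if 0
static bool pgmSketchIsMmioPage(PVMCC pVM, RTGCPHYS GCPhys)
{
    /* PGMPAGETYPE_INVALID means no page is mapped at that address at all. */
    return PGMPhysGetPageType(pVM, GCPhys) == PGMPAGETYPE_MMIO;
}
#endif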
3700
3701/**
3702 * Converts a GC physical address to a HC ring-3 pointer, with some
3703 * additional checks.
3704 *
3705 * @returns VBox status code (no informational statuses).
3706 *
3707 * @param pVM The cross context VM structure.
3708 * @param pVCpu The cross context virtual CPU structure of the
3709 * calling EMT.
3710 * @param GCPhys The GC physical address to convert. This API masks
3711 * the A20 line when necessary.
3712 * @param puTlbPhysRev Where to read the physical TLB revision. Needs to
3713 * be done while holding the PGM lock.
3714 * @param ppb Where to store the pointer corresponding to GCPhys
3715 * on success.
3716 * @param pfTlb The TLB flags and revision. We only add stuff.
3717 *
3718 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr and
3719 * PGMPhysIemGCPhys2Ptr.
3720 *
3721 * @thread EMT(pVCpu).
3722 */
3723VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
3724 R3R0PTRTYPE(uint8_t *) *ppb,
3725 uint64_t *pfTlb)
3726{
3727 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
3728 Assert(!(GCPhys & X86_PAGE_OFFSET_MASK));
3729
3730 PGM_LOCK_VOID(pVM);
3731
3732 PPGMRAMRANGE pRam;
3733 PPGMPAGE pPage;
3734 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3735 if (RT_SUCCESS(rc))
3736 {
3737 if (!PGM_PAGE_IS_BALLOONED(pPage))
3738 {
3739 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3740 {
3741 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
3742 {
3743 /*
3744 * No access handler.
3745 */
3746 switch (PGM_PAGE_GET_STATE(pPage))
3747 {
3748 case PGM_PAGE_STATE_ALLOCATED:
3749 Assert(!PGM_PAGE_IS_CODE_PAGE(pPage));
3750 *pfTlb |= *puTlbPhysRev;
3751 break;
3752 case PGM_PAGE_STATE_BALLOONED:
3753 AssertFailed();
3754 RT_FALL_THRU();
3755 case PGM_PAGE_STATE_ZERO:
3756 case PGM_PAGE_STATE_SHARED:
3757 case PGM_PAGE_STATE_WRITE_MONITORED:
3758 if (!PGM_PAGE_IS_CODE_PAGE(pPage))
3759 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3760 else
3761 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_CODE_PAGE;
3762 break;
3763 }
3764
3765 PPGMPAGEMAPTLBE pTlbe;
3766 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3767 AssertLogRelRCReturn(rc, rc);
3768 *ppb = (uint8_t *)pTlbe->pv;
3769 }
3770 else if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
3771 {
3772 /*
3773 * MMIO or similar all access handler: Catch all access.
3774 */
3775 *pfTlb |= *puTlbPhysRev
3776 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3777 *ppb = NULL;
3778 }
3779 else
3780 {
3781 /*
3782 * Write access handler: Catch write accesses if active.
3783 */
3784 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
3785 {
3786 if (!PGM_PAGE_IS_CODE_PAGE(pPage)) /* ROM pages end up here */
3787 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3788 else
3789 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_CODE_PAGE;
3790 }
3791 else
3792 switch (PGM_PAGE_GET_STATE(pPage))
3793 {
3794 case PGM_PAGE_STATE_ALLOCATED:
3795 Assert(!PGM_PAGE_IS_CODE_PAGE(pPage));
3796 *pfTlb |= *puTlbPhysRev;
3797 break;
3798 case PGM_PAGE_STATE_BALLOONED:
3799 AssertFailed();
3800 RT_FALL_THRU();
3801 case PGM_PAGE_STATE_ZERO:
3802 case PGM_PAGE_STATE_SHARED:
3803 case PGM_PAGE_STATE_WRITE_MONITORED:
3804 if (!PGM_PAGE_IS_CODE_PAGE(pPage))
3805 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3806 else
3807 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_CODE_PAGE;
3808 break;
3809 }
3810
3811 PPGMPAGEMAPTLBE pTlbe;
3812 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3813 AssertLogRelRCReturn(rc, rc);
3814 *ppb = (uint8_t *)pTlbe->pv;
3815 }
3816 }
3817 else
3818 {
3819 /* Alias MMIO: For now, we catch all access. */
3820 *pfTlb |= *puTlbPhysRev
3821 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3822 *ppb = NULL;
3823 }
3824 }
3825 else
3826 {
3827 /* Ballooned: Shouldn't get here, but we read the zero page via PGMPhysRead and writes go to /dev/null. */
3828 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3829 *ppb = NULL;
3830 }
3831 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 pPage=%R[pgmpage]\n", GCPhys, *ppb, *pfTlb, pPage));
3832 }
3833 else
3834 {
3835 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ
3836 | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 | PGMIEMGCPHYS2PTR_F_UNASSIGNED;
3837 *ppb = NULL;
3838 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 (rc=%Rrc)\n", GCPhys, *ppb, *pfTlb, rc));
3839 }
3840
3841 PGM_UNLOCK(pVM);
3842 return VINF_SUCCESS;
3843}
3844
3845
3846/**
3847 * Converts a GC physical address to a HC ring-3 pointer, with some
3848 * additional checks.
3849 *
3850 * @returns VBox status code (no informational statuses).
3851 * @retval VINF_SUCCESS on success.
3852 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3853 * access handler of some kind.
3854 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3855 * accesses or is odd in any way.
3856 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3857 *
3858 * @param pVM The cross context VM structure.
3859 * @param pVCpu The cross context virtual CPU structure of the
3860 * calling EMT.
3861 * @param GCPhys The GC physical address to convert. This API masks
3862 * the A20 line when necessary.
3863 * @param fWritable Whether write access is required.
3864 * @param fByPassHandlers Whether to bypass access handlers.
3865 * @param ppv Where to store the pointer corresponding to GCPhys
3866 * on success.
3867 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
3868 *
3869 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
3870 * @thread EMT(pVCpu).
3871 */
3872VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
3873 void **ppv, PPGMPAGEMAPLOCK pLock)
3874{
3875 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
3876
3877 PGM_LOCK_VOID(pVM);
3878
3879 PPGMRAMRANGE pRam;
3880 PPGMPAGE pPage;
3881 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3882 if (RT_SUCCESS(rc))
3883 {
3884 if (PGM_PAGE_IS_BALLOONED(pPage))
3885 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3886 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3887 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3888 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3889 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3890 rc = VINF_SUCCESS;
3891 else
3892 {
3893 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3894 {
3895 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3896 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3897 }
3898 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
3899 {
3900 Assert(!fByPassHandlers);
3901 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3902 }
3903 }
3904 if (RT_SUCCESS(rc))
3905 {
3906 int rc2;
3907
3908 /* Make sure what we return is writable. */
3909 if (fWritable)
3910 switch (PGM_PAGE_GET_STATE(pPage))
3911 {
3912 case PGM_PAGE_STATE_ALLOCATED:
3913 break;
3914 case PGM_PAGE_STATE_BALLOONED:
3915 AssertFailed();
3916 break;
3917 case PGM_PAGE_STATE_ZERO:
3918 case PGM_PAGE_STATE_SHARED:
3919 case PGM_PAGE_STATE_WRITE_MONITORED:
3920 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
3921 AssertLogRelRCReturn(rc2, rc2);
3922 break;
3923 }
3924
3925 /* Get a ring-3 mapping of the address. */
3926 PPGMPAGEMAPTLBE pTlbe;
3927 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3928 AssertLogRelRCReturn(rc2, rc2);
3929
3930 /* Lock it and calculate the address. */
3931 if (fWritable)
3932 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
3933 else
3934 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
3935 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3936
3937 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
3938 }
3939 else
3940 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
3941
3942 /* else: handler catching all access, no pointer returned. */
3943 }
3944 else
3945 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
3946
3947 PGM_UNLOCK(pVM);
3948 return rc;
3949}
3950
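/*
 * Illustrative sketch (not compiled): resolving a guest physical address to a
 * host pointer for direct write access, falling back when the page is
 * monitored.  The mapping lock must be released with
 * PGMPhysReleasePageMappingLock once the caller is done with the pointer.
 * The helper name is hypothetical.
 */
#if 0
static int pgmSketchMapForWrite(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
{
    int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, true /*fWritable*/, false /*fByPassHandlers*/, ppv, pLock);
    if (RT_SUCCESS(rc))
    {
        /* *ppv stays valid until PGMPhysReleasePageMappingLock(pVM, pLock) is called. */
        return VINF_SUCCESS;
    }
    /* VERR_PGM_PHYS_TLB_CATCH_WRITE / VERR_PGM_PHYS_TLB_CATCH_ALL: the page is
       monitored, so the caller should go through PGMPhysWrite instead.
       VERR_PGM_PHYS_TLB_UNASSIGNED: nothing is mapped at GCPhys. */
    return rc;
}
#endif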
3951
3952/**
3953 * Checks if the given GCPhys page requires special handling for the given access
3954 * because it's MMIO or otherwise monitored.
3955 *
3956 * @returns VBox status code (no informational statuses).
3957 * @retval VINF_SUCCESS on success.
3958 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3959 * access handler of some kind.
3960 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3961 * accesses or is odd in any way.
3962 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3963 *
3964 * @param pVM The cross context VM structure.
3965 * @param GCPhys The GC physical address to convert. Since this is
3966 * only used for filling the REM TLB, the A20 mask must
3967 * be applied before calling this API.
3968 * @param fWritable Whether write access is required.
3969 * @param fByPassHandlers Whether to bypass access handlers.
3970 *
3971 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
3972 * a stop gap thing that should be removed once there is a better TLB
3973 * for virtual address accesses.
3974 */
3975VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
3976{
3977 PGM_LOCK_VOID(pVM);
3978 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
3979
3980 PPGMRAMRANGE pRam;
3981 PPGMPAGE pPage;
3982 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3983 if (RT_SUCCESS(rc))
3984 {
3985 if (PGM_PAGE_IS_BALLOONED(pPage))
3986 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3987 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3988 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3989 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3990 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3991 rc = VINF_SUCCESS;
3992 else
3993 {
3994 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3995 {
3996 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3997 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3998 }
3999 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4000 {
4001 Assert(!fByPassHandlers);
4002 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4003 }
4004 }
4005 }
4006
4007 PGM_UNLOCK(pVM);
4008 return rc;
4009}
4010
4011#ifdef VBOX_WITH_NATIVE_NEM
4012
4013/**
4014 * Interface used by NEM to check what to do on a memory access exit.
4015 *
4016 * @returns VBox status code.
4017 * @param pVM The cross context VM structure.
4018 * @param pVCpu The cross context per virtual CPU structure.
4019 * Optional.
4020 * @param GCPhys The guest physical address.
4021 * @param fMakeWritable Whether to try to make the page writable or not. If it
4022 * cannot be made writable, NEM_PAGE_PROT_WRITE won't
4023 * be returned and the return code will be unaffected.
4024 * @param pInfo Where to return the page information. This is
4025 * initialized even on failure.
4026 * @param pfnChecker Page in-sync checker callback. Optional.
4027 * @param pvUser User argument to pass to pfnChecker.
4028 */
4029VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fMakeWritable, PPGMPHYSNEMPAGEINFO pInfo,
4030 PFNPGMPHYSNEMCHECKPAGE pfnChecker, void *pvUser)
4031{
4032 PGM_LOCK_VOID(pVM);
4033
4034 PPGMPAGE pPage;
4035 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
4036 if (RT_SUCCESS(rc))
4037 {
4038 /* Try make it writable if requested. */
4039 pInfo->u2OldNemState = PGM_PAGE_GET_NEM_STATE(pPage);
4040 if (fMakeWritable)
4041 switch (PGM_PAGE_GET_STATE(pPage))
4042 {
4043 case PGM_PAGE_STATE_SHARED:
4044 case PGM_PAGE_STATE_WRITE_MONITORED:
4045 case PGM_PAGE_STATE_ZERO:
4046 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
4047 if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
4048 rc = VINF_SUCCESS;
4049 break;
4050 }
4051
4052 /* Fill in the info. */
4053 pInfo->HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
4054 pInfo->u2NemState = PGM_PAGE_GET_NEM_STATE(pPage);
4055 pInfo->fHasHandlers = PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) ? 1 : 0;
4056 PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
4057 pInfo->enmType = enmType;
4058 pInfo->fNemProt = pgmPhysPageCalcNemProtection(pPage, enmType);
4059 switch (PGM_PAGE_GET_STATE(pPage))
4060 {
4061 case PGM_PAGE_STATE_ALLOCATED:
4062 pInfo->fZeroPage = 0;
4063 break;
4064
4065 case PGM_PAGE_STATE_ZERO:
4066 pInfo->fZeroPage = 1;
4067 break;
4068
4069 case PGM_PAGE_STATE_WRITE_MONITORED:
4070 pInfo->fZeroPage = 0;
4071 break;
4072
4073 case PGM_PAGE_STATE_SHARED:
4074 pInfo->fZeroPage = 0;
4075 break;
4076
4077 case PGM_PAGE_STATE_BALLOONED:
4078 pInfo->fZeroPage = 1;
4079 break;
4080
4081 default:
4082 pInfo->fZeroPage = 1;
4083 AssertFailedStmt(rc = VERR_PGM_PHYS_PAGE_GET_IPE);
4084 }
4085
4086 /* Call the checker and update NEM state. */
4087 if (pfnChecker)
4088 {
4089 rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
4090 PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
4091 }
4092
4093 /* Done. */
4094 PGM_UNLOCK(pVM);
4095 }
4096 else
4097 {
4098 PGM_UNLOCK(pVM);
4099
4100 pInfo->HCPhys = NIL_RTHCPHYS;
4101 pInfo->fNemProt = NEM_PAGE_PROT_NONE;
4102 pInfo->u2NemState = 0;
4103 pInfo->fHasHandlers = 0;
4104 pInfo->fZeroPage = 0;
4105 pInfo->enmType = PGMPAGETYPE_INVALID;
4106 }
4107
4108 return rc;
4109}
4110
4111
4112/**
4113 * NEM helper that performs @a pfnCallback on pages with NEM state @a uMinState
4114 * or higher.
4115 *
4116 * @returns VBox status code from callback.
4117 * @param pVM The cross context VM structure.
4118 * @param pVCpu The cross context per CPU structure. This is
4119 * optional as it's only for passing to the callback.
4120 * @param uMinState The minimum NEM state value to call on.
4121 * @param pfnCallback The callback function.
4122 * @param pvUser User argument for the callback.
4123 */
4124VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uMinState,
4125 PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
4126{
4127 /*
4128 * Just brute force this problem.
4129 */
4130 PGM_LOCK_VOID(pVM);
4131 int rc = VINF_SUCCESS;
4132 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
4133 {
4134 uint32_t const cPages = pRam->cb >> X86_PAGE_SHIFT;
4135 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4136 {
4137 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(&pRam->aPages[iPage]);
4138 if (u2State < uMinState)
4139 { /* likely */ }
4140 else
4141 {
4142 rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
4143 if (RT_SUCCESS(rc))
4144 PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
4145 else
4146 break;
4147 }
4148 }
4149 }
4150 PGM_UNLOCK(pVM);
4151
4152 return rc;
4153}
4154
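/*
 * Illustrative sketch (not compiled): a NEM enumeration callback and how it
 * could be wired up.  The callback parameter list is inferred from the call
 * site in PGMPhysNemEnumPagesByState above; the names and the minimum state
 * value are hypothetical.
 */
#if 0
static DECLCALLBACK(int) pgmSketchNemEnumCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint8_t *pu2State, void *pvUser)
{
    RT_NOREF(pVM, pVCpu, pvUser);
    Log(("pgmSketchNemEnumCallback: %RGp state %u\n", GCPhys, (unsigned)*pu2State));
    /* A real callback would reprotect the page here and may update *pu2State;
       the enumerator writes the updated value back on success. */
    return VINF_SUCCESS;
}

static int pgmSketchNemEnumAll(PVMCC pVM, PVMCPUCC pVCpu)
{
    return PGMPhysNemEnumPagesByState(pVM, pVCpu, 2 /*uMinState, hypothetical*/, pgmSketchNemEnumCallback, NULL /*pvUser*/);
}
#endif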
4155
4156/**
4157 * Helper for setting the NEM state for a range of pages.
4158 *
4159 * @param paPages Array of pages to modify.
4160 * @param cPages How many pages to modify.
4161 * @param u2State The new state value.
4162 */
4163void pgmPhysSetNemStateForPages(PPGMPAGE paPages, RTGCPHYS cPages, uint8_t u2State)
4164{
4165 PPGMPAGE pPage = paPages;
4166 while (cPages-- > 0)
4167 {
4168 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
4169 pPage++;
4170 }
4171}
4172
4173#endif /* VBOX_WITH_NATIVE_NEM */
4174