VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp @ 102572

Last change on this file since 102572 was 101539, checked in by vboxsync, 14 months ago

DIS,VMM,DBGC,IPRT,++: Some disassembler tweaks and TB disassembly work. bugref:10371 bugref:9898

1/* $Id: PGMAllPhys.cpp 101539 2023-10-22 02:43:09Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM_PHYS
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#include <VBox/vmm/pgm.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/vmm.h>
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/iom.h>
39#include <VBox/vmm/em.h>
40#include <VBox/vmm/nem.h>
41#include "PGMInternal.h"
42#include <VBox/vmm/vmcc.h>
43#include "PGMInline.h"
44#include <VBox/param.h>
45#include <VBox/err.h>
46#include <iprt/assert.h>
47#include <iprt/string.h>
48#include <VBox/log.h>
49#ifdef IN_RING3
50# include <iprt/thread.h>
51#endif
52
53
54/*********************************************************************************************************************************
55* Defined Constants And Macros *
56*********************************************************************************************************************************/
57/** Enable the physical TLB. */
58#define PGM_WITH_PHYS_TLB
59
60/** @def PGM_HANDLER_PHYS_IS_VALID_STATUS
61 * Checks if valid physical access handler return code (normal handler, not PF).
62 *
63 * Checks if the given strict status code is one of the expected ones for a
64 * physical access handler in the current context.
65 *
66 * @returns true or false.
67 * @param a_rcStrict The status code.
68 * @param a_fWrite Whether it is a write or read being serviced.
69 *
70 * @remarks We wish to keep the list of statuses here as short as possible.
71 * When changing, please make sure to update the PGMPhysRead,
72 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
73 */
74#ifdef IN_RING3
75# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
76 ( (a_rcStrict) == VINF_SUCCESS \
77 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
78#elif defined(IN_RING0)
79#define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
80 ( (a_rcStrict) == VINF_SUCCESS \
81 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT \
82 \
83 || (a_rcStrict) == ((a_fWrite) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ) \
84 || (a_rcStrict) == VINF_IOM_R3_MMIO_READ_WRITE \
85 || ((a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE && (a_fWrite)) \
86 \
87 || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR \
88 || (a_rcStrict) == VINF_EM_DBG_STOP \
89 || (a_rcStrict) == VINF_EM_DBG_EVENT \
90 || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
91 || (a_rcStrict) == VINF_EM_OFF \
92 || (a_rcStrict) == VINF_EM_SUSPEND \
93 || (a_rcStrict) == VINF_EM_RESET \
94 )
95#else
96# error "Context?"
97#endif
98
99/** @def PGM_HANDLER_VIRT_IS_VALID_STATUS
100 * Checks if valid virtual access handler return code (normal handler, not PF).
101 *
102 * Checks if the given strict status code is one of the expected ones for a
103 * virtual access handler in the current context.
104 *
105 * @returns true or false.
106 * @param a_rcStrict The status code.
107 * @param a_fWrite Whether it is a write or read being serviced.
108 *
109 * @remarks We wish to keep the list of statuses here as short as possible.
110 * When changing, please make sure to update the PGMPhysRead,
111 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
112 */
113#ifdef IN_RING3
114# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
115 ( (a_rcStrict) == VINF_SUCCESS \
116 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
117#elif defined(IN_RING0)
118# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
119 (false /* no virtual handlers in ring-0! */ )
120#else
121# error "Context?"
122#endif
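/*
 * Illustrative sketch (not part of the original source): a caller that has just
 * invoked a physical access handler can sanity check the returned strict status
 * with the macro above.  The handler invocation details below are hypothetical;
 * only the macro usage is the point:
 *
 *      VBOXSTRICTRC rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf,
 *                                         PGMACCESSTYPE_WRITE, enmOrigin, uUser);
 *      AssertMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
 *                ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
 */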
123
124
125
126/**
127 * Calculate the actual table size.
128 *
129 * The memory is laid out like this:
130 * - PGMPHYSHANDLERTREE (8 bytes)
131 * - Allocation bitmap (8-byte size align)
132 * - Slab of PGMPHYSHANDLER. Start is 64 byte aligned.
133 */
134uint32_t pgmHandlerPhysicalCalcTableSizes(uint32_t *pcEntries, uint32_t *pcbTreeAndBitmap)
135{
136 /*
137 * A minimum of 64 entries and a maximum of ~64K.
138 */
139 uint32_t cEntries = *pcEntries;
140 if (cEntries <= 64)
141 cEntries = 64;
142 else if (cEntries >= _64K)
143 cEntries = _64K;
144 else
145 cEntries = RT_ALIGN_32(cEntries, 16);
146
147 /*
148 * Do the initial calculation.
149 */
150 uint32_t cbBitmap = RT_ALIGN_32(cEntries, 64) / 8;
151 uint32_t cbTreeAndBitmap = RT_ALIGN_32(sizeof(PGMPHYSHANDLERTREE) + cbBitmap, 64);
152 uint32_t cbTable = cEntries * sizeof(PGMPHYSHANDLER);
153 uint32_t cbTotal = cbTreeAndBitmap + cbTable;
154
155 /*
156 * Align the total and try to use up the extra space from that.
157 */
158 uint32_t cbTotalAligned = RT_ALIGN_32(cbTotal, RT_MAX(HOST_PAGE_SIZE, _16K));
159 uint32_t cAvail = cbTotalAligned - cbTotal;
160 cAvail /= sizeof(PGMPHYSHANDLER);
161 cEntries += cAvail; /* grow the table into the alignment padding (no-op when cAvail is zero) */
162 for (;;)
163 {
164 cbBitmap = RT_ALIGN_32(cEntries, 64) / 8;
165 cbTreeAndBitmap = RT_ALIGN_32(sizeof(PGMPHYSHANDLERTREE) + cbBitmap, 64);
166 cbTable = cEntries * sizeof(PGMPHYSHANDLER);
167 cbTotal = cbTreeAndBitmap + cbTable;
168 if (cbTotal <= cbTotalAligned)
169 break;
170 cEntries--;
171 Assert(cEntries >= 16);
172 }
173
174 /*
175 * Return the result.
176 */
177 *pcbTreeAndBitmap = cbTreeAndBitmap;
178 *pcEntries = cEntries;
179 return cbTotalAligned;
180}
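/*
 * Usage sketch (illustrative, not from the original source): a caller sizing the
 * handler table feeds in a suggested entry count, allocates the returned number
 * of bytes (the allocator call below is hypothetical), and carves the block into
 * the tree+bitmap header followed by the 64-byte aligned entry slab:
 *
 *      uint32_t cEntries        = 512;
 *      uint32_t cbTreeAndBitmap = 0;
 *      uint32_t const cbTotal   = pgmHandlerPhysicalCalcTableSizes(&cEntries, &cbTreeAndBitmap);
 *      uint8_t *pb              = (uint8_t *)SomeAllocator(cbTotal);              // hypothetical
 *      PGMPHYSHANDLERTREE *pTree  = (PGMPHYSHANDLERTREE *)pb;
 *      PGMPHYSHANDLER     *paSlab = (PGMPHYSHANDLER *)(pb + cbTreeAndBitmap);
 *      // cEntries now holds the (possibly adjusted) number of usable slab entries.
 */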
181
182
183/**
184 * Looks up a ROM range by its PGMROMRANGE::GCPhys value.
185 */
186DECLINLINE(PPGMROMRANGE) pgmPhysRomLookupByBase(PVMCC pVM, RTGCPHYS GCPhys)
187{
188 for (PPGMROMRANGE pRom = pVM->pgm.s.CTX_SUFF(pRomRanges); pRom; pRom = pRom->CTX_SUFF(pNext))
189 if (pRom->GCPhys == GCPhys)
190 return pRom;
191 return NULL;
192}
193
194#ifndef IN_RING3
195
196/**
197 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
198 * \#PF access handler callback for guest ROM range write access.}
199 *
200 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
201 */
202DECLCALLBACK(VBOXSTRICTRC) pgmPhysRomWritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
203 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
204
205{
206 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
207 AssertReturn(pRom, VINF_EM_RAW_EMULATE_INSTR);
208 uint32_t const iPage = (GCPhysFault - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
209 int rc;
210 RT_NOREF(uErrorCode, pvFault);
211
212 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
213
214 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));
215 switch (pRom->aPages[iPage].enmProt)
216 {
217 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
218 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
219 {
220 /*
221 * If it's a simple instruction which doesn't change the cpu state
222 * we will simply skip it. Otherwise we'll have to defer it to REM.
223 */
224 uint32_t cbOp;
225 PDISSTATE pDis = &pVCpu->pgm.s.Dis;
226 rc = EMInterpretDisasCurrent(pVCpu, pDis, &cbOp);
227 if ( RT_SUCCESS(rc)
228 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
229 && !(pDis->x86.fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
230 {
231 switch (pDis->x86.bOpCode)
232 {
233 /** @todo Find other instructions we can safely skip, possibly
234 * adding this kind of detection to DIS or EM. */
235 case OP_MOV:
236 pCtx->rip += cbOp;
237 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteHandled);
238 return VINF_SUCCESS;
239 }
240 }
241 break;
242 }
243
244 case PGMROMPROT_READ_RAM_WRITE_RAM:
245 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
246 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
247 AssertRC(rc);
248 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
249
250 case PGMROMPROT_READ_ROM_WRITE_RAM:
251 /* Handle it in ring-3 because it's *way* easier there. */
252 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
253 break;
254
255 default:
256 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
257 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
258 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
259 }
260
261 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteUnhandled);
262 return VINF_EM_RAW_EMULATE_INSTR;
263}
264
265#endif /* !IN_RING3 */
266
267
268/**
269 * @callback_method_impl{FNPGMPHYSHANDLER,
270 * Access handler callback for ROM write accesses.}
271 *
272 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
273 */
274DECLCALLBACK(VBOXSTRICTRC)
275pgmPhysRomWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
276 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
277{
278 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
279 AssertReturn(pRom, VERR_INTERNAL_ERROR_3);
280 uint32_t const iPage = (GCPhys - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
281 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));
282 PPGMROMPAGE const pRomPage = &pRom->aPages[iPage];
283
284 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
285 RT_NOREF(pVCpu, pvPhys, enmOrigin);
286
287 if (enmAccessType == PGMACCESSTYPE_READ)
288 {
289 switch (pRomPage->enmProt)
290 {
291 /*
292 * Take the default action.
293 */
294 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
295 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
296 case PGMROMPROT_READ_ROM_WRITE_RAM:
297 case PGMROMPROT_READ_RAM_WRITE_RAM:
298 return VINF_PGM_HANDLER_DO_DEFAULT;
299
300 default:
301 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
302 pRom->aPages[iPage].enmProt, iPage, GCPhys),
303 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
304 }
305 }
306 else
307 {
308 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
309 switch (pRomPage->enmProt)
310 {
311 /*
312 * Ignore writes.
313 */
314 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
315 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
316 return VINF_SUCCESS;
317
318 /*
319 * Write to the RAM page.
320 */
321 case PGMROMPROT_READ_ROM_WRITE_RAM:
322 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
323 {
324 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
325 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> GUEST_PAGE_SHIFT) == iPage);
326
327 /*
328 * Take the lock, do lazy allocation, map the page and copy the data.
329 *
330 * Note that we have to bypass the mapping TLB since it works on
331 * guest physical addresses and entering the shadow page would
332 * kind of screw things up...
333 */
334 PGM_LOCK_VOID(pVM);
335
336 PPGMPAGE pShadowPage = &pRomPage->Shadow;
337 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
338 {
339 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
340 AssertLogRelMsgReturnStmt(pShadowPage, ("%RGp\n", GCPhys), PGM_UNLOCK(pVM), VERR_PGM_PHYS_PAGE_GET_IPE);
341 }
342
343 void *pvDstPage;
344 int rc;
345#if defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)
346 if (PGM_IS_IN_NEM_MODE(pVM) && PGMROMPROT_IS_ROM(pRomPage->enmProt))
347 {
348 pvDstPage = &pRom->pbR3Alternate[GCPhys - pRom->GCPhys];
349 rc = VINF_SUCCESS;
350 }
351 else
352#endif
353 {
354 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
355 if (RT_SUCCESS(rc))
356 pvDstPage = (uint8_t *)pvDstPage + (GCPhys & GUEST_PAGE_OFFSET_MASK);
357 }
358 if (RT_SUCCESS(rc))
359 {
360 memcpy(pvDstPage, pvBuf, cbBuf); /* pvDstPage already includes the in-page offset in both branches above. */
361 pRomPage->LiveSave.fWrittenTo = true;
362
363 AssertMsg( rc == VINF_SUCCESS
364 || ( rc == VINF_PGM_SYNC_CR3
365 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
366 , ("%Rrc\n", rc));
367 rc = VINF_SUCCESS;
368 }
369
370 PGM_UNLOCK(pVM);
371 return rc;
372 }
373
374 default:
375 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
376 pRom->aPages[iPage].enmProt, iPage, GCPhys),
377 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
378 }
379 }
380}
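/*
 * Illustrative sketch (not part of the original source): a device or instruction
 * emulation writing into a shadowed ROM region reaches the handler above through
 * the ordinary physical write path, roughly like this:
 *
 *      uint8_t bPatch = 0x90;
 *      VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhysInRomRange, &bPatch, sizeof(bPatch),
 *                                           PGMACCESSORIGIN_DEVICE);
 *
 * Whether the byte lands in the shadow RAM page or is silently dropped is decided
 * purely by the PGMROMPROT_* protection of the page, as shown above.
 */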
381
382
383/**
384 * Common worker for pgmPhysMmio2WriteHandler and pgmPhysMmio2WritePfHandler.
385 */
386static VBOXSTRICTRC pgmPhysMmio2WriteHandlerCommon(PVMCC pVM, PVMCPUCC pVCpu, uint64_t hMmio2, RTGCPHYS GCPhys, RTGCPTR GCPtr)
387{
388 /*
389 * Get the MMIO2 range.
390 */
391 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), VERR_INTERNAL_ERROR_3);
392 AssertReturn(hMmio2 != 0, VERR_INTERNAL_ERROR_3);
393 PPGMREGMMIO2RANGE pMmio2 = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2 - 1];
394 Assert(pMmio2->idMmio2 == hMmio2);
395 AssertReturn((pMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES) == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
396 VERR_INTERNAL_ERROR_4);
397
398 /*
399 * Get the page and make sure it's an MMIO2 page.
400 */
401 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
402 AssertReturn(pPage, VINF_EM_RAW_EMULATE_INSTR);
403 AssertReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, VINF_EM_RAW_EMULATE_INSTR);
404
405 /*
406 * Set the dirty flag so we can avoid scanning all the pages when it isn't dirty.
407 * (The PGM_PAGE_HNDL_PHYS_STATE_DISABLED handler state indicates that a single
408 * page is dirty, saving the need for additional storage (bitmap).)
409 */
410 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_DIRTY;
411
412 /*
413 * Disable the handler for this page.
414 */
415 int rc = PGMHandlerPhysicalPageTempOff(pVM, pMmio2->RamRange.GCPhys, GCPhys & X86_PTE_PG_MASK);
416 AssertRC(rc);
417#ifndef IN_RING3
418 if (RT_SUCCESS(rc) && GCPtr != ~(RTGCPTR)0)
419 {
420 rc = PGMShwMakePageWritable(pVCpu, GCPtr, PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
421 AssertMsgReturn(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT,
422 ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, rc), rc);
423 }
424#else
425 RT_NOREF(pVCpu, GCPtr);
426#endif
427 return VINF_SUCCESS;
428}
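/*
 * Sketch of the tracking scheme used above (illustrative comment, not code from
 * the original source): the first write to a clean MMIO2 page sets the range
 * level dirty flag and turns the handler off for that page, so subsequent writes
 * run at full speed.  Code that wants to know whether anything needs syncing can
 * therefore start with a cheap check along these lines:
 *
 *      if (pMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY)
 *          ... scan the range for pages left in PGM_PAGE_HNDL_PHYS_STATE_DISABLED ...
 *
 * Re-arming the per-page handlers (and clearing the flag) happens elsewhere when
 * the dirty information is harvested.
 */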
429
430
431#ifndef IN_RING3
432/**
433 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
434 * \#PF access handler callback for guest MMIO2 dirty page tracing.}
435 *
436 * @remarks The @a uUser is the MMIO2 index.
437 */
438DECLCALLBACK(VBOXSTRICTRC) pgmPhysMmio2WritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
439 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
440{
441 RT_NOREF(pVCpu, uErrorCode, pCtx);
442 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
443 if (RT_SUCCESS(rcStrict))
444 {
445 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhysFault, pvFault);
446 PGM_UNLOCK(pVM);
447 }
448 return rcStrict;
449}
450#endif /* !IN_RING3 */
451
452
453/**
454 * @callback_method_impl{FNPGMPHYSHANDLER,
455 * Access handler callback for MMIO2 dirty page tracing.}
456 *
457 * @remarks The @a uUser is the MMIO2 index.
458 */
459DECLCALLBACK(VBOXSTRICTRC)
460pgmPhysMmio2WriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
461 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
462{
463 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
464 if (RT_SUCCESS(rcStrict))
465 {
466 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhys, ~(RTGCPTR)0);
467 PGM_UNLOCK(pVM);
468 if (rcStrict == VINF_SUCCESS)
469 rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
470 }
471 RT_NOREF(pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
472 return rcStrict;
473}
474
475
476/**
477 * Invalidates the RAM range TLBs.
478 *
479 * @param pVM The cross context VM structure.
480 */
481void pgmPhysInvalidRamRangeTlbs(PVMCC pVM)
482{
483 PGM_LOCK_VOID(pVM);
484 RT_ZERO(pVM->pgm.s.apRamRangesTlbR3);
485 RT_ZERO(pVM->pgm.s.apRamRangesTlbR0);
486 PGM_UNLOCK(pVM);
487}
488
489
490/**
491 * Tests if a value of type RTGCPHYS is negative if the type had been signed
492 * instead of unsigned.
493 *
494 * @returns @c true if negative, @c false if positive or zero.
495 * @param a_GCPhys The value to test.
496 * @todo Move me to iprt/types.h.
497 */
498#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
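/*
 * Worked example of how the tree walkers below use RTGCPHYS_IS_NEGATIVE
 * (illustrative, not part of the original source): with a range starting at
 * 0x00100000 (1 MiB) and a lookup address of 0x000A0000, the unsigned
 * subtraction off = 0x000A0000 - 0x00100000 wraps around, the most significant
 * bit ends up set, RTGCPHYS_IS_NEGATIVE(off) is true, and the search descends
 * into the left subtree.  For 0x00180000 the difference is 0x80000, which is
 * either inside the range (off < pRam->cb) or sends the search to the right.
 */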
499
500
501/**
502 * Slow worker for pgmPhysGetRange.
503 *
504 * @copydoc pgmPhysGetRange
505 */
506PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
507{
508 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
509
510 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
511 while (pRam)
512 {
513 RTGCPHYS off = GCPhys - pRam->GCPhys;
514 if (off < pRam->cb)
515 {
516 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
517 return pRam;
518 }
519 if (RTGCPHYS_IS_NEGATIVE(off))
520 pRam = pRam->CTX_SUFF(pLeft);
521 else
522 pRam = pRam->CTX_SUFF(pRight);
523 }
524 return NULL;
525}
526
527
528/**
529 * Slow worker for pgmPhysGetRangeAtOrAbove.
530 *
531 * @copydoc pgmPhysGetRangeAtOrAbove
532 */
533PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
534{
535 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
536
537 PPGMRAMRANGE pLastLeft = NULL;
538 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
539 while (pRam)
540 {
541 RTGCPHYS off = GCPhys - pRam->GCPhys;
542 if (off < pRam->cb)
543 {
544 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
545 return pRam;
546 }
547 if (RTGCPHYS_IS_NEGATIVE(off))
548 {
549 pLastLeft = pRam;
550 pRam = pRam->CTX_SUFF(pLeft);
551 }
552 else
553 pRam = pRam->CTX_SUFF(pRight);
554 }
555 return pLastLeft;
556}
557
558
559/**
560 * Slow worker for pgmPhysGetPage.
561 *
562 * @copydoc pgmPhysGetPage
563 */
564PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
565{
566 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
567
568 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
569 while (pRam)
570 {
571 RTGCPHYS off = GCPhys - pRam->GCPhys;
572 if (off < pRam->cb)
573 {
574 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
575 return &pRam->aPages[off >> GUEST_PAGE_SHIFT];
576 }
577
578 if (RTGCPHYS_IS_NEGATIVE(off))
579 pRam = pRam->CTX_SUFF(pLeft);
580 else
581 pRam = pRam->CTX_SUFF(pRight);
582 }
583 return NULL;
584}
585
586
587/**
588 * Slow worker for pgmPhysGetPageEx.
589 *
590 * @copydoc pgmPhysGetPageEx
591 */
592int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
593{
594 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
595
596 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
597 while (pRam)
598 {
599 RTGCPHYS off = GCPhys - pRam->GCPhys;
600 if (off < pRam->cb)
601 {
602 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
603 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
604 return VINF_SUCCESS;
605 }
606
607 if (RTGCPHYS_IS_NEGATIVE(off))
608 pRam = pRam->CTX_SUFF(pLeft);
609 else
610 pRam = pRam->CTX_SUFF(pRight);
611 }
612
613 *ppPage = NULL;
614 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
615}
616
617
618/**
619 * Slow worker for pgmPhysGetPageAndRangeEx.
620 *
621 * @copydoc pgmPhysGetPageAndRangeEx
622 */
623int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
624{
625 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
626
627 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
628 while (pRam)
629 {
630 RTGCPHYS off = GCPhys - pRam->GCPhys;
631 if (off < pRam->cb)
632 {
633 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
634 *ppRam = pRam;
635 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
636 return VINF_SUCCESS;
637 }
638
639 if (RTGCPHYS_IS_NEGATIVE(off))
640 pRam = pRam->CTX_SUFF(pLeft);
641 else
642 pRam = pRam->CTX_SUFF(pRight);
643 }
644
645 *ppRam = NULL;
646 *ppPage = NULL;
647 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
648}
649
650
651/**
652 * Checks if Address Gate 20 is enabled or not.
653 *
654 * @returns true if enabled.
655 * @returns false if disabled.
656 * @param pVCpu The cross context virtual CPU structure.
657 */
658VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
659{
660 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
661 return pVCpu->pgm.s.fA20Enabled;
662}
663
664
665/**
666 * Validates a GC physical address.
667 *
668 * @returns true if valid.
669 * @returns false if invalid.
670 * @param pVM The cross context VM structure.
671 * @param GCPhys The physical address to validate.
672 */
673VMMDECL(bool) PGMPhysIsGCPhysValid(PVMCC pVM, RTGCPHYS GCPhys)
674{
675 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
676 return pPage != NULL;
677}
678
679
680/**
681 * Checks if a GC physical address is a normal page,
682 * i.e. not ROM, MMIO or reserved.
683 *
684 * @returns true if normal.
685 * @returns false if invalid, ROM, MMIO or reserved page.
686 * @param pVM The cross context VM structure.
687 * @param GCPhys The physical address to check.
688 */
689VMMDECL(bool) PGMPhysIsGCPhysNormal(PVMCC pVM, RTGCPHYS GCPhys)
690{
691 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
692 return pPage
693 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
694}
695
696
697/**
698 * Converts a GC physical address to a HC physical address.
699 *
700 * @returns VINF_SUCCESS on success.
701 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
702 * page but has no physical backing.
703 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
704 * GC physical address.
705 *
706 * @param pVM The cross context VM structure.
707 * @param GCPhys The GC physical address to convert.
708 * @param pHCPhys Where to store the HC physical address on success.
709 */
710VMM_INT_DECL(int) PGMPhysGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
711{
712 PGM_LOCK_VOID(pVM);
713 PPGMPAGE pPage;
714 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
715 if (RT_SUCCESS(rc))
716 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & GUEST_PAGE_OFFSET_MASK);
717 PGM_UNLOCK(pVM);
718 return rc;
719}
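/*
 * Usage sketch (illustrative, not from the original source):
 *
 *      RTHCPHYS HCPhys;
 *      int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
 *      if (RT_SUCCESS(rc))
 *          ... HCPhys now holds the host physical address, including the byte
 *              offset of GCPhys within the page ...
 */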
720
721
722/**
723 * Invalidates all page mapping TLBs.
724 *
725 * @param pVM The cross context VM structure.
726 */
727void pgmPhysInvalidatePageMapTLB(PVMCC pVM)
728{
729 PGM_LOCK_VOID(pVM);
730 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushes);
731
732 /* Clear the R3 & R0 TLBs completely. */
733 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
734 {
735 pVM->pgm.s.PhysTlbR0.aEntries[i].GCPhys = NIL_RTGCPHYS;
736 pVM->pgm.s.PhysTlbR0.aEntries[i].pPage = 0;
737 pVM->pgm.s.PhysTlbR0.aEntries[i].pv = 0;
738 }
739
740 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
741 {
742 pVM->pgm.s.PhysTlbR3.aEntries[i].GCPhys = NIL_RTGCPHYS;
743 pVM->pgm.s.PhysTlbR3.aEntries[i].pPage = 0;
744 pVM->pgm.s.PhysTlbR3.aEntries[i].pMap = 0;
745 pVM->pgm.s.PhysTlbR3.aEntries[i].pv = 0;
746 }
747
748 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_MISC);
749 PGM_UNLOCK(pVM);
750}
751
752
753/**
754 * Invalidates a page mapping TLB entry.
755 *
756 * @param pVM The cross context VM structure.
757 * @param GCPhys The guest physical address of the entry to flush.
758 *
759 * @note Caller is responsible for calling IEMTlbInvalidateAllPhysicalAllCpus
760 * when needed.
761 */
762void pgmPhysInvalidatePageMapTLBEntry(PVMCC pVM, RTGCPHYS GCPhys)
763{
764 PGM_LOCK_ASSERT_OWNER(pVM);
765
766 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushEntry);
767
768 unsigned const idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
769
770 pVM->pgm.s.PhysTlbR0.aEntries[idx].GCPhys = NIL_RTGCPHYS;
771 pVM->pgm.s.PhysTlbR0.aEntries[idx].pPage = 0;
772 pVM->pgm.s.PhysTlbR0.aEntries[idx].pv = 0;
773
774 pVM->pgm.s.PhysTlbR3.aEntries[idx].GCPhys = NIL_RTGCPHYS;
775 pVM->pgm.s.PhysTlbR3.aEntries[idx].pPage = 0;
776 pVM->pgm.s.PhysTlbR3.aEntries[idx].pMap = 0;
777 pVM->pgm.s.PhysTlbR3.aEntries[idx].pv = 0;
778}
779
780
781/**
782 * Makes sure that there is at least one handy page ready for use.
783 *
784 * This will also take the appropriate actions when reaching water-marks.
785 *
786 * @returns VBox status code.
787 * @retval VINF_SUCCESS on success.
788 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
789 *
790 * @param pVM The cross context VM structure.
791 *
792 * @remarks Must be called from within the PGM critical section. It may
793 * nip back to ring-3/0 in some cases.
794 */
795static int pgmPhysEnsureHandyPage(PVMCC pVM)
796{
797 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
798
799 /*
800 * Do we need to do anything special?
801 */
802#ifdef IN_RING3
803 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
804#else
805 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
806#endif
807 {
808 /*
809 * Allocate pages only if we're out of them, or in ring-3, almost out.
810 */
811#ifdef IN_RING3
812 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
813#else
814 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
815#endif
816 {
817 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
818 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
819#ifdef IN_RING3
820 int rc = PGMR3PhysAllocateHandyPages(pVM);
821#else
822 int rc = pgmR0PhysAllocateHandyPages(pVM, VMMGetCpuId(pVM), false /*fRing3*/);
823#endif
824 if (RT_UNLIKELY(rc != VINF_SUCCESS))
825 {
826 if (RT_FAILURE(rc))
827 return rc;
828 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
829 if (!pVM->pgm.s.cHandyPages)
830 {
831 LogRel(("PGM: no more handy pages!\n"));
832 return VERR_EM_NO_MEMORY;
833 }
834 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
835 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
836#ifndef IN_RING3
837 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
838#endif
839 }
840 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
841 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
842 ("%u\n", pVM->pgm.s.cHandyPages),
843 VERR_PGM_HANDY_PAGE_IPE);
844 }
845 else
846 {
847 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
848 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
849#ifndef IN_RING3
850 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
851 {
852 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
853 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
854 }
855#endif
856 }
857 }
858
859 return VINF_SUCCESS;
860}
861
862
863/**
864 * Replace a zero or shared page with new page that we can write to.
865 *
866 * @returns The following VBox status codes.
867 * @retval VINF_SUCCESS on success, pPage is modified.
868 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
869 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
870 *
871 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
872 *
873 * @param pVM The cross context VM structure.
874 * @param pPage The physical page tracking structure. This will
875 * be modified on success.
876 * @param GCPhys The address of the page.
877 *
878 * @remarks Must be called from within the PGM critical section. It may
879 * nip back to ring-3/0 in some cases.
880 *
881 * @remarks This function shouldn't really fail, however if it does
882 * it probably means we've screwed up the size of handy pages and/or
883 * the low-water mark. Or, that some device I/O is causing a lot of
884 * pages to be allocated while the host is in a low-memory
885 * condition. This latter should be handled elsewhere and in a more
886 * controlled manner, it's on the @bugref{3170} todo list...
887 */
888int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
889{
890 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
891
892 /*
893 * Prereqs.
894 */
895 PGM_LOCK_ASSERT_OWNER(pVM);
896 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
897 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
898
899# ifdef PGM_WITH_LARGE_PAGES
900 /*
901 * Try allocate a large page if applicable.
902 */
903 if ( PGMIsUsingLargePages(pVM)
904 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
905 && !VM_IS_NEM_ENABLED(pVM)) /** @todo NEM: Implement large pages support. */
906 {
907 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
908 PPGMPAGE pBasePage;
909
910 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
911 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
912 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
913 {
914 rc = pgmPhysAllocLargePage(pVM, GCPhys);
915 if (rc == VINF_SUCCESS)
916 return rc;
917 }
918 /* Mark the base as type page table, so we don't check over and over again. */
919 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
920
921 /* fall back to 4KB pages. */
922 }
923# endif
924
925 /*
926 * Flush any shadow page table mappings of the page.
927 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
928 */
929 bool fFlushTLBs = false;
930 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
931 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
932
933 /*
934 * Ensure that we've got a page handy, take it and use it.
935 */
936 int rc2 = pgmPhysEnsureHandyPage(pVM);
937 if (RT_FAILURE(rc2))
938 {
939 if (fFlushTLBs)
940 PGM_INVL_ALL_VCPU_TLBS(pVM);
941 Assert(rc2 == VERR_EM_NO_MEMORY);
942 return rc2;
943 }
944 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
945 PGM_LOCK_ASSERT_OWNER(pVM);
946 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
947 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
948
949 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
950 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
951 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_GMMPAGEDESC_PHYS);
952 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
953 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
954 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
955
956 /*
957 * There are one or two actions to be taken the next time we allocate handy pages:
958 * - Tell the GMM (global memory manager) what the page is being used for.
959 * (Speeds up replacement operations - sharing and defragmenting.)
960 * - If the current backing is shared, it must be freed.
961 */
962 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
963 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
964
965 void const *pvSharedPage = NULL;
966 if (PGM_PAGE_IS_SHARED(pPage))
967 {
968 /* Mark this shared page for freeing/dereferencing. */
969 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
970 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
971
972 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
973 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
974 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageReplaceShared));
975 pVM->pgm.s.cSharedPages--;
976
977 /* Grab the address of the page so we can make a copy later on. (safe) */
978 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
979 AssertRC(rc);
980 }
981 else
982 {
983 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
984 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatRZPageReplaceZero);
985 pVM->pgm.s.cZeroPages--;
986 }
987
988 /*
989 * Do the PGMPAGE modifications.
990 */
991 pVM->pgm.s.cPrivatePages++;
992 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
993 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
994 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
995 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
996 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
997 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_ALLOCATED);
998
999 /* Copy the shared page contents to the replacement page. */
1000 if (pvSharedPage)
1001 {
1002 /* Get the virtual address of the new page. */
1003 PGMPAGEMAPLOCK PgMpLck;
1004 void *pvNewPage;
1005 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
1006 if (RT_SUCCESS(rc))
1007 {
1008 memcpy(pvNewPage, pvSharedPage, GUEST_PAGE_SIZE); /** @todo write ASMMemCopyPage */
1009 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1010 }
1011 }
1012
1013 if ( fFlushTLBs
1014 && rc != VINF_PGM_GCPHYS_ALIASED)
1015 PGM_INVL_ALL_VCPU_TLBS(pVM);
1016
1017 /*
1018 * Notify NEM about the mapping change for this page.
1019 *
1020 * Note! Shadow ROM pages are complicated as they can definitely be
1021 * allocated while not visible, so play safe.
1022 */
1023 if (VM_IS_NEM_ENABLED(pVM))
1024 {
1025 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1026 if ( enmType != PGMPAGETYPE_ROM_SHADOW
1027 || pgmPhysGetPage(pVM, GCPhys) == pPage)
1028 {
1029 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1030 rc2 = NEMHCNotifyPhysPageAllocated(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, HCPhys,
1031 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1032 if (RT_SUCCESS(rc2)) /* check the NEM notification status, not the earlier rc */
1033 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1034 else
1035 rc = rc2;
1036 }
1037 }
1038
1039 return rc;
1040}
1041
1042#ifdef PGM_WITH_LARGE_PAGES
1043
1044/**
1045 * Replace a 2 MB range of zero pages with new pages that we can write to.
1046 *
1047 * @returns The following VBox status codes.
1048 * @retval VINF_SUCCESS on success, pPage is modified.
1049 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1050 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
1051 *
1052 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
1053 *
1054 * @param pVM The cross context VM structure.
1055 * @param GCPhys The address of the page.
1056 *
1057 * @remarks Must be called from within the PGM critical section. It may block
1058 * on GMM and host mutexes/locks, leaving HM context.
1059 */
1060int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys)
1061{
1062 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
1063 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
1064 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1065
1066 /*
1067 * Check Prereqs.
1068 */
1069 PGM_LOCK_ASSERT_OWNER(pVM);
1070 Assert(PGMIsUsingLargePages(pVM));
1071
1072 /*
1073 * All the pages must be unallocated RAM pages, i.e. mapping the ZERO page.
1074 */
1075 PPGMPAGE pFirstPage;
1076 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
1077 if ( RT_SUCCESS(rc)
1078 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM
1079 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
1080 {
1081 /*
1082 * Further they should have PDE type set to PGM_PAGE_PDE_TYPE_DONTCARE,
1083 * since they are unallocated.
1084 */
1085 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
1086 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
1087 if (uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE)
1088 {
1089 /*
1090 * Now, make sure all the other pages in the 2 MB range are in the same state.
1091 */
1092 GCPhys = GCPhysBase;
1093 unsigned cLeft = _2M / GUEST_PAGE_SIZE;
1094 while (cLeft-- > 0)
1095 {
1096 PPGMPAGE pSubPage = pgmPhysGetPage(pVM, GCPhys);
1097 if ( pSubPage
1098 && PGM_PAGE_GET_TYPE(pSubPage) == PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
1099 && PGM_PAGE_GET_STATE(pSubPage) == PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
1100 {
1101 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
1102 GCPhys += GUEST_PAGE_SIZE;
1103 }
1104 else
1105 {
1106 LogFlow(("pgmPhysAllocLargePage: Found page %RGp with wrong attributes (type=%d; state=%d); cancel check.\n",
1107 GCPhys, pSubPage ? PGM_PAGE_GET_TYPE(pSubPage) : -1, pSubPage ? PGM_PAGE_GET_STATE(pSubPage) : -1));
1108
1109 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
1110 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
1111 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
1112 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1113 }
1114 }
1115
1116 /*
1117 * Do the allocation.
1118 */
1119# ifdef IN_RING3
1120 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE, GCPhysBase, NULL);
1121# elif defined(IN_RING0)
1122 rc = pgmR0PhysAllocateLargePage(pVM, VMMGetCpuId(pVM), GCPhysBase);
1123# else
1124# error "Port me"
1125# endif
1126 if (RT_SUCCESS(rc))
1127 {
1128 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
1129 pVM->pgm.s.cLargePages++;
1130 return VINF_SUCCESS;
1131 }
1132
1133 /* If we fail once, it most likely means the host's memory is too
1134 fragmented; don't bother trying again. */
1135 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
1136 return rc;
1137 }
1138 }
1139 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1140}
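/*
 * Worked example for the 2 MB masking above (illustrative, not from the original
 * source): with GCPhys = 0x40356000, GCPhys & X86_PDE2M_PAE_PG_MASK yields
 * GCPhysBase = 0x40200000, i.e. the 2 MB aligned start of the range, and the
 * loop then inspects all _2M / GUEST_PAGE_SIZE = 512 pages from there
 * (assuming 4 KiB guest pages).
 */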
1141
1142
1143/**
1144 * Recheck the entire 2 MB range to see if we can use it again as a large page.
1145 *
1146 * @returns The following VBox status codes.
1147 * @retval VINF_SUCCESS on success, the large page can be used again
1148 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
1149 *
1150 * @param pVM The cross context VM structure.
1151 * @param GCPhys The address of the page.
1152 * @param pLargePage Page structure of the base page
1153 */
1154int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
1155{
1156 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
1157
1158 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1159
1160 AssertCompile(X86_PDE2M_PAE_PG_MASK == EPT_PDE2M_PG_MASK); /* Paranoia: Caller uses this for guest EPT tables as well. */
1161 GCPhys &= X86_PDE2M_PAE_PG_MASK;
1162
1163 /* Check the base page. */
1164 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
1165 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
1166 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
1167 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1168 {
1169 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
1170 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1171 }
1172
1173 STAM_PROFILE_START(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1174 /* Check all remaining pages in the 2 MB range. */
1175 unsigned i;
1176 GCPhys += GUEST_PAGE_SIZE;
1177 for (i = 1; i < _2M / GUEST_PAGE_SIZE; i++)
1178 {
1179 PPGMPAGE pPage;
1180 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1181 AssertRCBreak(rc);
1182
1183 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
1184 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
1185 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
1186 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1187 {
1188 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
1189 break;
1190 }
1191
1192 GCPhys += GUEST_PAGE_SIZE;
1193 }
1194 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1195
1196 if (i == _2M / GUEST_PAGE_SIZE)
1197 {
1198 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
1199 pVM->pgm.s.cLargePagesDisabled--;
1200 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
1201 return VINF_SUCCESS;
1202 }
1203
1204 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1205}
1206
1207#endif /* PGM_WITH_LARGE_PAGES */
1208
1209
1210/**
1211 * Deal with a write monitored page.
1212 *
1213 * @param pVM The cross context VM structure.
1214 * @param pPage The physical page tracking structure.
1215 * @param GCPhys The guest physical address of the page.
1216 * PGMPhysReleasePageMappingLock() passes NIL_RTGCPHYS in a
1217 * very unlikely situation where it is okay that we let NEM
1218 * fix the page access in a lazy fashion.
1219 *
1220 * @remarks Called from within the PGM critical section.
1221 */
1222void pgmPhysPageMakeWriteMonitoredWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1223{
1224 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
1225 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1226 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1227 if (PGM_PAGE_IS_CODE_PAGE(pPage))
1228 {
1229 PGM_PAGE_CLEAR_CODE_PAGE(pVM, pPage);
1230 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_MADE_WRITABLE);
1231 }
1232
1233 Assert(pVM->pgm.s.cMonitoredPages > 0);
1234 pVM->pgm.s.cMonitoredPages--;
1235 pVM->pgm.s.cWrittenToPages++;
1236
1237#ifdef VBOX_WITH_NATIVE_NEM
1238 /*
1239 * Notify NEM about the protection change so we won't spin forever.
1240 *
1241 * Note! NEM needs to be able to lazily correct page protection as we cannot
1242 * really get it 100% right here, it seems. The page pool does this too.
1243 */
1244 if (VM_IS_NEM_ENABLED(pVM) && GCPhys != NIL_RTGCPHYS)
1245 {
1246 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1247 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1248 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1249 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
1250 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhys) : NULL,
1251 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1252 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1253 }
1254#else
1255 RT_NOREF(GCPhys);
1256#endif
1257}
1258
1259
1260/**
1261 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
1262 *
1263 * @returns VBox strict status code.
1264 * @retval VINF_SUCCESS on success.
1265 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1266 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1267 *
1268 * @param pVM The cross context VM structure.
1269 * @param pPage The physical page tracking structure.
1270 * @param GCPhys The address of the page.
1271 *
1272 * @remarks Called from within the PGM critical section.
1273 */
1274int pgmPhysPageMakeWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1275{
1276 PGM_LOCK_ASSERT_OWNER(pVM);
1277 switch (PGM_PAGE_GET_STATE(pPage))
1278 {
1279 case PGM_PAGE_STATE_WRITE_MONITORED:
1280 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
1281 RT_FALL_THRU();
1282 default: /* to shut up GCC */
1283 case PGM_PAGE_STATE_ALLOCATED:
1284 return VINF_SUCCESS;
1285
1286 /*
1287 * Zero pages can be dummy pages for MMIO or reserved memory,
1288 * so we need to check the flags before joining cause with
1289 * shared page replacement.
1290 */
1291 case PGM_PAGE_STATE_ZERO:
1292 if (PGM_PAGE_IS_MMIO(pPage))
1293 return VERR_PGM_PHYS_PAGE_RESERVED;
1294 RT_FALL_THRU();
1295 case PGM_PAGE_STATE_SHARED:
1296 return pgmPhysAllocPage(pVM, pPage, GCPhys);
1297
1298 /* Not allowed to write to ballooned pages. */
1299 case PGM_PAGE_STATE_BALLOONED:
1300 return VERR_PGM_PHYS_PAGE_BALLOONED;
1301 }
1302}
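/*
 * Typical caller pattern (illustrative sketch, not from the original source):
 * code about to write to a guest page first makes sure it is in the ALLOCATED
 * state and then maps it, which is exactly what pgmPhysPageMakeWritableAndMap
 * below bundles up:
 *
 *      PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
 *      if (pPage)
 *      {
 *          int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
 *          if (RT_SUCCESS(rc))
 *              ... the page can now be mapped and written ...
 *      }
 */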
1303
1304
1305/**
1306 * Internal usage: Map the page specified by its GMM ID.
1307 *
1308 * This is similar to pgmPhysPageMap
1309 *
1310 * @returns VBox status code.
1311 *
1312 * @param pVM The cross context VM structure.
1313 * @param idPage The Page ID.
1314 * @param HCPhys The physical address (for SUPR0HCPhysToVirt).
1315 * @param ppv Where to store the mapping address.
1316 *
1317 * @remarks Called from within the PGM critical section. The mapping is only
1318 * valid while you are inside this section.
1319 */
1320int pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
1321{
1322 /*
1323 * Validation.
1324 */
1325 PGM_LOCK_ASSERT_OWNER(pVM);
1326 AssertReturn(HCPhys && !(HCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1327 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
1328 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
1329
1330#ifdef IN_RING0
1331# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1332 return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)GUEST_PAGE_OFFSET_MASK, ppv);
1333# else
1334 return GMMR0PageIdToVirt(pVM, idPage, ppv);
1335# endif
1336
1337#else
1338 /*
1339 * Find/make Chunk TLB entry for the mapping chunk.
1340 */
1341 PPGMCHUNKR3MAP pMap;
1342 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1343 if (pTlbe->idChunk == idChunk)
1344 {
1345 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1346 pMap = pTlbe->pChunk;
1347 }
1348 else
1349 {
1350 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1351
1352 /*
1353 * Find the chunk, map it if necessary.
1354 */
1355 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1356 if (pMap)
1357 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1358 else
1359 {
1360 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1361 if (RT_FAILURE(rc))
1362 return rc;
1363 }
1364
1365 /*
1366 * Enter it into the Chunk TLB.
1367 */
1368 pTlbe->idChunk = idChunk;
1369 pTlbe->pChunk = pMap;
1370 }
1371
1372 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << GUEST_PAGE_SHIFT);
1373 return VINF_SUCCESS;
1374#endif
1375}
1376
1377
1378/**
1379 * Maps a page into the current virtual address space so it can be accessed.
1380 *
1381 * @returns VBox status code.
1382 * @retval VINF_SUCCESS on success.
1383 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1384 *
1385 * @param pVM The cross context VM structure.
1386 * @param pPage The physical page tracking structure.
1387 * @param GCPhys The address of the page.
1388 * @param ppMap Where to store the address of the mapping tracking structure.
1389 * @param ppv Where to store the mapping address of the page. The page
1390 * offset is masked off!
1391 *
1392 * @remarks Called from within the PGM critical section.
1393 */
1394static int pgmPhysPageMapCommon(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1395{
1396 PGM_LOCK_ASSERT_OWNER(pVM);
1397 NOREF(GCPhys);
1398
1399 /*
1400 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1401 */
1402 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1403 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1404 {
1405 /* Decode the page id to a page in a MMIO2 ram range. */
1406 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1407 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1408 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1409 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
1410 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1411 pPage->s.idPage, pPage->s.uStateY),
1412 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1413 PPGMREGMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1414 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1415 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1416 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> GUEST_PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1417 *ppMap = NULL;
1418# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1419 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1420# elif defined(IN_RING0)
1421 *ppv = (uint8_t *)pMmio2Range->pvR0 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
1422 return VINF_SUCCESS;
1423# else
1424 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
1425 return VINF_SUCCESS;
1426# endif
1427 }
1428
1429# ifdef VBOX_WITH_PGM_NEM_MODE
1430 if (pVM->pgm.s.fNemMode)
1431 {
1432# ifdef IN_RING3
1433 /*
1434 * Find the corresponding RAM range and use that to locate the mapping address.
1435 */
1436 /** @todo Use the page ID for some kind of indexing as we do with MMIO2 above. */
1437 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
1438 AssertLogRelMsgReturn(pRam, ("%RTGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
1439 size_t const idxPage = (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
1440 Assert(pPage == &pRam->aPages[idxPage]);
1441 *ppMap = NULL;
1442 *ppv = (uint8_t *)pRam->pvR3 + (idxPage << GUEST_PAGE_SHIFT);
1443 return VINF_SUCCESS;
1444# else
1445 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1446# endif
1447 }
1448# endif
1449
1450 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1451 if (idChunk == NIL_GMM_CHUNKID)
1452 {
1453 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1454 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1455 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1456 {
1457 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1458 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1459 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage)== pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1460 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1461 *ppv = pVM->pgm.s.abZeroPg;
1462 }
1463 else
1464 *ppv = pVM->pgm.s.abZeroPg;
1465 *ppMap = NULL;
1466 return VINF_SUCCESS;
1467 }
1468
1469# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1470 /*
1471 * Just use the physical address.
1472 */
1473 *ppMap = NULL;
1474 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1475
1476# elif defined(IN_RING0)
1477 /*
1478 * Go by page ID thru GMMR0.
1479 */
1480 *ppMap = NULL;
1481 return GMMR0PageIdToVirt(pVM, PGM_PAGE_GET_PAGEID(pPage), ppv);
1482
1483# else
1484 /*
1485 * Find/make Chunk TLB entry for the mapping chunk.
1486 */
1487 PPGMCHUNKR3MAP pMap;
1488 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1489 if (pTlbe->idChunk == idChunk)
1490 {
1491 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1492 pMap = pTlbe->pChunk;
1493 AssertPtr(pMap->pv);
1494 }
1495 else
1496 {
1497 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1498
1499 /*
1500 * Find the chunk, map it if necessary.
1501 */
1502 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1503 if (pMap)
1504 {
1505 AssertPtr(pMap->pv);
1506 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1507 }
1508 else
1509 {
1510 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1511 if (RT_FAILURE(rc))
1512 return rc;
1513 AssertPtr(pMap->pv);
1514 }
1515
1516 /*
1517 * Enter it into the Chunk TLB.
1518 */
1519 pTlbe->idChunk = idChunk;
1520 pTlbe->pChunk = pMap;
1521 }
1522
1523 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << GUEST_PAGE_SHIFT);
1524 *ppMap = pMap;
1525 return VINF_SUCCESS;
1526# endif /* !IN_RING0 */
1527}
1528
1529
1530/**
1531 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1532 *
1533 * This is typically used in paths where we cannot use the TLB methods (like ROM
1534 * pages) or where there is no point in using them since we won't get many hits.
1535 *
1536 * @returns VBox strict status code.
1537 * @retval VINF_SUCCESS on success.
1538 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1539 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1540 *
1541 * @param pVM The cross context VM structure.
1542 * @param pPage The physical page tracking structure.
1543 * @param GCPhys The address of the page.
1544 * @param ppv Where to store the mapping address of the page. The page
1545 * offset is masked off!
1546 *
1547 * @remarks Called from within the PGM critical section. The mapping is only
1548 * valid while you are inside this section.
1549 */
1550int pgmPhysPageMakeWritableAndMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1551{
1552 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1553 if (RT_SUCCESS(rc))
1554 {
1555 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1556 PPGMPAGEMAP pMapIgnore;
1557 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1558 if (RT_FAILURE(rc2)) /* preserve rc */
1559 rc = rc2;
1560 }
1561 return rc;
1562}
1563
1564
1565/**
1566 * Maps a page into the current virtual address space so it can be accessed for
1567 * both writing and reading.
1568 *
1569 * This is typically used in paths where we cannot use the TLB methods (like ROM
1570 * pages) or where there is no point in using them since we won't get many hits.
1571 *
1572 * @returns VBox status code.
1573 * @retval VINF_SUCCESS on success.
1574 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1575 *
1576 * @param pVM The cross context VM structure.
1577 * @param pPage The physical page tracking structure. Must be in the
1578 * allocated state.
1579 * @param GCPhys The address of the page.
1580 * @param ppv Where to store the mapping address of the page. The page
1581 * offset is masked off!
1582 *
1583 * @remarks Called from within the PGM critical section. The mapping is only
1584 * valid while you are inside this section.
1585 */
1586int pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1587{
1588 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1589 PPGMPAGEMAP pMapIgnore;
1590 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1591}
1592
1593
1594/**
1595 * Maps a page into the current virtual address space so it can be accessed for
1596 * reading.
1597 *
1598 * This is typically used in paths where we cannot use the TLB methods (like ROM
1599 * pages) or where there is no point in using them since we won't get many hits.
1600 *
1601 * @returns VBox status code.
1602 * @retval VINF_SUCCESS on success.
1603 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1604 *
1605 * @param pVM The cross context VM structure.
1606 * @param pPage The physical page tracking structure.
1607 * @param GCPhys The address of the page.
1608 * @param ppv Where to store the mapping address of the page. The page
1609 * offset is masked off!
1610 *
1611 * @remarks Called from within the PGM critical section. The mapping is only
1612 * valid while you are inside this section.
1613 */
1614int pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1615{
1616 PPGMPAGEMAP pMapIgnore;
1617 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1618}
1619
1620
1621/**
1622 * Load a guest page into the ring-3 physical TLB.
1623 *
1624 * @returns VBox status code.
1625 * @retval VINF_SUCCESS on success
1626 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1627 * @param pVM The cross context VM structure.
1628 * @param GCPhys The guest physical address in question.
1629 */
1630int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys)
1631{
1632 PGM_LOCK_ASSERT_OWNER(pVM);
1633
1634 /*
1635 * Find the ram range and page and hand it over to the with-page function.
1636 * 99.8% of requests are expected to be in the first range.
1637 */
1638 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1639 if (!pPage)
1640 {
1641 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1642 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1643 }
1644
1645 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1646}
1647
1648
1649/**
1650 * Load a guest page into the physical TLB for the current context.
1651 *
1652 * @returns VBox status code.
1653 * @retval VINF_SUCCESS on success
1654 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1655 *
1656 * @param pVM The cross context VM structure.
1657 * @param pPage Pointer to the PGMPAGE structure corresponding to
1658 * GCPhys.
1659 * @param GCPhys The guest physical address in question.
1660 */
1661int pgmPhysPageLoadIntoTlbWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1662{
1663 PGM_LOCK_ASSERT_OWNER(pVM);
1664 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1665
1666 /*
1667 * Map the page.
1668 * Make a special case for the zero page as it is kind of special.
1669 */
1670 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1671 if ( !PGM_PAGE_IS_ZERO(pPage)
1672 && !PGM_PAGE_IS_BALLOONED(pPage))
1673 {
1674 void *pv;
1675 PPGMPAGEMAP pMap;
1676 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1677 if (RT_FAILURE(rc))
1678 return rc;
1679# ifndef IN_RING0
1680 pTlbe->pMap = pMap;
1681# endif
1682 pTlbe->pv = pv;
1683 Assert(!((uintptr_t)pTlbe->pv & GUEST_PAGE_OFFSET_MASK));
1684 }
1685 else
1686 {
1687 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1688# ifndef IN_RING0
1689 pTlbe->pMap = NULL;
1690# endif
1691 pTlbe->pv = pVM->pgm.s.abZeroPg;
1692 }
1693# ifdef PGM_WITH_PHYS_TLB
1694 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1695 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1696 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1697 else
1698 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1699# else
1700 pTlbe->GCPhys = NIL_RTGCPHYS;
1701# endif
1702 pTlbe->pPage = pPage;
1703 return VINF_SUCCESS;
1704}
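
/*
 * A minimal sketch of how a consumer of this direct-mapped physical TLB might
 * test for a hit before falling back to the loader above.  This is only an
 * illustration; the real hit test lives in PGMInline.h, and pVM, GCPhys and rc
 * are assumed to be in scope.  Note that ROM and shadowed ROM entries store
 * NIL_RTGCPHYS and therefore never hit.
 *
 *      PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
 *      if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
 *      {
 *          // hit: pTlbe->pv and pTlbe->pPage are valid for this page
 *      }
 *      else
 *          rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys); // miss: (re)load the entry
 */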
1705
1706
1707/**
1708 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1709 * own the PGM lock and therefore not need to lock the mapped page.
1710 *
1711 * @returns VBox status code.
1712 * @retval VINF_SUCCESS on success.
1713 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1714 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1715 *
1716 * @param pVM The cross context VM structure.
1717 * @param GCPhys The guest physical address of the page that should be mapped.
1718 * @param pPage Pointer to the PGMPAGE structure for the page.
1719 * @param ppv Where to store the address corresponding to GCPhys.
1720 *
1721 * @internal
1722 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1723 */
1724int pgmPhysGCPhys2CCPtrInternalDepr(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1725{
1726 int rc;
1727 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1728 PGM_LOCK_ASSERT_OWNER(pVM);
1729 pVM->pgm.s.cDeprecatedPageLocks++;
1730
1731 /*
1732 * Make sure the page is writable.
1733 */
1734 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1735 {
1736 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1737 if (RT_FAILURE(rc))
1738 return rc;
1739 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1740 }
1741 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1742
1743 /*
1744 * Get the mapping address.
1745 */
1746 PPGMPAGEMAPTLBE pTlbe;
1747 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1748 if (RT_FAILURE(rc))
1749 return rc;
1750 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1751 return VINF_SUCCESS;
1752}
1753
1754
1755/**
1756 * Locks a page mapping for writing.
1757 *
1758 * @param pVM The cross context VM structure.
1759 * @param pPage The page.
1760 * @param pTlbe The mapping TLB entry for the page.
1761 * @param pLock The lock structure (output).
1762 */
1763DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1764{
1765# ifndef IN_RING0
1766 PPGMPAGEMAP pMap = pTlbe->pMap;
1767 if (pMap)
1768 pMap->cRefs++;
1769# else
1770 RT_NOREF(pTlbe);
1771# endif
1772
1773 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1774 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1775 {
1776 if (cLocks == 0)
1777 pVM->pgm.s.cWriteLockedPages++;
1778 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1779 }
1780 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1781 {
1782 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1783 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1784# ifndef IN_RING0
1785 if (pMap)
1786 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1787# endif
1788 }
1789
1790 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1791# ifndef IN_RING0
1792 pLock->pvMap = pMap;
1793# else
1794 pLock->pvMap = NULL;
1795# endif
1796}
1797
1798/**
1799 * Locks a page mapping for reading.
1800 *
1801 * @param pVM The cross context VM structure.
1802 * @param pPage The page.
1803 * @param pTlbe The mapping TLB entry for the page.
1804 * @param pLock The lock structure (output).
1805 */
1806DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1807{
1808# ifndef IN_RING0
1809 PPGMPAGEMAP pMap = pTlbe->pMap;
1810 if (pMap)
1811 pMap->cRefs++;
1812# else
1813 RT_NOREF(pTlbe);
1814# endif
1815
1816 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1817 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1818 {
1819 if (cLocks == 0)
1820 pVM->pgm.s.cReadLockedPages++;
1821 PGM_PAGE_INC_READ_LOCKS(pPage);
1822 }
1823 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1824 {
1825 PGM_PAGE_INC_READ_LOCKS(pPage);
1826 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1827# ifndef IN_RING0
1828 if (pMap)
1829 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1830# endif
1831 }
1832
1833 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1834# ifndef IN_RING0
1835 pLock->pvMap = pMap;
1836# else
1837 pLock->pvMap = NULL;
1838# endif
1839}
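
/*
 * Note on the saturating lock counters used by the two helpers above: once a
 * page reaches PGM_PAGE_MAX_LOCKS it is treated as permanently locked, so a
 * release path only decrements while the count is still below the maximum.
 * A minimal sketch of that release rule (mirroring the code further down):
 *
 *      unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
 *      if (cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS)
 *          PGM_PAGE_DEC_READ_LOCKS(pPage);   // normal unlock
 *      // else: saturated - the page stays locked for the rest of the VM's life
 */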
1840
1841
1842/**
1843 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1844 * own the PGM lock and have access to the page structure.
1845 *
1846 * @returns VBox status code.
1847 * @retval VINF_SUCCESS on success.
1848 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1849 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1850 *
1851 * @param pVM The cross context VM structure.
1852 * @param GCPhys The guest physical address of the page that should be mapped.
1853 * @param pPage Pointer to the PGMPAGE structure for the page.
1854 * @param ppv Where to store the address corresponding to GCPhys.
1855 * @param pLock Where to store the lock information that
1856 * pgmPhysReleaseInternalPageMappingLock needs.
1857 *
1858 * @internal
1859 */
1860int pgmPhysGCPhys2CCPtrInternal(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1861{
1862 int rc;
1863 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1864 PGM_LOCK_ASSERT_OWNER(pVM);
1865
1866 /*
1867 * Make sure the page is writable.
1868 */
1869 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1870 {
1871 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1872 if (RT_FAILURE(rc))
1873 return rc;
1874 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1875 }
1876 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1877
1878 /*
1879 * Do the job.
1880 */
1881 PPGMPAGEMAPTLBE pTlbe;
1882 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1883 if (RT_FAILURE(rc))
1884 return rc;
1885 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1886 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1887 return VINF_SUCCESS;
1888}
1889
1890
1891/**
1892 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1893 * own the PGM lock and have access to the page structure.
1894 *
1895 * @returns VBox status code.
1896 * @retval VINF_SUCCESS on success.
1897 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1898 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1899 *
1900 * @param pVM The cross context VM structure.
1901 * @param GCPhys The guest physical address of the page that should be mapped.
1902 * @param pPage Pointer to the PGMPAGE structure for the page.
1903 * @param ppv Where to store the address corresponding to GCPhys.
1904 * @param pLock Where to store the lock information that
1905 * pgmPhysReleaseInternalPageMappingLock needs.
1906 *
1907 * @internal
1908 */
1909int pgmPhysGCPhys2CCPtrInternalReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1910{
1911 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1912 PGM_LOCK_ASSERT_OWNER(pVM);
1913 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1914
1915 /*
1916 * Do the job.
1917 */
1918 PPGMPAGEMAPTLBE pTlbe;
1919 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1920 if (RT_FAILURE(rc))
1921 return rc;
1922 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1923 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1924 return VINF_SUCCESS;
1925}
1926
1927
1928/**
1929 * Requests the mapping of a guest page into the current context.
1930 *
1931 * This API should only be used for very short periods, as it will consume scarce
1932 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1933 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1934 *
1935 * This API will assume your intention is to write to the page, and will
1936 * therefore replace shared and zero pages. If you do not intend to modify
1937 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1938 *
1939 * @returns VBox status code.
1940 * @retval VINF_SUCCESS on success.
1941 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1942 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1943 *
1944 * @param pVM The cross context VM structure.
1945 * @param GCPhys The guest physical address of the page that should be
1946 * mapped.
1947 * @param ppv Where to store the address corresponding to GCPhys.
1948 * @param pLock Where to store the lock information that
1949 * PGMPhysReleasePageMappingLock needs.
1950 *
1951 * @remarks The caller is responsible for dealing with access handlers.
1952 * @todo Add an informational return code for pages with access handlers?
1953 *
1954 * @remark Avoid calling this API from within critical sections (other than
1955 * the PGM one) because of the deadlock risk. External threads may
1956 * need to delegate jobs to the EMTs.
1957 * @remarks Only one page is mapped! Make no assumption about what's after or
1958 * before the returned page!
1959 * @thread Any thread.
1960 */
1961VMM_INT_DECL(int) PGMPhysGCPhys2CCPtr(PVMCC pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1962{
1963 int rc = PGM_LOCK(pVM);
1964 AssertRCReturn(rc, rc);
1965
1966 /*
1967 * Query the Physical TLB entry for the page (may fail).
1968 */
1969 PPGMPAGEMAPTLBE pTlbe;
1970 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1971 if (RT_SUCCESS(rc))
1972 {
1973 /*
1974 * If the page is shared, the zero page, or being write monitored
1975 * it must be converted to a page that's writable if possible.
1976 */
1977 PPGMPAGE pPage = pTlbe->pPage;
1978 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1979 {
1980 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1981 if (RT_SUCCESS(rc))
1982 {
1983 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1984 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1985 }
1986 }
1987 if (RT_SUCCESS(rc))
1988 {
1989 /*
1990 * Now, just perform the locking and calculate the return address.
1991 */
1992 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1993 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1994 }
1995 }
1996
1997 PGM_UNLOCK(pVM);
1998 return rc;
1999}
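
/*
 * Typical usage sketch for PGMPhysGCPhys2CCPtr and its release counterpart
 * (illustrative only: pVM and GCPhys are assumed to be valid and in scope,
 * GCPhys is assumed to be page aligned, and error handling is reduced to the
 * bare minimum):
 *
 *      void           *pv;
 *      PGMPAGEMAPLOCK  Lock;
 *      int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          memset(pv, 0, GUEST_PAGE_SIZE);            // only this one page is mapped
 *          PGMPhysReleasePageMappingLock(pVM, &Lock); // release ASAP
 *      }
 */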
2000
2001
2002/**
2003 * Requests the mapping of a guest page into the current context.
2004 *
2005 * This API should only be used for very short periods, as it will consume scarce
2006 * resources (R0 and GC) in the mapping cache. When you're done with the page,
2007 * call PGMPhysReleasePageMappingLock() ASAP to release it.
2008 *
2009 * @returns VBox status code.
2010 * @retval VINF_SUCCESS on success.
2011 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2012 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2013 *
2014 * @param pVM The cross context VM structure.
2015 * @param GCPhys The guest physical address of the page that should be
2016 * mapped.
2017 * @param ppv Where to store the address corresponding to GCPhys.
2018 * @param pLock Where to store the lock information that
2019 * PGMPhysReleasePageMappingLock needs.
2020 *
2021 * @remarks The caller is responsible for dealing with access handlers.
2022 * @todo Add an informational return code for pages with access handlers?
2023 *
2024 * @remarks Avoid calling this API from within critical sections (other than
2025 * the PGM one) because of the deadlock risk.
2026 * @remarks Only one page is mapped! Make no assumption about what's after or
2027 * before the returned page!
2028 * @thread Any thread.
2029 */
2030VMM_INT_DECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVMCC pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
2031{
2032 int rc = PGM_LOCK(pVM);
2033 AssertRCReturn(rc, rc);
2034
2035 /*
2036 * Query the Physical TLB entry for the page (may fail).
2037 */
2038 PPGMPAGEMAPTLBE pTlbe;
2039 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
2040 if (RT_SUCCESS(rc))
2041 {
2042 /* MMIO pages don't have any readable backing. */
2043 PPGMPAGE pPage = pTlbe->pPage;
2044 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
2045 rc = VERR_PGM_PHYS_PAGE_RESERVED;
2046 else
2047 {
2048 /*
2049 * Now, just perform the locking and calculate the return address.
2050 */
2051 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
2052 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
2053 }
2054 }
2055
2056 PGM_UNLOCK(pVM);
2057 return rc;
2058}
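
/*
 * Read-only usage sketch (again illustrative; pVM and a suitably aligned
 * GCPhys are assumed to be in scope, and uFirstDword is just a local of the
 * example):
 *
 *      void const     *pv;
 *      PGMPAGEMAPLOCK  Lock;
 *      int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          uint32_t const uFirstDword = *(uint32_t const *)pv; // peek at the page
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *          RT_NOREF(uFirstDword);
 *      }
 */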
2059
2060
2061/**
2062 * Requests the mapping of a guest page given by virtual address into the current context.
2063 *
2064 * This API should only be used for very short periods, as it will consume
2065 * scarce resources (R0 and GC) in the mapping cache. When you're done
2066 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2067 *
2068 * This API will assume your intention is to write to the page, and will
2069 * therefore replace shared and zero pages. If you do not intend to modify
2070 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
2071 *
2072 * @returns VBox status code.
2073 * @retval VINF_SUCCESS on success.
2074 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2075 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2076 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2077 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2078 *
2079 * @param pVCpu The cross context virtual CPU structure.
2080 * @param GCPtr The guest virtual address of the page that should be
2081 * mapped.
2082 * @param ppv Where to store the address corresponding to GCPtr.
2083 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2084 *
2085 * @remark Avoid calling this API from within critical sections (other than
2086 * the PGM one) because of the deadlock risk.
2087 * @thread EMT
2088 */
2089VMM_INT_DECL(int) PGMPhysGCPtr2CCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
2090{
2091 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2092 RTGCPHYS GCPhys;
2093 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2094 if (RT_SUCCESS(rc))
2095 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2096 return rc;
2097}
2098
2099
2100/**
2101 * Requests the mapping of a guest page given by virtual address into the current context.
2102 *
2103 * This API should only be used for very short periods, as it will consume
2104 * scarce resources (R0 and GC) in the mapping cache. When you're done
2105 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2106 *
2107 * @returns VBox status code.
2108 * @retval VINF_SUCCESS on success.
2109 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2110 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2111 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2112 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2113 *
2114 * @param pVCpu The cross context virtual CPU structure.
2115 * @param GCPtr The guest virtual address of the page that should be
2116 * mapped.
2117 * @param ppv Where to store the address corresponding to GCPtr.
2118 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2119 *
2120 * @remark Avoid calling this API from within critical sections (other than
2121 * the PGM one) because of the deadlock risk.
2122 * @thread EMT
2123 */
2124VMM_INT_DECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
2125{
2126 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2127 RTGCPHYS GCPhys;
2128 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2129 if (RT_SUCCESS(rc))
2130 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2131 return rc;
2132}
2133
2134
2135/**
2136 * Release the mapping of a guest page.
2137 *
2138 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
2139 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
2140 *
2141 * @param pVM The cross context VM structure.
2142 * @param pLock The lock structure initialized by the mapping function.
2143 */
2144VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2145{
2146# ifndef IN_RING0
2147 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
2148# endif
2149 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2150 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2151
2152 pLock->uPageAndType = 0;
2153 pLock->pvMap = NULL;
2154
2155 PGM_LOCK_VOID(pVM);
2156 if (fWriteLock)
2157 {
2158 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2159 Assert(cLocks > 0);
2160 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2161 {
2162 if (cLocks == 1)
2163 {
2164 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2165 pVM->pgm.s.cWriteLockedPages--;
2166 }
2167 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2168 }
2169
2170 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2171 { /* probably extremely likely */ }
2172 else
2173 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2174 }
2175 else
2176 {
2177 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2178 Assert(cLocks > 0);
2179 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2180 {
2181 if (cLocks == 1)
2182 {
2183 Assert(pVM->pgm.s.cReadLockedPages > 0);
2184 pVM->pgm.s.cReadLockedPages--;
2185 }
2186 PGM_PAGE_DEC_READ_LOCKS(pPage);
2187 }
2188 }
2189
2190# ifndef IN_RING0
2191 if (pMap)
2192 {
2193 Assert(pMap->cRefs >= 1);
2194 pMap->cRefs--;
2195 }
2196# endif
2197 PGM_UNLOCK(pVM);
2198}
2199
2200
2201#ifdef IN_RING3
2202/**
2203 * Release the mapping of multiple guest pages.
2204 *
2205 * This is the counterpart to PGMR3PhysBulkGCPhys2CCPtrExternal() and
2206 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal().
2207 *
2208 * @param pVM The cross context VM structure.
2209 * @param cPages Number of pages to unlock.
2210 * @param paLocks Array of lock structures initialized by the mapping
2211 * function.
2212 */
2213VMMDECL(void) PGMPhysBulkReleasePageMappingLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
2214{
2215 Assert(cPages > 0);
2216 bool const fWriteLock = (paLocks[0].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2217#ifdef VBOX_STRICT
2218 for (uint32_t i = 1; i < cPages; i++)
2219 {
2220 Assert(fWriteLock == ((paLocks[i].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE));
2221 AssertPtr(paLocks[i].uPageAndType);
2222 }
2223#endif
2224
2225 PGM_LOCK_VOID(pVM);
2226 if (fWriteLock)
2227 {
2228 /*
2229 * Write locks:
2230 */
2231 for (uint32_t i = 0; i < cPages; i++)
2232 {
2233 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2234 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2235 Assert(cLocks > 0);
2236 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2237 {
2238 if (cLocks == 1)
2239 {
2240 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2241 pVM->pgm.s.cWriteLockedPages--;
2242 }
2243 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2244 }
2245
2246 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2247 { /* probably extremely likely */ }
2248 else
2249 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2250
2251 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2252 if (pMap)
2253 {
2254 Assert(pMap->cRefs >= 1);
2255 pMap->cRefs--;
2256 }
2257
2258 /* Yield the lock: */
2259 if ((i & 1023) == 1023 && i + 1 < cPages)
2260 {
2261 PGM_UNLOCK(pVM);
2262 PGM_LOCK_VOID(pVM);
2263 }
2264 }
2265 }
2266 else
2267 {
2268 /*
2269 * Read locks:
2270 */
2271 for (uint32_t i = 0; i < cPages; i++)
2272 {
2273 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2274 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2275 Assert(cLocks > 0);
2276 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2277 {
2278 if (cLocks == 1)
2279 {
2280 Assert(pVM->pgm.s.cReadLockedPages > 0);
2281 pVM->pgm.s.cReadLockedPages--;
2282 }
2283 PGM_PAGE_DEC_READ_LOCKS(pPage);
2284 }
2285
2286 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2287 if (pMap)
2288 {
2289 Assert(pMap->cRefs >= 1);
2290 pMap->cRefs--;
2291 }
2292
2293 /* Yield the lock: */
2294 if ((i & 1023) == 1023 && i + 1 < cPages)
2295 {
2296 PGM_UNLOCK(pVM);
2297 PGM_LOCK_VOID(pVM);
2298 }
2299 }
2300 }
2301 PGM_UNLOCK(pVM);
2302
2303 RT_BZERO(paLocks, sizeof(paLocks[0]) * cPages);
2304}
2305#endif /* IN_RING3 */
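
/*
 * The periodic lock yield in the bulk release above (every 1024 pages) is a
 * general pattern for long-running loops under the PGM lock.  A minimal sketch
 * of the same idea, with cItems and the per-item work being placeholders:
 *
 *      PGM_LOCK_VOID(pVM);
 *      for (uint32_t i = 0; i < cItems; i++)
 *      {
 *          // ... per-item work that requires the PGM lock ...
 *          if ((i & 1023) == 1023 && i + 1 < cItems)
 *          {
 *              PGM_UNLOCK(pVM);    // give other threads a chance to take the lock
 *              PGM_LOCK_VOID(pVM);
 *          }
 *      }
 *      PGM_UNLOCK(pVM);
 */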
2306
2307
2308/**
2309 * Release the internal mapping of a guest page.
2310 *
2311 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
2312 * pgmPhysGCPhys2CCPtrInternalReadOnly.
2313 *
2314 * @param pVM The cross context VM structure.
2315 * @param pLock The lock structure initialized by the mapping function.
2316 *
2317 * @remarks Caller must hold the PGM lock.
2318 */
2319void pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2320{
2321 PGM_LOCK_ASSERT_OWNER(pVM);
2322 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
2323}
2324
2325
2326/**
2327 * Converts a GC physical address to a HC ring-3 pointer.
2328 *
2329 * @returns VINF_SUCCESS on success.
2330 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2331 * page but has no physical backing.
2332 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2333 * GC physical address.
2334 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2335 * a dynamic ram chunk boundary
2336 *
2337 * @param pVM The cross context VM structure.
2338 * @param GCPhys The GC physical address to convert.
2339 * @param pR3Ptr Where to store the R3 pointer on success.
2340 *
2341 * @deprecated Avoid when possible!
2342 */
2343int pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2344{
2345/** @todo this is kind of hacky and needs some more work. */
2346#ifndef DEBUG_sandervl
2347 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
2348#endif
2349
2350 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
2351 PGM_LOCK_VOID(pVM);
2352
2353 PPGMRAMRANGE pRam;
2354 PPGMPAGE pPage;
2355 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2356 if (RT_SUCCESS(rc))
2357 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2358
2359 PGM_UNLOCK(pVM);
2360 Assert(rc <= VINF_SUCCESS);
2361 return rc;
2362}
2363
2364
2365/**
2366 * Converts a guest pointer to a GC physical address.
2367 *
2368 * This uses the current CR3/CR0/CR4 of the guest.
2369 *
2370 * @returns VBox status code.
2371 * @param pVCpu The cross context virtual CPU structure.
2372 * @param GCPtr The guest pointer to convert.
2373 * @param pGCPhys Where to store the GC physical address.
2374 */
2375VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2376{
2377 PGMPTWALK Walk;
2378 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2379 if (pGCPhys && RT_SUCCESS(rc))
2380 *pGCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK);
2381 return rc;
2382}
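
/*
 * Translation sketch (pVCpu and GCPtr are assumed to be valid for the current
 * guest paging mode); the byte offset into the page is carried over from the
 * guest pointer into the returned physical address:
 *
 *      RTGCPHYS GCPhys = NIL_RTGCPHYS;
 *      int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
 *      if (RT_SUCCESS(rc))
 *          LogFlow(("GCPtr %RGv -> GCPhys %RGp\n", GCPtr, GCPhys));
 */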
2383
2384
2385/**
2386 * Converts a guest pointer to a HC physical address.
2387 *
2388 * This uses the current CR3/CR0/CR4 of the guest.
2389 *
2390 * @returns VBox status code.
2391 * @param pVCpu The cross context virtual CPU structure.
2392 * @param GCPtr The guest pointer to convert.
2393 * @param pHCPhys Where to store the HC physical address.
2394 */
2395VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2396{
2397 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2398 PGMPTWALK Walk;
2399 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2400 if (RT_SUCCESS(rc))
2401 rc = PGMPhysGCPhys2HCPhys(pVM, Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK), pHCPhys);
2402 return rc;
2403}
2404
2405
2406
2407#undef LOG_GROUP
2408#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2409
2410
2411#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2412/**
2413 * Cache PGMPhys memory access
2414 *
2415 * @param pVM The cross context VM structure.
2416 * @param pCache Cache structure pointer
2417 * @param GCPhys GC physical address
2418 * @param pbR3 HC pointer corresponding to physical page
2419 *
2420 * @thread EMT.
2421 */
2422static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2423{
2424 uint32_t iCacheIndex;
2425
2426 Assert(VM_IS_EMT(pVM));
2427
2428 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
2429 pbR3 = (uint8_t *)((uintptr_t)pbR3 & ~(uintptr_t)GUEST_PAGE_OFFSET_MASK);
2430
2431 iCacheIndex = ((GCPhys >> GUEST_PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2432
2433 ASMBitSet(&pCache->aEntries, iCacheIndex);
2434
2435 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2436 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2437}
2438#endif /* IN_RING3 */
2439
2440
2441/**
2442 * Deals with reading from a page with one or more ALL access handlers.
2443 *
2444 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2445 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2446 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2447 *
2448 * @param pVM The cross context VM structure.
2449 * @param pPage The page descriptor.
2450 * @param GCPhys The physical address to start reading at.
2451 * @param pvBuf Where to put the bits we read.
2452 * @param cb How much to read - less or equal to a page.
2453 * @param enmOrigin The origin of this call.
2454 */
2455static VBOXSTRICTRC pgmPhysReadHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb,
2456 PGMACCESSORIGIN enmOrigin)
2457{
2458 /*
2459 * The most frequent access here is MMIO and shadowed ROM.
2460 * The current code ASSUMES all these access handlers cover full pages!
2461 */
2462
2463 /*
2464 * Whatever we do we need the source page, map it first.
2465 */
2466 PGMPAGEMAPLOCK PgMpLck;
2467 const void *pvSrc = NULL;
2468 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2469/** @todo Check how this can work for MMIO pages? */
2470 if (RT_FAILURE(rc))
2471 {
2472 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2473 GCPhys, pPage, rc));
2474 memset(pvBuf, 0xff, cb);
2475 return VINF_SUCCESS;
2476 }
2477
2478 VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
2479
2480 /*
2481 * Deal with any physical handlers.
2482 */
2483 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2484 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2485 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2486 {
2487 PPGMPHYSHANDLER pCur;
2488 rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2489 if (RT_SUCCESS(rc))
2490 {
2491 Assert(pCur && GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
2492 Assert((pCur->Key & GUEST_PAGE_OFFSET_MASK) == 0);
2493 Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
2494#ifndef IN_RING3
2495 if (enmOrigin != PGMACCESSORIGIN_IEM)
2496 {
2497 /* Cannot reliably handle informational status codes in this context */
2498 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2499 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2500 }
2501#endif
2502 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2503 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler; Assert(pfnHandler);
2504 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
2505 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
2506
2507 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pCur->pszDesc) ));
2508 STAM_PROFILE_START(&pCur->Stat, h);
2509 PGM_LOCK_ASSERT_OWNER(pVM);
2510
2511 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2512 PGM_UNLOCK(pVM);
2513 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, uUser);
2514 PGM_LOCK_VOID(pVM);
2515
2516 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2517 pCur = NULL; /* might not be valid anymore. */
2518 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, false),
2519 ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
2520 if ( rcStrict != VINF_PGM_HANDLER_DO_DEFAULT
2521 && !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2522 {
2523 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2524 return rcStrict;
2525 }
2526 }
2527 else if (rc == VERR_NOT_FOUND)
2528 AssertLogRelMsgFailed(("rc=%Rrc GCPhys=%RGp cb=%#x\n", rc, GCPhys, cb));
2529 else
2530 AssertLogRelMsgFailedReturn(("rc=%Rrc GCPhys=%RGp cb=%#x\n", rc, GCPhys, cb), rc);
2531 }
2532
2533 /*
2534 * Take the default action.
2535 */
2536 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2537 {
2538 memcpy(pvBuf, pvSrc, cb);
2539 rcStrict = VINF_SUCCESS;
2540 }
2541 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2542 return rcStrict;
2543}
2544
2545
2546/**
2547 * Read physical memory.
2548 *
2549 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2550 * want to ignore those.
2551 *
2552 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2553 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2554 * @retval VINF_SUCCESS in all context - read completed.
2555 *
2556 * @retval VINF_EM_OFF in RC and R0 - read completed.
2557 * @retval VINF_EM_SUSPEND in RC and R0 - read completed.
2558 * @retval VINF_EM_RESET in RC and R0 - read completed.
2559 * @retval VINF_EM_HALT in RC and R0 - read completed.
2560 * @retval VINF_SELM_SYNC_GDT in RC only - read completed.
2561 *
2562 * @retval VINF_EM_DBG_STOP in RC and R0 - read completed.
2563 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - read completed.
2564 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2565 *
2566 * @retval VINF_IOM_R3_MMIO_READ in RC and R0.
2567 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2568 *
2569 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2570 *
2571 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2572 * haven't been cleared for strict status codes yet.
2573 *
2574 * @param pVM The cross context VM structure.
2575 * @param GCPhys Physical address start reading from.
2576 * @param pvBuf Where to put the read bits.
2577 * @param cbRead How many bytes to read.
2578 * @param enmOrigin The origin of this call.
2579 */
2580VMMDECL(VBOXSTRICTRC) PGMPhysRead(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2581{
2582 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2583 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2584
2585 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysRead));
2586 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2587
2588 PGM_LOCK_VOID(pVM);
2589
2590 /*
2591 * Copy loop on ram ranges.
2592 */
2593 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2594 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2595 for (;;)
2596 {
2597 /* Inside range or not? */
2598 if (pRam && GCPhys >= pRam->GCPhys)
2599 {
2600 /*
2601 * Must work our way thru this page by page.
2602 */
2603 RTGCPHYS off = GCPhys - pRam->GCPhys;
2604 while (off < pRam->cb)
2605 {
2606 unsigned iPage = off >> GUEST_PAGE_SHIFT;
2607 PPGMPAGE pPage = &pRam->aPages[iPage];
2608 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
2609 if (cb > cbRead)
2610 cb = cbRead;
2611
2612 /*
2613 * Normal page? Get the pointer to it.
2614 */
2615 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2616 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2617 {
2618 /*
2619 * Get the pointer to the page.
2620 */
2621 PGMPAGEMAPLOCK PgMpLck;
2622 const void *pvSrc;
2623 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2624 if (RT_SUCCESS(rc))
2625 {
2626 memcpy(pvBuf, pvSrc, cb);
2627 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2628 }
2629 else
2630 {
2631 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2632 pRam->GCPhys + off, pPage, rc));
2633 memset(pvBuf, 0xff, cb);
2634 }
2635 }
2636 /*
2637 * Have ALL/MMIO access handlers.
2638 */
2639 else
2640 {
2641 VBOXSTRICTRC rcStrict2 = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2642 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2643 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2644 else
2645 {
2646 /* Set the remaining buffer to a known value. */
2647 memset(pvBuf, 0xff, cbRead);
2648 PGM_UNLOCK(pVM);
2649 return rcStrict2;
2650 }
2651 }
2652
2653 /* next page */
2654 if (cb >= cbRead)
2655 {
2656 PGM_UNLOCK(pVM);
2657 return rcStrict;
2658 }
2659 cbRead -= cb;
2660 off += cb;
2661 pvBuf = (char *)pvBuf + cb;
2662 } /* walk pages in ram range. */
2663
2664 GCPhys = pRam->GCPhysLast + 1;
2665 }
2666 else
2667 {
2668 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2669
2670 /*
2671 * Unassigned address space.
2672 */
2673 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2674 if (cb >= cbRead)
2675 {
2676 memset(pvBuf, 0xff, cbRead);
2677 break;
2678 }
2679 memset(pvBuf, 0xff, cb);
2680
2681 cbRead -= cb;
2682 pvBuf = (char *)pvBuf + cb;
2683 GCPhys += cb;
2684 }
2685
2686 /* Advance range if necessary. */
2687 while (pRam && GCPhys > pRam->GCPhysLast)
2688 pRam = pRam->CTX_SUFF(pNext);
2689 } /* Ram range walk */
2690
2691 PGM_UNLOCK(pVM);
2692 return rcStrict;
2693}
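
/*
 * Caller-side sketch for the strict status handling documented above.  GCPhys,
 * pvBuf, cb and enmOrigin are assumed to be set up by the caller; in ring-3 a
 * successful read typically just returns VINF_SUCCESS:
 *
 *      VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvBuf, cb, enmOrigin);
 *      if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *      {
 *          // the buffer was filled; rcStrict may still carry an informational
 *          // status that must be propagated to the caller / EM loop
 *      }
 *      else
 *      {
 *          // the read did not complete normally; pass rcStrict up as-is
 *      }
 */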
2694
2695
2696/**
2697 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2698 *
2699 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2700 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2701 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2702 *
2703 * @param pVM The cross context VM structure.
2704 * @param pPage The page descriptor.
2705 * @param GCPhys The physical address to start writing at.
2706 * @param pvBuf What to write.
2707 * @param cbWrite How much to write - less or equal to a page.
2708 * @param enmOrigin The origin of this call.
2709 */
2710static VBOXSTRICTRC pgmPhysWriteHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2711 PGMACCESSORIGIN enmOrigin)
2712{
2713 PGMPAGEMAPLOCK PgMpLck;
2714 void *pvDst = NULL;
2715 VBOXSTRICTRC rcStrict;
2716
2717 /*
2718 * Give priority to physical handlers (like #PF does).
2719 *
2720 * Hope for a lonely physical handler first that covers the whole write
2721 * area. This should be a pretty frequent case with MMIO and the heavy
2722 * usage of full page handlers in the page pool.
2723 */
2724 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2725 PPGMPHYSHANDLER pCur;
2726 rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2727 if (RT_SUCCESS(rcStrict))
2728 {
2729 Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
2730#ifndef IN_RING3
2731 if (enmOrigin != PGMACCESSORIGIN_IEM)
2732 /* Cannot reliably handle informational status codes in this context */
2733 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2734#endif
2735 size_t cbRange = pCur->KeyLast - GCPhys + 1;
2736 if (cbRange > cbWrite)
2737 cbRange = cbWrite;
2738
2739 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->pfnHandler);
2740 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2741 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2742 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2743 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2744 else
2745 rcStrict = VINF_SUCCESS;
2746 if (RT_SUCCESS(rcStrict))
2747 {
2748 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2749 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
2750 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
2751 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
2752 STAM_PROFILE_START(&pCur->Stat, h);
2753
2754 /* Most handlers will want to release the PGM lock for deadlock prevention
2755 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2756 dirty page trackers will want to keep it for performance reasons. */
2757 PGM_LOCK_ASSERT_OWNER(pVM);
2758 if (pCurType->fKeepPgmLock)
2759 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2760 else
2761 {
2762 PGM_UNLOCK(pVM);
2763 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2764 PGM_LOCK_VOID(pVM);
2765 }
2766
2767 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2768 pCur = NULL; /* might not be valid anymore. */
2769 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2770 {
2771 if (pvDst)
2772 memcpy(pvDst, pvBuf, cbRange);
2773 rcStrict = VINF_SUCCESS;
2774 }
2775 else
2776 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
2777 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2778 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pCur ? R3STRING(pCur->pszDesc) : ""));
2779 }
2780 else
2781 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2782 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
2783 if (RT_LIKELY(cbRange == cbWrite) || !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2784 {
2785 if (pvDst)
2786 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2787 return rcStrict;
2788 }
2789
2790 /* more fun to be had below */
2791 cbWrite -= cbRange;
2792 GCPhys += cbRange;
2793 pvBuf = (uint8_t *)pvBuf + cbRange;
2794 pvDst = (uint8_t *)pvDst + cbRange;
2795 }
2796 else if (rcStrict == VERR_NOT_FOUND) /* The handler is somewhere else in the page, deal with it below. */
2797 rcStrict = VINF_SUCCESS;
2798 else
2799 AssertMsgFailedReturn(("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2800 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all GUEST_PAGE_SIZEed! */
2801
2802 /*
2803 * Deal with all the odd ends (used to be deal with virt+phys).
2804 */
2805 Assert(rcStrict != VINF_PGM_HANDLER_DO_DEFAULT);
2806
2807 /* We need a writable destination page. */
2808 if (!pvDst)
2809 {
2810 int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2811 AssertLogRelMsgReturn(RT_SUCCESS(rc2),
2812 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n", GCPhys, pPage, rc2),
2813 rc2);
2814 }
2815
2816 /** @todo clean up this code some more now that there are no virtual handlers any
2817 * more. */
2818 /* The loop state (big + ugly). */
2819 PPGMPHYSHANDLER pPhys = NULL;
2820 uint32_t offPhys = GUEST_PAGE_SIZE;
2821 uint32_t offPhysLast = GUEST_PAGE_SIZE;
2822 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2823
2824 /* The loop. */
2825 for (;;)
2826 {
2827 if (fMorePhys && !pPhys)
2828 {
2829 rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhys, &pPhys);
2830 if (RT_SUCCESS_NP(rcStrict))
2831 {
2832 offPhys = 0;
2833 offPhysLast = pPhys->KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2834 }
2835 else
2836 {
2837 AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2838
2839 rcStrict = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
2840 GCPhys, &pPhys);
2841 AssertMsgReturn(RT_SUCCESS(rcStrict) || rcStrict == VERR_NOT_FOUND,
2842 ("%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2843
2844 if ( RT_SUCCESS(rcStrict)
2845 && pPhys->Key <= GCPhys + (cbWrite - 1))
2846 {
2847 offPhys = pPhys->Key - GCPhys;
2848 offPhysLast = pPhys->KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2849 Assert(pPhys->KeyLast - pPhys->Key < _4G);
2850 }
2851 else
2852 {
2853 pPhys = NULL;
2854 fMorePhys = false;
2855 offPhys = offPhysLast = GUEST_PAGE_SIZE;
2856 }
2857 }
2858 }
2859
2860 /*
2861 * Handle access to space without handlers (that's easy).
2862 */
2863 VBOXSTRICTRC rcStrict2 = VINF_PGM_HANDLER_DO_DEFAULT;
2864 uint32_t cbRange = (uint32_t)cbWrite;
2865 Assert(cbRange == cbWrite);
2866
2867 /*
2868 * Physical handler.
2869 */
2870 if (!offPhys)
2871 {
2872#ifndef IN_RING3
2873 if (enmOrigin != PGMACCESSORIGIN_IEM)
2874 /* Cannot reliably handle informational status codes in this context */
2875 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2876#endif
2877 if (cbRange > offPhysLast + 1)
2878 cbRange = offPhysLast + 1;
2879
2880 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pPhys);
2881 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
2882 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pPhys->uUser
2883 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pPhys->uUser);
2884
2885 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2886 STAM_PROFILE_START(&pPhys->Stat, h);
2887
2888 /* Most handlers will want to release the PGM lock for deadlock prevention
2889 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2890 dirty page trackers will want to keep it for performance reasons. */
2891 PGM_LOCK_ASSERT_OWNER(pVM);
2892 if (pCurType->fKeepPgmLock)
2893 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2894 else
2895 {
2896 PGM_UNLOCK(pVM);
2897 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2898 PGM_LOCK_VOID(pVM);
2899 }
2900
2901 STAM_PROFILE_STOP(&pPhys->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2902 pPhys = NULL; /* might not be valid anymore. */
2903 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true),
2904 ("rcStrict2=%Rrc (rcStrict=%Rrc) GCPhys=%RGp pPage=%R[pgmpage] %s\n", VBOXSTRICTRC_VAL(rcStrict2),
2905 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pPhys ? R3STRING(pPhys->pszDesc) : ""));
2906 }
2907
2908 /*
2909 * Execute the default action and merge the status codes.
2910 */
2911 if (rcStrict2 == VINF_PGM_HANDLER_DO_DEFAULT)
2912 {
2913 memcpy(pvDst, pvBuf, cbRange);
2914 rcStrict2 = VINF_SUCCESS;
2915 }
2916 else if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2917 {
2918 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2919 return rcStrict2;
2920 }
2921 else
2922 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2923
2924 /*
2925 * Advance if we've got more stuff to do.
2926 */
2927 if (cbRange >= cbWrite)
2928 {
2929 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2930 return rcStrict;
2931 }
2932
2933
2934 cbWrite -= cbRange;
2935 GCPhys += cbRange;
2936 pvBuf = (uint8_t *)pvBuf + cbRange;
2937 pvDst = (uint8_t *)pvDst + cbRange;
2938
2939 offPhys -= cbRange;
2940 offPhysLast -= cbRange;
2941 }
2942}
2943
2944
2945/**
2946 * Write to physical memory.
2947 *
2948 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2949 * want to ignore those.
2950 *
2951 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2952 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2953 * @retval VINF_SUCCESS in all context - write completed.
2954 *
2955 * @retval VINF_EM_OFF in RC and R0 - write completed.
2956 * @retval VINF_EM_SUSPEND in RC and R0 - write completed.
2957 * @retval VINF_EM_RESET in RC and R0 - write completed.
2958 * @retval VINF_EM_HALT in RC and R0 - write completed.
2959 * @retval VINF_SELM_SYNC_GDT in RC only - write completed.
2960 *
2961 * @retval VINF_EM_DBG_STOP in RC and R0 - write completed.
2962 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - write completed.
2963 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2964 *
2965 * @retval VINF_IOM_R3_MMIO_WRITE in RC and R0.
2966 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2967 * @retval VINF_IOM_R3_MMIO_COMMIT_WRITE in RC and R0.
2968 *
2969 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT in RC only - write completed.
2970 * @retval VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT in RC only.
2971 * @retval VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT in RC only.
2972 * @retval VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT in RC only.
2973 * @retval VINF_CSAM_PENDING_ACTION in RC only.
2974 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2975 *
2976 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2977 * haven't been cleared for strict status codes yet.
2978 *
2979 *
2980 * @param pVM The cross context VM structure.
2981 * @param GCPhys Physical address to write to.
2982 * @param pvBuf What to write.
2983 * @param cbWrite How many bytes to write.
2984 * @param enmOrigin Who is calling.
2985 */
2986VMMDECL(VBOXSTRICTRC) PGMPhysWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2987{
2988 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2989 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2990 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2991
2992 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWrite));
2993 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2994
2995 PGM_LOCK_VOID(pVM);
2996
2997 /*
2998 * Copy loop on ram ranges.
2999 */
3000 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
3001 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
3002 for (;;)
3003 {
3004 /* Inside range or not? */
3005 if (pRam && GCPhys >= pRam->GCPhys)
3006 {
3007 /*
3008 * Must work our way thru this page by page.
3009 */
3010 RTGCPTR off = GCPhys - pRam->GCPhys;
3011 while (off < pRam->cb)
3012 {
3013 RTGCPTR iPage = off >> GUEST_PAGE_SHIFT;
3014 PPGMPAGE pPage = &pRam->aPages[iPage];
3015 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
3016 if (cb > cbWrite)
3017 cb = cbWrite;
3018
3019 /*
3020 * Normal page? Get the pointer to it.
3021 */
3022 if ( !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
3023 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3024 {
3025 PGMPAGEMAPLOCK PgMpLck;
3026 void *pvDst;
3027 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
3028 if (RT_SUCCESS(rc))
3029 {
3030 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
3031 memcpy(pvDst, pvBuf, cb);
3032 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
3033 }
3034 /* Ignore writes to ballooned pages. */
3035 else if (!PGM_PAGE_IS_BALLOONED(pPage))
3036 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
3037 pRam->GCPhys + off, pPage, rc));
3038 }
3039 /*
3040 * Active WRITE or ALL access handlers.
3041 */
3042 else
3043 {
3044 VBOXSTRICTRC rcStrict2 = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
3045 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
3046 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
3047 else
3048 {
3049 PGM_UNLOCK(pVM);
3050 return rcStrict2;
3051 }
3052 }
3053
3054 /* next page */
3055 if (cb >= cbWrite)
3056 {
3057 PGM_UNLOCK(pVM);
3058 return rcStrict;
3059 }
3060
3061 cbWrite -= cb;
3062 off += cb;
3063 pvBuf = (const char *)pvBuf + cb;
3064 } /* walk pages in ram range */
3065
3066 GCPhys = pRam->GCPhysLast + 1;
3067 }
3068 else
3069 {
3070 /*
3071 * Unassigned address space, skip it.
3072 */
3073 if (!pRam)
3074 break;
3075 size_t cb = pRam->GCPhys - GCPhys;
3076 if (cb >= cbWrite)
3077 break;
3078 cbWrite -= cb;
3079 pvBuf = (const char *)pvBuf + cb;
3080 GCPhys += cb;
3081 }
3082
3083 /* Advance range if necessary. */
3084 while (pRam && GCPhys > pRam->GCPhysLast)
3085 pRam = pRam->CTX_SUFF(pNext);
3086 } /* Ram range walk */
3087
3088 PGM_UNLOCK(pVM);
3089 return rcStrict;
3090}
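
/*
 * Sketch of merging strict status codes from two consecutive writes, using the
 * same helper macros as the implementation above (the addresses, buffers and
 * sizes are placeholders supplied by the caller):
 *
 *      VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhysFirst, pvBuf1, cb1, enmOrigin);
 *      if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *      {
 *          VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM, GCPhysSecond, pvBuf2, cb2, enmOrigin);
 *          if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
 *              PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
 *          else
 *              rcStrict = rcStrict2;
 *      }
 */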
3091
3092
3093/**
3094 * Read from guest physical memory by GC physical address, bypassing
3095 * MMIO and access handlers.
3096 *
3097 * @returns VBox status code.
3098 * @param pVM The cross context VM structure.
3099 * @param pvDst The destination address.
3100 * @param GCPhysSrc The source address (GC physical address).
3101 * @param cb The number of bytes to read.
3102 */
3103VMMDECL(int) PGMPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
3104{
3105 /*
3106 * Treat the first page as a special case.
3107 */
3108 if (!cb)
3109 return VINF_SUCCESS;
3110
3111 /* map the 1st page */
3112 void const *pvSrc;
3113 PGMPAGEMAPLOCK Lock;
3114 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3115 if (RT_FAILURE(rc))
3116 return rc;
3117
3118 /* optimize for the case where access is completely within the first page. */
3119 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysSrc & GUEST_PAGE_OFFSET_MASK);
3120 if (RT_LIKELY(cb <= cbPage))
3121 {
3122 memcpy(pvDst, pvSrc, cb);
3123 PGMPhysReleasePageMappingLock(pVM, &Lock);
3124 return VINF_SUCCESS;
3125 }
3126
3127 /* copy to the end of the page. */
3128 memcpy(pvDst, pvSrc, cbPage);
3129 PGMPhysReleasePageMappingLock(pVM, &Lock);
3130 GCPhysSrc += cbPage;
3131 pvDst = (uint8_t *)pvDst + cbPage;
3132 cb -= cbPage;
3133
3134 /*
3135 * Page by page.
3136 */
3137 for (;;)
3138 {
3139 /* map the page */
3140 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3141 if (RT_FAILURE(rc))
3142 return rc;
3143
3144 /* last page? */
3145 if (cb <= GUEST_PAGE_SIZE)
3146 {
3147 memcpy(pvDst, pvSrc, cb);
3148 PGMPhysReleasePageMappingLock(pVM, &Lock);
3149 return VINF_SUCCESS;
3150 }
3151
3152 /* copy the entire page and advance */
3153 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3154 PGMPhysReleasePageMappingLock(pVM, &Lock);
3155 GCPhysSrc += GUEST_PAGE_SIZE;
3156 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
3157 cb -= GUEST_PAGE_SIZE;
3158 }
3159 /* won't ever get here. */
3160}
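
/*
 * Worked example for the page splitting above, assuming 4 KiB guest pages:
 * reading cb=16 bytes starting at GCPhysSrc=0x1ffc gives
 * cbPage = GUEST_PAGE_SIZE - (GCPhysSrc & GUEST_PAGE_OFFSET_MASK) = 0x1000 - 0xffc = 4,
 * so the first 4 bytes are copied from the first page and the remaining 12
 * from the page that follows, each under its own short-lived mapping lock.
 */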
3161
3162
3163/**
3164 * Write to guest physical memory by GC physical address.
3166 *
3167 * This will bypass MMIO and access handlers.
3168 *
3169 * @returns VBox status code.
3170 * @param pVM The cross context VM structure.
3171 * @param GCPhysDst The GC physical address of the destination.
3172 * @param pvSrc The source buffer.
3173 * @param cb The number of bytes to write.
3174 */
3175VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
3176{
3177 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
3178
3179 /*
3180 * Treat the first page as a special case.
3181 */
3182 if (!cb)
3183 return VINF_SUCCESS;
3184
3185 /* map the 1st page */
3186 void *pvDst;
3187 PGMPAGEMAPLOCK Lock;
3188 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3189 if (RT_FAILURE(rc))
3190 return rc;
3191
3192 /* optimize for the case where access is completely within the first page. */
3193 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysDst & GUEST_PAGE_OFFSET_MASK);
3194 if (RT_LIKELY(cb <= cbPage))
3195 {
3196 memcpy(pvDst, pvSrc, cb);
3197 PGMPhysReleasePageMappingLock(pVM, &Lock);
3198 return VINF_SUCCESS;
3199 }
3200
3201 /* copy to the end of the page. */
3202 memcpy(pvDst, pvSrc, cbPage);
3203 PGMPhysReleasePageMappingLock(pVM, &Lock);
3204 GCPhysDst += cbPage;
3205 pvSrc = (const uint8_t *)pvSrc + cbPage;
3206 cb -= cbPage;
3207
3208 /*
3209 * Page by page.
3210 */
3211 for (;;)
3212 {
3213 /* map the page */
3214 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3215 if (RT_FAILURE(rc))
3216 return rc;
3217
3218 /* last page? */
3219 if (cb <= GUEST_PAGE_SIZE)
3220 {
3221 memcpy(pvDst, pvSrc, cb);
3222 PGMPhysReleasePageMappingLock(pVM, &Lock);
3223 return VINF_SUCCESS;
3224 }
3225
3226 /* copy the entire page and advance */
3227 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3228 PGMPhysReleasePageMappingLock(pVM, &Lock);
3229 GCPhysDst += GUEST_PAGE_SIZE;
3230 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3231 cb -= GUEST_PAGE_SIZE;
3232 }
3233 /* won't ever get here. */
3234}
3235
3236
3237/**
3238 * Read from guest physical memory referenced by GC pointer.
3239 *
3240 * This function uses the current CR3/CR0/CR4 of the guest and will
3241 * bypass access handlers and not set any accessed bits.
3242 *
3243 * @returns VBox status code.
3244 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3245 * @param pvDst The destination address.
3246 * @param GCPtrSrc The source address (GC pointer).
3247 * @param cb The number of bytes to read.
3248 */
3249VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3250{
3251 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3252/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3253
3254 /*
3255 * Treat the first page as a special case.
3256 */
3257 if (!cb)
3258 return VINF_SUCCESS;
3259
3260 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleRead));
3261 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3262
3263 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3264 * when many VCPUs are fighting for the lock.
3265 */
3266 PGM_LOCK_VOID(pVM);
3267
3268 /* map the 1st page */
3269 void const *pvSrc;
3270 PGMPAGEMAPLOCK Lock;
3271 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3272 if (RT_FAILURE(rc))
3273 {
3274 PGM_UNLOCK(pVM);
3275 return rc;
3276 }
3277
3278 /* optimize for the case where access is completely within the first page. */
3279 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3280 if (RT_LIKELY(cb <= cbPage))
3281 {
3282 memcpy(pvDst, pvSrc, cb);
3283 PGMPhysReleasePageMappingLock(pVM, &Lock);
3284 PGM_UNLOCK(pVM);
3285 return VINF_SUCCESS;
3286 }
3287
3288 /* copy to the end of the page. */
3289 memcpy(pvDst, pvSrc, cbPage);
3290 PGMPhysReleasePageMappingLock(pVM, &Lock);
3291 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3292 pvDst = (uint8_t *)pvDst + cbPage;
3293 cb -= cbPage;
3294
3295 /*
3296 * Page by page.
3297 */
3298 for (;;)
3299 {
3300 /* map the page */
3301 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3302 if (RT_FAILURE(rc))
3303 {
3304 PGM_UNLOCK(pVM);
3305 return rc;
3306 }
3307
3308 /* last page? */
3309 if (cb <= GUEST_PAGE_SIZE)
3310 {
3311 memcpy(pvDst, pvSrc, cb);
3312 PGMPhysReleasePageMappingLock(pVM, &Lock);
3313 PGM_UNLOCK(pVM);
3314 return VINF_SUCCESS;
3315 }
3316
3317 /* copy the entire page and advance */
3318 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3319 PGMPhysReleasePageMappingLock(pVM, &Lock);
3320 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + GUEST_PAGE_SIZE);
3321 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
3322 cb -= GUEST_PAGE_SIZE;
3323 }
3324 /* won't ever get here. */
3325}
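
/* Example (not part of the original source): a minimal usage sketch for
 * PGMPhysSimpleReadGCPtr, assuming an EMT caller with a valid pVCpu and a
 * hypothetical guest-virtual address GCPtrSrc pointing at a 64-bit value:
 *
 *     uint64_t uValue = 0;
 *     int rc = PGMPhysSimpleReadGCPtr(pVCpu, &uValue, GCPtrSrc, sizeof(uValue));
 *     if (RT_FAILURE(rc))
 *         return rc;   // the guest mapping could not be resolved.
 *
 * Access handlers are bypassed and no accessed bits are set, so this is only
 * suitable when a raw, side-effect free read is explicitly wanted.
 */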
3326
3327
3328/**
3329 * Write to guest physical memory referenced by GC pointer.
3330 *
3331 * This function uses the current CR3/CR0/CR4 of the guest and will
3332 * bypass access handlers and not set dirty or accessed bits.
3333 *
3334 * @returns VBox status code.
3335 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3336 * @param GCPtrDst The destination address (GC pointer).
3337 * @param pvSrc The source address.
3338 * @param cb The number of bytes to write.
3339 */
3340VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3341{
3342 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3343 VMCPU_ASSERT_EMT(pVCpu);
3344
3345 /*
3346 * Treat the first page as a special case.
3347 */
3348 if (!cb)
3349 return VINF_SUCCESS;
3350
3351 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWrite));
3352 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3353
3354 /* map the 1st page */
3355 void *pvDst;
3356 PGMPAGEMAPLOCK Lock;
3357 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3358 if (RT_FAILURE(rc))
3359 return rc;
3360
3361 /* optimize for the case where access is completely within the first page. */
3362 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3363 if (RT_LIKELY(cb <= cbPage))
3364 {
3365 memcpy(pvDst, pvSrc, cb);
3366 PGMPhysReleasePageMappingLock(pVM, &Lock);
3367 return VINF_SUCCESS;
3368 }
3369
3370 /* copy to the end of the page. */
3371 memcpy(pvDst, pvSrc, cbPage);
3372 PGMPhysReleasePageMappingLock(pVM, &Lock);
3373 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3374 pvSrc = (const uint8_t *)pvSrc + cbPage;
3375 cb -= cbPage;
3376
3377 /*
3378 * Page by page.
3379 */
3380 for (;;)
3381 {
3382 /* map the page */
3383 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3384 if (RT_FAILURE(rc))
3385 return rc;
3386
3387 /* last page? */
3388 if (cb <= GUEST_PAGE_SIZE)
3389 {
3390 memcpy(pvDst, pvSrc, cb);
3391 PGMPhysReleasePageMappingLock(pVM, &Lock);
3392 return VINF_SUCCESS;
3393 }
3394
3395 /* copy the entire page and advance */
3396 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3397 PGMPhysReleasePageMappingLock(pVM, &Lock);
3398 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
3399 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3400 cb -= GUEST_PAGE_SIZE;
3401 }
3402 /* won't ever get here. */
3403}
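
/* Example (not part of the original source): a hedged sketch of writing through
 * the guest's current paging mode without setting dirty/accessed bits; pVCpu is
 * assumed to be the calling EMT's and GCPtrDst is a hypothetical guest address:
 *
 *     uint32_t const uValue = UINT32_C(0xdeadbeef);
 *     int rc = PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrDst, &uValue, sizeof(uValue));
 *     AssertRCReturn(rc, rc);
 */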
3404
3405
3406/**
3407 * Write to guest physical memory referenced by GC pointer and update the PTE.
3408 *
3409 * This function uses the current CR3/CR0/CR4 of the guest and will
3410 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3411 *
3412 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3413 *
3414 * @returns VBox status code.
3415 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3416 * @param GCPtrDst The destination address (GC pointer).
3417 * @param pvSrc The source address.
3418 * @param cb The number of bytes to write.
3419 */
3420VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3421{
3422 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3423 VMCPU_ASSERT_EMT(pVCpu);
3424
3425 /*
3426 * Treat the first page as a special case.
3427 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage calls.
3428 */
3429 if (!cb)
3430 return VINF_SUCCESS;
3431
3432 /* map the 1st page */
3433 void *pvDst;
3434 PGMPAGEMAPLOCK Lock;
3435 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3436 if (RT_FAILURE(rc))
3437 return rc;
3438
3439 /* optimize for the case where access is completely within the first page. */
3440 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3441 if (RT_LIKELY(cb <= cbPage))
3442 {
3443 memcpy(pvDst, pvSrc, cb);
3444 PGMPhysReleasePageMappingLock(pVM, &Lock);
3445 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3446 return VINF_SUCCESS;
3447 }
3448
3449 /* copy to the end of the page. */
3450 memcpy(pvDst, pvSrc, cbPage);
3451 PGMPhysReleasePageMappingLock(pVM, &Lock);
3452 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3453 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3454 pvSrc = (const uint8_t *)pvSrc + cbPage;
3455 cb -= cbPage;
3456
3457 /*
3458 * Page by page.
3459 */
3460 for (;;)
3461 {
3462 /* map the page */
3463 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3464 if (RT_FAILURE(rc))
3465 return rc;
3466
3467 /* last page? */
3468 if (cb <= GUEST_PAGE_SIZE)
3469 {
3470 memcpy(pvDst, pvSrc, cb);
3471 PGMPhysReleasePageMappingLock(pVM, &Lock);
3472 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3473 return VINF_SUCCESS;
3474 }
3475
3476 /* copy the entire page and advance */
3477 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3478 PGMPhysReleasePageMappingLock(pVM, &Lock);
3479 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3480 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
3481 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3482 cb -= GUEST_PAGE_SIZE;
3483 }
3484 /* won't ever get here. */
3485}
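
/* Example (not part of the original source): same call shape as
 * PGMPhysSimpleWriteGCPtr, but this variant also sets the accessed and dirty
 * bits in the guest PTEs, which is what an emulated guest store usually wants;
 * GCPtrDst and the value written are hypothetical:
 *
 *     uint16_t const uSel = 0x0010;
 *     int rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, &uSel, sizeof(uSel));
 *     if (RT_FAILURE(rc))
 *         return rc;
 */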
3486
3487
3488/**
3489 * Read from guest physical memory referenced by GC pointer.
3490 *
3491 * This function uses the current CR3/CR0/CR4 of the guest and will
3492 * respect access handlers and set accessed bits.
3493 *
3494 * @returns Strict VBox status, see PGMPhysRead for details.
3495 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3496 * specified virtual address.
3497 *
3498 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3499 * @param pvDst The destination address.
3500 * @param GCPtrSrc The source address (GC pointer).
3501 * @param cb The number of bytes to read.
3502 * @param enmOrigin Who is calling.
3503 * @thread EMT(pVCpu)
3504 */
3505VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3506{
3507 int rc;
3508 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3509 VMCPU_ASSERT_EMT(pVCpu);
3510
3511 /*
3512 * Anything to do?
3513 */
3514 if (!cb)
3515 return VINF_SUCCESS;
3516
3517 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3518
3519 /*
3520 * Optimize reads within a single page.
3521 */
3522 if (((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
3523 {
3524 /* Convert virtual to physical address + flags */
3525 PGMPTWALK Walk;
3526 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3527 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3528 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3529
3530 /* mark the guest page as accessed. */
3531 if (!(Walk.fEffective & X86_PTE_A))
3532 {
3533 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3534 AssertRC(rc);
3535 }
3536
3537 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3538 }
3539
3540 /*
3541 * Page by page.
3542 */
3543 for (;;)
3544 {
3545 /* Convert virtual to physical address + flags */
3546 PGMPTWALK Walk;
3547 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3548 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3549 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3550
3551 /* mark the guest page as accessed. */
3552 if (!(Walk.fEffective & X86_PTE_A))
3553 {
3554 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3555 AssertRC(rc);
3556 }
3557
3558 /* copy */
3559 size_t cbRead = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3560 if (cbRead < cb)
3561 {
3562 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3563 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3564 { /* likely */ }
3565 else
3566 return rcStrict;
3567 }
3568 else /* Last page (cbRead is GUEST_PAGE_SIZE, we only need cb!) */
3569 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3570
3571 /* next */
3572 Assert(cb > cbRead);
3573 cb -= cbRead;
3574 pvDst = (uint8_t *)pvDst + cbRead;
3575 GCPtrSrc += cbRead;
3576 }
3577}
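
/* Example (not part of the original source): a sketch of a handler-respecting
 * read; the access origin shown is only illustrative, callers pass whatever
 * PGMACCESSORIGIN value matches who they are:
 *
 *     uint8_t abBuf[32];
 *     VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, abBuf, GCPtrSrc, sizeof(abBuf),
 *                                              PGMACCESSORIGIN_DEVICE);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;  // may be an error or an informational handler status.
 */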
3578
3579
3580/**
3581 * Write to guest physical memory referenced by GC pointer.
3582 *
3583 * This function uses the current CR3/CR0/CR4 of the guest and will
3584 * respect access handlers and set dirty and accessed bits.
3585 *
3586 * @returns Strict VBox status, see PGMPhysWrite for details.
3587 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3588 * specified virtual address.
3589 *
3590 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3591 * @param GCPtrDst The destination address (GC pointer).
3592 * @param pvSrc The source address.
3593 * @param cb The number of bytes to write.
3594 * @param enmOrigin Who is calling.
3595 */
3596VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3597{
3598 int rc;
3599 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3600 VMCPU_ASSERT_EMT(pVCpu);
3601
3602 /*
3603 * Anything to do?
3604 */
3605 if (!cb)
3606 return VINF_SUCCESS;
3607
3608 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3609
3610 /*
3611 * Optimize writes within a single page.
3612 */
3613 if (((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
3614 {
3615 /* Convert virtual to physical address + flags */
3616 PGMPTWALK Walk;
3617 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3618 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3619 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3620
3621 /* Mention when we ignore X86_PTE_RW... */
3622 if (!(Walk.fEffective & X86_PTE_RW))
3623 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3624
3625 /* Mark the guest page as accessed and dirty if necessary. */
3626 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3627 {
3628 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3629 AssertRC(rc);
3630 }
3631
3632 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3633 }
3634
3635 /*
3636 * Page by page.
3637 */
3638 for (;;)
3639 {
3640 /* Convert virtual to physical address + flags */
3641 PGMPTWALK Walk;
3642 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3643 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3644 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3645
3646 /* Mention when we ignore X86_PTE_RW... */
3647 if (!(Walk.fEffective & X86_PTE_RW))
3648 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3649
3650 /* Mark the guest page as accessed and dirty if necessary. */
3651 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3652 {
3653 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3654 AssertRC(rc);
3655 }
3656
3657 /* copy */
3658 size_t cbWrite = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3659 if (cbWrite < cb)
3660 {
3661 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3662 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3663 { /* likely */ }
3664 else
3665 return rcStrict;
3666 }
3667 else /* Last page (cbWrite is GUEST_PAGE_SIZE, we only need cb!) */
3668 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3669
3670 /* next */
3671 Assert(cb > cbWrite);
3672 cb -= cbWrite;
3673 pvSrc = (uint8_t *)pvSrc + cbWrite;
3674 GCPtrDst += cbWrite;
3675 }
3676}
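
/* Example (not part of the original source): the write-side counterpart; access
 * handlers are respected and the A/D bits are set, again with an illustrative
 * access origin:
 *
 *     VBOXSTRICTRC rcStrict = PGMPhysWriteGCPtr(pVCpu, GCPtrDst, abBuf, sizeof(abBuf),
 *                                               PGMACCESSORIGIN_DEVICE);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */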
3677
3678
3679/**
3680 * Return the page type of the specified physical address.
3681 *
3682 * @returns The page type.
3683 * @param pVM The cross context VM structure.
3684 * @param GCPhys Guest physical address
3685 */
3686VMM_INT_DECL(PGMPAGETYPE) PGMPhysGetPageType(PVMCC pVM, RTGCPHYS GCPhys)
3687{
3688 PGM_LOCK_VOID(pVM);
3689 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3690 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
3691 PGM_UNLOCK(pVM);
3692
3693 return enmPgType;
3694}
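
/* Example (not part of the original source): a quick type check, e.g. deciding
 * whether an address needs MMIO treatment; doSomethingMmio is a hypothetical
 * helper:
 *
 *     PGMPAGETYPE const enmType = PGMPhysGetPageType(pVM, GCPhys);
 *     if (enmType == PGMPAGETYPE_INVALID)
 *         return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;  // nothing mapped at GCPhys.
 *     if (enmType == PGMPAGETYPE_MMIO)
 *         return doSomethingMmio(pVM, GCPhys);
 */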
3695
3696
3697/**
3698 * Converts a GC physical address to a HC ring-3 pointer, with some
3699 * additional checks.
3700 *
3701 * @returns VBox status code (no informational statuses).
3702 *
3703 * @param pVM The cross context VM structure.
3704 * @param pVCpu The cross context virtual CPU structure of the
3705 * calling EMT.
3706 * @param GCPhys The GC physical address to convert. This API masks
3707 * the A20 line when necessary.
3708 * @param puTlbPhysRev Where to read the physical TLB revision. The read must
3709 * be done while holding the PGM lock.
3710 * @param ppb Where to store the pointer corresponding to GCPhys
3711 * on success.
3712 * @param pfTlb The TLB flags and revision. We only add stuff.
3713 *
3714 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr and
3715 * PGMPhysIemGCPhys2Ptr.
3716 *
3717 * @thread EMT(pVCpu).
3718 */
3719VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
3720 R3R0PTRTYPE(uint8_t *) *ppb,
3721 uint64_t *pfTlb)
3722{
3723 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
3724 Assert(!(GCPhys & X86_PAGE_OFFSET_MASK));
3725
3726 PGM_LOCK_VOID(pVM);
3727
3728 PPGMRAMRANGE pRam;
3729 PPGMPAGE pPage;
3730 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3731 if (RT_SUCCESS(rc))
3732 {
3733 if (!PGM_PAGE_IS_BALLOONED(pPage))
3734 {
3735 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3736 {
3737 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
3738 {
3739 /*
3740 * No access handler.
3741 */
3742 switch (PGM_PAGE_GET_STATE(pPage))
3743 {
3744 case PGM_PAGE_STATE_ALLOCATED:
3745 Assert(!PGM_PAGE_IS_CODE_PAGE(pPage));
3746 *pfTlb |= *puTlbPhysRev;
3747 break;
3748 case PGM_PAGE_STATE_BALLOONED:
3749 AssertFailed();
3750 RT_FALL_THRU();
3751 case PGM_PAGE_STATE_ZERO:
3752 case PGM_PAGE_STATE_SHARED:
3753 case PGM_PAGE_STATE_WRITE_MONITORED:
3754 if (!PGM_PAGE_IS_CODE_PAGE(pPage))
3755 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3756 else
3757 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_CODE_PAGE;
3758 break;
3759 }
3760
3761 PPGMPAGEMAPTLBE pTlbe;
3762 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3763 AssertLogRelRCReturn(rc, rc);
3764 *ppb = (uint8_t *)pTlbe->pv;
3765 }
3766 else if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
3767 {
3768 /*
3769 * MMIO or similar all access handler: Catch all access.
3770 */
3771 *pfTlb |= *puTlbPhysRev
3772 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3773 *ppb = NULL;
3774 }
3775 else
3776 {
3777 /*
3778 * Write access handler: Catch write accesses if active.
3779 */
3780 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
3781 {
3782 if (!PGM_PAGE_IS_CODE_PAGE(pPage)) /* ROM pages end up here */
3783 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3784 else
3785 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_CODE_PAGE;
3786 }
3787 else
3788 switch (PGM_PAGE_GET_STATE(pPage))
3789 {
3790 case PGM_PAGE_STATE_ALLOCATED:
3791 Assert(!PGM_PAGE_IS_CODE_PAGE(pPage));
3792 *pfTlb |= *puTlbPhysRev;
3793 break;
3794 case PGM_PAGE_STATE_BALLOONED:
3795 AssertFailed();
3796 RT_FALL_THRU();
3797 case PGM_PAGE_STATE_ZERO:
3798 case PGM_PAGE_STATE_SHARED:
3799 case PGM_PAGE_STATE_WRITE_MONITORED:
3800 if (!PGM_PAGE_IS_CODE_PAGE(pPage))
3801 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3802 else
3803 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_CODE_PAGE;
3804 break;
3805 }
3806
3807 PPGMPAGEMAPTLBE pTlbe;
3808 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3809 AssertLogRelRCReturn(rc, rc);
3810 *ppb = (uint8_t *)pTlbe->pv;
3811 }
3812 }
3813 else
3814 {
3815 /* Alias MMIO: For now, we catch all access. */
3816 *pfTlb |= *puTlbPhysRev
3817 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3818 *ppb = NULL;
3819 }
3820 }
3821 else
3822 {
3823        /* Ballooned: Shouldn't get here, but reads return the zero page via PGMPhysRead and writes go to /dev/null. */
3824 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3825 *ppb = NULL;
3826 }
3827 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 pPage=%R[pgmpage]\n", GCPhys, *ppb, *pfTlb, pPage));
3828 }
3829 else
3830 {
3831 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ
3832 | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 | PGMIEMGCPHYS2PTR_F_UNASSIGNED;
3833 *ppb = NULL;
3834 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 (rc=%Rrc)\n", GCPhys, *ppb, *pfTlb, rc));
3835 }
3836
3837 PGM_UNLOCK(pVM);
3838 return VINF_SUCCESS;
3839}
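
/* Example (not part of the original source): how an IEM-style physical TLB
 * might consume the outputs; GCPhys must be page aligned here, and uTlbPhysRev
 * stands in for the caller's TLB revision variable:
 *
 *     uint8_t *pbMem = NULL;
 *     uint64_t fTlb  = 0;
 *     int rc = PGMPhysIemGCPhys2PtrNoLock(pVM, pVCpu, GCPhys, &uTlbPhysRev, &pbMem, &fTlb);
 *     AssertRCReturn(rc, rc);
 *     bool const fCanRead  = !(fTlb & (PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3));
 *     bool const fCanWrite = fCanRead && !(fTlb & PGMIEMGCPHYS2PTR_F_NO_WRITE);
 */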
3840
3841
3842/**
3843 * Converts a GC physical address to a HC ring-3 pointer, with some
3844 * additional checks.
3845 *
3846 * @returns VBox status code (no informational statuses).
3847 * @retval VINF_SUCCESS on success.
3848 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3849 * access handler of some kind.
3850 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3851 * accesses or is odd in any way.
3852 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3853 *
3854 * @param pVM The cross context VM structure.
3855 * @param pVCpu The cross context virtual CPU structure of the
3856 * calling EMT.
3857 * @param GCPhys The GC physical address to convert. This API masks
3858 * the A20 line when necessary.
3859 * @param fWritable Whether write access is required.
3860 * @param fByPassHandlers Whether to bypass access handlers.
3861 * @param ppv Where to store the pointer corresponding to GCPhys
3862 * on success.
3863 * @param pLock Where to store the page mapping lock for later release via PGMPhysReleasePageMappingLock.
3864 *
3865 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
3866 * @thread EMT(pVCpu).
3867 */
3868VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
3869 void **ppv, PPGMPAGEMAPLOCK pLock)
3870{
3871 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
3872
3873 PGM_LOCK_VOID(pVM);
3874
3875 PPGMRAMRANGE pRam;
3876 PPGMPAGE pPage;
3877 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3878 if (RT_SUCCESS(rc))
3879 {
3880 if (PGM_PAGE_IS_BALLOONED(pPage))
3881 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3882 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3883 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3884 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3885 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3886 rc = VINF_SUCCESS;
3887 else
3888 {
3889 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3890 {
3891 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3892 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3893 }
3894 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
3895 {
3896 Assert(!fByPassHandlers);
3897 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3898 }
3899 }
3900 if (RT_SUCCESS(rc))
3901 {
3902 int rc2;
3903
3904 /* Make sure what we return is writable. */
3905 if (fWritable)
3906 switch (PGM_PAGE_GET_STATE(pPage))
3907 {
3908 case PGM_PAGE_STATE_ALLOCATED:
3909 break;
3910 case PGM_PAGE_STATE_BALLOONED:
3911 AssertFailed();
3912 break;
3913 case PGM_PAGE_STATE_ZERO:
3914 case PGM_PAGE_STATE_SHARED:
3915 case PGM_PAGE_STATE_WRITE_MONITORED:
3916 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
3917 AssertLogRelRCReturn(rc2, rc2);
3918 break;
3919 }
3920
3921 /* Get a ring-3 mapping of the address. */
3922 PPGMPAGEMAPTLBE pTlbe;
3923 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3924 AssertLogRelRCReturn(rc2, rc2);
3925
3926 /* Lock it and calculate the address. */
3927 if (fWritable)
3928 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
3929 else
3930 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
3931 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3932
3933 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
3934 }
3935 else
3936 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
3937
3938 /* else: handler catching all access, no pointer returned. */
3939 }
3940 else
3941 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
3942
3943 PGM_UNLOCK(pVM);
3944 return rc;
3945}
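
/* Example (not part of the original source): mapping a guest physical page for
 * a direct write and releasing the lock afterwards; a failure status such as
 * VERR_PGM_PHYS_TLB_CATCH_WRITE tells the caller to go through PGMPhysWrite
 * instead so the access handlers run (pvSrc/cbToWrite are hypothetical and must
 * not cross the page boundary):
 *
 *     void          *pvMem = NULL;
 *     PGMPAGEMAPLOCK Lock;
 *     int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys,
 *                                   true,   // fWritable
 *                                   false,  // fByPassHandlers
 *                                   &pvMem, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         memcpy(pvMem, pvSrc, cbToWrite);
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);
 *     }
 */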
3946
3947
3948/**
3949 * Checks if the given GCPhys page requires special handling for the given access
3950 * because it's MMIO or otherwise monitored.
3951 *
3952 * @returns VBox status code (no informational statuses).
3953 * @retval VINF_SUCCESS on success.
3954 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3955 * access handler of some kind.
3956 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3957 * accesses or is odd in any way.
3958 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3959 *
3960 * @param pVM The cross context VM structure.
3961 * @param GCPhys The GC physical address to convert. Since this is
3962 * only used for filling the REM TLB, the A20 mask must
3963 * be applied before calling this API.
3964 * @param fWritable Whether write access is required.
3965 * @param fByPassHandlers Whether to bypass access handlers.
3966 *
3967 * @remarks This is a watered-down version of PGMPhysIemGCPhys2Ptr and really just
3968 * a stop-gap that should be removed once there is a better TLB
3969 * for virtual address accesses.
3970 */
3971VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
3972{
3973 PGM_LOCK_VOID(pVM);
3974 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
3975
3976 PPGMRAMRANGE pRam;
3977 PPGMPAGE pPage;
3978 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3979 if (RT_SUCCESS(rc))
3980 {
3981 if (PGM_PAGE_IS_BALLOONED(pPage))
3982 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3983 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3984 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3985 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3986 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3987 rc = VINF_SUCCESS;
3988 else
3989 {
3990 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3991 {
3992 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3993 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3994 }
3995 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
3996 {
3997 Assert(!fByPassHandlers);
3998 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3999 }
4000 }
4001 }
4002
4003 PGM_UNLOCK(pVM);
4004 return rc;
4005}
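
/* Example (not part of the original source): probing whether a direct write
 * would be intercepted without actually mapping the page; the A20 mask is
 * assumed to have been applied to GCPhys already, as the docs require:
 *
 *     int rc = PGMPhysIemQueryAccess(pVM, GCPhys, true, false);  // fWritable, !fByPassHandlers
 *     bool const fDirectOk = rc == VINF_SUCCESS;
 *     // VERR_PGM_PHYS_TLB_CATCH_WRITE, VERR_PGM_PHYS_TLB_CATCH_ALL and
 *     // VERR_PGM_PHYS_TLB_UNASSIGNED all mean the access must take the slow,
 *     // handler-aware path (PGMPhysRead/PGMPhysWrite) or be failed.
 */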
4006
4007#ifdef VBOX_WITH_NATIVE_NEM
4008
4009/**
4010 * Interface used by NEM to check what to do on a memory access exit.
4011 *
4012 * @returns VBox status code.
4013 * @param pVM The cross context VM structure.
4014 * @param pVCpu The cross context per virtual CPU structure.
4015 * Optional.
4016 * @param GCPhys The guest physical address.
4017 * @param fMakeWritable Whether to try to make the page writable or not. If it
4018 * cannot be made writable, NEM_PAGE_PROT_WRITE won't
4019 * be returned and the return code will be unaffected.
4020 * @param pInfo Where to return the page information. This is
4021 * initialized even on failure.
4022 * @param pfnChecker Page in-sync checker callback. Optional.
4023 * @param pvUser User argument to pass to pfnChecker.
4024 */
4025VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fMakeWritable, PPGMPHYSNEMPAGEINFO pInfo,
4026 PFNPGMPHYSNEMCHECKPAGE pfnChecker, void *pvUser)
4027{
4028 PGM_LOCK_VOID(pVM);
4029
4030 PPGMPAGE pPage;
4031 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
4032 if (RT_SUCCESS(rc))
4033 {
4034 /* Try make it writable if requested. */
4035 pInfo->u2OldNemState = PGM_PAGE_GET_NEM_STATE(pPage);
4036 if (fMakeWritable)
4037 switch (PGM_PAGE_GET_STATE(pPage))
4038 {
4039 case PGM_PAGE_STATE_SHARED:
4040 case PGM_PAGE_STATE_WRITE_MONITORED:
4041 case PGM_PAGE_STATE_ZERO:
4042 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
4043 if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
4044 rc = VINF_SUCCESS;
4045 break;
4046 }
4047
4048 /* Fill in the info. */
4049 pInfo->HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
4050 pInfo->u2NemState = PGM_PAGE_GET_NEM_STATE(pPage);
4051 pInfo->fHasHandlers = PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) ? 1 : 0;
4052 PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
4053 pInfo->enmType = enmType;
4054 pInfo->fNemProt = pgmPhysPageCalcNemProtection(pPage, enmType);
4055 switch (PGM_PAGE_GET_STATE(pPage))
4056 {
4057 case PGM_PAGE_STATE_ALLOCATED:
4058 pInfo->fZeroPage = 0;
4059 break;
4060
4061 case PGM_PAGE_STATE_ZERO:
4062 pInfo->fZeroPage = 1;
4063 break;
4064
4065 case PGM_PAGE_STATE_WRITE_MONITORED:
4066 pInfo->fZeroPage = 0;
4067 break;
4068
4069 case PGM_PAGE_STATE_SHARED:
4070 pInfo->fZeroPage = 0;
4071 break;
4072
4073 case PGM_PAGE_STATE_BALLOONED:
4074 pInfo->fZeroPage = 1;
4075 break;
4076
4077 default:
4078 pInfo->fZeroPage = 1;
4079 AssertFailedStmt(rc = VERR_PGM_PHYS_PAGE_GET_IPE);
4080 }
4081
4082 /* Call the checker and update NEM state. */
4083 if (pfnChecker)
4084 {
4085 rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
4086 PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
4087 }
4088
4089 /* Done. */
4090 PGM_UNLOCK(pVM);
4091 }
4092 else
4093 {
4094 PGM_UNLOCK(pVM);
4095
4096 pInfo->HCPhys = NIL_RTHCPHYS;
4097 pInfo->fNemProt = NEM_PAGE_PROT_NONE;
4098 pInfo->u2NemState = 0;
4099 pInfo->fHasHandlers = 0;
4100 pInfo->fZeroPage = 0;
4101 pInfo->enmType = PGMPAGETYPE_INVALID;
4102 }
4103
4104 return rc;
4105}
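
/* Example (not part of the original source): a sketch of how a NEM backend
 * might query page info on a memory-access exit and decide whether it is safe
 * to map the page writable; no in-sync checker callback is used here:
 *
 *     PGMPHYSNEMPAGEINFO PageInfo;
 *     int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys,
 *                                        true,       // fMakeWritable
 *                                        &PageInfo, NULL, NULL);
 *     bool const fCanMapWritable = RT_SUCCESS(rc)
 *                               && (PageInfo.fNemProt & NEM_PAGE_PROT_WRITE);
 */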
4106
4107
4108/**
4109 * NEM helper that performs @a pfnCallback on pages with NEM state @a uMinState
4110 * or higher.
4111 *
4112 * @returns VBox status code from callback.
4113 * @param pVM The cross context VM structure.
4114 * @param pVCpu The cross context per CPU structure. This is
4115 * optional as it's only used for passing to the callback.
4116 * @param uMinState The minimum NEM state value to call on.
4117 * @param pfnCallback The callback function.
4118 * @param pvUser User argument for the callback.
4119 */
4120VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uMinState,
4121 PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
4122{
4123 /*
4124 * Just brute force this problem.
4125 */
4126 PGM_LOCK_VOID(pVM);
4127 int rc = VINF_SUCCESS;
4128 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
4129 {
4130 uint32_t const cPages = pRam->cb >> X86_PAGE_SHIFT;
4131 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4132 {
4133 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(&pRam->aPages[iPage]);
4134 if (u2State < uMinState)
4135 { /* likely */ }
4136 else
4137 {
4138 rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
4139 if (RT_SUCCESS(rc))
4140 PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
4141 else
4142 break;
4143 }
4144 }
4145 }
4146 PGM_UNLOCK(pVM);
4147
4148 return rc;
4149}
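
/* Example (not part of the original source): a hypothetical callback matching
 * the invocation above (pVM, pVCpu, GCPhys, pu2State, pvUser); whatever the
 * callback writes to *pu2State is stored back into the page by the enumerator:
 *
 *     static DECLCALLBACK(int) exampleResetNemState(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
 *                                                   uint8_t *pu2State, void *pvUser)
 *     {
 *         RT_NOREF(pVM, pVCpu, GCPhys, pvUser);
 *         *pu2State = 0;
 *         return VINF_SUCCESS;
 *     }
 *
 *     // ... then, for all pages currently at state 1 or higher:
 *     int rc = PGMPhysNemEnumPagesByState(pVM, pVCpu, 1, exampleResetNemState, NULL);
 */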
4150
4151
4152/**
4153 * Helper for setting the NEM state for a range of pages.
4154 *
4155 * @param paPages Array of pages to modify.
4156 * @param cPages How many pages to modify.
4157 * @param u2State The new state value.
4158 */
4159void pgmPhysSetNemStateForPages(PPGMPAGE paPages, RTGCPHYS cPages, uint8_t u2State)
4160{
4161 PPGMPAGE pPage = paPages;
4162 while (cPages-- > 0)
4163 {
4164 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
4165 pPage++;
4166 }
4167}
4168
4169#endif /* VBOX_WITH_NATIVE_NEM */
4170