VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@92296

Last change on this file since 92296 was 92162, checked in by vboxsync, 3 years ago

VMM/PGM,DevVGA: Baked MMIO2 dirty page tracking into PGM, moving it out of DevVGA. Using the handler state to record a page as dirty (PGM_PAGE_HNDL_PHYS_STATE_DISABLED). bugref:10122

1/* $Id: PGMAllPhys.cpp 92162 2021-10-31 23:34:31Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM_PHYS
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/iom.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/nem.h>
30#include "PGMInternal.h"
31#include <VBox/vmm/vmcc.h>
32#include "PGMInline.h"
33#include <VBox/param.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/asm-amd64-x86.h>
38#include <VBox/log.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*********************************************************************************************************************************
45* Defined Constants And Macros *
46*********************************************************************************************************************************/
47/** Enable the physical TLB. */
48#define PGM_WITH_PHYS_TLB
49
50/** @def PGM_HANDLER_PHYS_IS_VALID_STATUS
51 * Checks if valid physical access handler return code (normal handler, not PF).
52 *
53 * Checks if the given strict status code is one of the expected ones for a
54 * physical access handler in the current context.
55 *
56 * @returns true or false.
57 * @param a_rcStrict The status code.
58 * @param a_fWrite Whether it is a write or read being serviced.
59 *
60 * @remarks We wish to keep the list of statuses here as short as possible.
61 * When changing, please make sure to update the PGMPhysRead,
62 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
63 */
64#ifdef IN_RING3
65# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
66 ( (a_rcStrict) == VINF_SUCCESS \
67 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
68#elif defined(IN_RING0)
69#define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
70 ( (a_rcStrict) == VINF_SUCCESS \
71 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT \
72 \
73 || (a_rcStrict) == ((a_fWrite) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ) \
74 || (a_rcStrict) == VINF_IOM_R3_MMIO_READ_WRITE \
75 || ((a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE && (a_fWrite)) \
76 \
77 || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR \
78 || (a_rcStrict) == VINF_EM_DBG_STOP \
79 || (a_rcStrict) == VINF_EM_DBG_EVENT \
80 || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
81 || (a_rcStrict) == VINF_EM_OFF \
82 || (a_rcStrict) == VINF_EM_SUSPEND \
83 || (a_rcStrict) == VINF_EM_RESET \
84 )
85#else
86# error "Context?"
87#endif
88
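/** @par Usage sketch (illustrative only)
 * A rough sketch of how a dispatcher is expected to use this macro after
 * invoking a physical access handler; pfnHandler, pvDst and the surrounding
 * variables are placeholders, the real callers are the PGMPhysRead and
 * PGMPhysWrite paths mentioned above.
 * @code
 *      VBOXSTRICTRC rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf,
 *                                          PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
 *      AssertMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true /*fWrite*/),
 *                ("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
 *      if (rcStrict2 == VINF_PGM_HANDLER_DO_DEFAULT)
 *          memcpy(pvDst, pvBuf, cbBuf); /* handler wants PGM to do the default write */
 * @endcode
 */
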
89/** @def PGM_HANDLER_VIRT_IS_VALID_STATUS
90 * Checks if valid virtual access handler return code (normal handler, not PF).
91 *
92 * Checks if the given strict status code is one of the expected ones for a
93 * virtual access handler in the current context.
94 *
95 * @returns true or false.
96 * @param a_rcStrict The status code.
97 * @param a_fWrite Whether it is a write or read being serviced.
98 *
99 * @remarks We wish to keep the list of statuses here as short as possible.
100 * When changing, please make sure to update the PGMPhysRead,
101 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
102 */
103#ifdef IN_RING3
104# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
105 ( (a_rcStrict) == VINF_SUCCESS \
106 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
107#elif defined(IN_RING0)
108# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
109 (false /* no virtual handlers in ring-0! */ )
110#else
111# error "Context?"
112#endif
113
114
115
116#ifndef IN_RING3
117
118/**
119 * @callback_method_impl{FNPGMPHYSHANDLER,
120 * Dummy for forcing ring-3 handling of the access.}
121 */
122DECLEXPORT(VBOXSTRICTRC)
123pgmPhysHandlerRedirectToHC(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
124 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
125{
126 NOREF(pVM); NOREF(pVCpu); NOREF(GCPhys); NOREF(pvPhys); NOREF(pvBuf); NOREF(cbBuf);
127 NOREF(enmAccessType); NOREF(enmOrigin); NOREF(pvUser);
128 return VINF_EM_RAW_EMULATE_INSTR;
129}
130
131
132/**
133 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
134 * Dummy for forcing ring-3 handling of the access.}
135 */
136VMMDECL(VBOXSTRICTRC) pgmPhysPfHandlerRedirectToHC(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
137 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
138{
139 NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(GCPhysFault); NOREF(pvUser);
140 return VINF_EM_RAW_EMULATE_INSTR;
141}
142
143
144/**
145 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
146 * \#PF access handler callback for guest ROM range write access.}
147 *
148 * @remarks The @a pvUser argument points to the PGMROMRANGE.
149 */
150DECLEXPORT(VBOXSTRICTRC) pgmPhysRomWritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
151 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
152{
153 int rc;
154 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
155 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
156 NOREF(uErrorCode); NOREF(pvFault);
157
158 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
159
160 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
161 switch (pRom->aPages[iPage].enmProt)
162 {
163 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
164 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
165 {
166 /*
167 * If it's a simple instruction which doesn't change the cpu state
168 * we will simply skip it. Otherwise we'll have to defer it to REM.
169 */
170 uint32_t cbOp;
171 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
172 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
173 if ( RT_SUCCESS(rc)
174 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
175 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
176 {
177 switch (pDis->bOpCode)
178 {
179 /** @todo Find other instructions we can safely skip, possibly
180 * adding this kind of detection to DIS or EM. */
181 case OP_MOV:
182 pRegFrame->rip += cbOp;
183 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteHandled);
184 return VINF_SUCCESS;
185 }
186 }
187 break;
188 }
189
190 case PGMROMPROT_READ_RAM_WRITE_RAM:
191 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
192 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
193 AssertRC(rc);
194 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
195
196 case PGMROMPROT_READ_ROM_WRITE_RAM:
197 /* Handle it in ring-3 because it's *way* easier there. */
198 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
199 break;
200
201 default:
202 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
203 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
204 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
205 }
206
207 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteUnhandled);
208 return VINF_EM_RAW_EMULATE_INSTR;
209}
210
211#endif /* !IN_RING3 */
212
213
214/**
215 * @callback_method_impl{FNPGMPHYSHANDLER,
216 * Access handler callback for ROM write accesses.}
217 *
218 * @remarks The @a pvUser argument points to the PGMROMRANGE.
219 */
220PGM_ALL_CB2_DECL(VBOXSTRICTRC)
221pgmPhysRomWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
222 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
223{
224 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
225 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
226 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
227 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
228 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
229 NOREF(pVCpu); NOREF(pvPhys); NOREF(enmOrigin);
230
231 if (enmAccessType == PGMACCESSTYPE_READ)
232 {
233 switch (pRomPage->enmProt)
234 {
235 /*
236 * Take the default action.
237 */
238 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
239 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
240 case PGMROMPROT_READ_ROM_WRITE_RAM:
241 case PGMROMPROT_READ_RAM_WRITE_RAM:
242 return VINF_PGM_HANDLER_DO_DEFAULT;
243
244 default:
245 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
246 pRom->aPages[iPage].enmProt, iPage, GCPhys),
247 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
248 }
249 }
250 else
251 {
252 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
253 switch (pRomPage->enmProt)
254 {
255 /*
256 * Ignore writes.
257 */
258 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
259 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
260 return VINF_SUCCESS;
261
262 /*
263 * Write to the RAM page.
264 */
265 case PGMROMPROT_READ_ROM_WRITE_RAM:
266 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
267 {
268 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
269 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
270
271 /*
272 * Take the lock, do lazy allocation, map the page and copy the data.
273 *
274 * Note that we have to bypass the mapping TLB since it works on
275 * guest physical addresses and entering the shadow page would
276 * kind of screw things up...
277 */
278 PGM_LOCK_VOID(pVM);
279
280 PPGMPAGE pShadowPage = &pRomPage->Shadow;
281 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
282 {
283 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
284 AssertLogRelMsgReturnStmt(pShadowPage, ("%RGp\n", GCPhys), PGM_UNLOCK(pVM), VERR_PGM_PHYS_PAGE_GET_IPE);
285 }
286
287 void *pvDstPage;
288 int rc;
289#if defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)
290 if (PGM_IS_IN_NEM_MODE(pVM) && PGMROMPROT_IS_ROM(pRomPage->enmProt))
291 {
292 pvDstPage = &pRom->pbR3Alternate[GCPhys - pRom->GCPhys];
293 rc = VINF_SUCCESS;
294 }
295 else
296#endif
297 {
298 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
299 if (RT_SUCCESS(rc))
300 pvDstPage = (uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK);
301 }
302 if (RT_SUCCESS(rc))
303 {
304 memcpy(pvDstPage, pvBuf, cbBuf);
305 pRomPage->LiveSave.fWrittenTo = true;
306
307 AssertMsg( rc == VINF_SUCCESS
308 || ( rc == VINF_PGM_SYNC_CR3
309 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
310 , ("%Rrc\n", rc));
311 rc = VINF_SUCCESS;
312 }
313
314 PGM_UNLOCK(pVM);
315 return rc;
316 }
317
318 default:
319 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
320 pRom->aPages[iPage].enmProt, iPage, GCPhys),
321 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
322 }
323 }
324}
325
326
327/**
328 * Common worker for pgmPhysMmio2WriteHandler and pgmPhysMmio2WritePfHandler.
329 */
330static VBOXSTRICTRC pgmPhysMmio2WriteHandlerCommon(PVMCC pVM, PVMCPUCC pVCpu, uintptr_t hMmio2, RTGCPHYS GCPhys, RTGCPTR GCPtr)
331{
332 /*
333 * Get the MMIO2 range.
334 */
335 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), VERR_INTERNAL_ERROR_3);
336 AssertReturn(hMmio2 != 0, VERR_INTERNAL_ERROR_3);
337 PPGMREGMMIO2RANGE pMmio2 = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2 - 1];
338 Assert(pMmio2->idMmio2 == hMmio2);
339 AssertReturn((pMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES) == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
340 VERR_INTERNAL_ERROR_4);
341
342 /*
343 * Get the page and make sure it's an MMIO2 page.
344 */
345 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
346 AssertReturn(pPage, VINF_EM_RAW_EMULATE_INSTR);
347 AssertReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, VINF_EM_RAW_EMULATE_INSTR);
348
349 /*
350 * Set the dirty flag so we can avoid scanning all the pages when it isn't dirty.
351 * (The PGM_PAGE_HNDL_PHYS_STATE_DISABLED handler state indicates that a single
352 * page is dirty, saving the need for additional storage (bitmap).)
353 */
354 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_DIRTY;
355
356 /*
357 * Disable the handler for this page.
358 */
359 int rc = PGMHandlerPhysicalPageTempOff(pVM, pMmio2->RamRange.GCPhys, GCPhys & X86_PTE_PG_MASK);
360 AssertRC(rc);
361#ifndef IN_RING3
362 if (RT_SUCCESS(rc) && GCPtr != ~(RTGCPTR)0)
363 {
364 rc = PGMShwMakePageWritable(pVCpu, GCPtr, PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
365 AssertMsgReturn(rc == VINF_SUCCESS, ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, rc), rc);
366 }
367#else
368 RT_NOREF(pVCpu, GCPtr);
369#endif
370 return VINF_SUCCESS;
371}
372
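/** @par How the dirty state is meant to be consumed (illustrative only)
 * The worker above only records dirtiness: it sets PGMREGMMIO2RANGE_F_IS_DIRTY on
 * the range and leaves the written page with its handler temporarily off
 * (PGM_PAGE_HNDL_PHYS_STATE_DISABLED), so further writes to that page are free.
 * A consumer (e.g. the VGA device this tracking was moved out of) would
 * conceptually do something like the following; the reset call is an assumption
 * about how the tracking gets re-armed, not a statement about the actual DevVGA code:
 * @code
 *      if (pMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY)
 *      {
 *          // ... harvest the pages whose handler state was knocked down ...
 *          pMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;
 *          PGMHandlerPhysicalReset(pVM, pMmio2->RamRange.GCPhys); // re-enable write tracking
 *      }
 * @endcode
 */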
373
374#ifndef IN_RING3
375/**
376 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
377 * \#PF access handler callback for guest MMIO2 dirty page tracing.}
378 *
379 * @remarks The @a pvUser is the MMIO2 index.
380 */
381DECLEXPORT(VBOXSTRICTRC) pgmPhysMmio2WritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
382 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
383{
384 RT_NOREF(pVCpu, uErrorCode, pRegFrame);
385 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
386 if (RT_SUCCESS(rcStrict))
387 {
388 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, (uintptr_t)pvUser, GCPhysFault, pvFault);
389 PGM_UNLOCK(pVM);
390 }
391 return rcStrict;
392}
393#endif /* !IN_RING3 */
394
395
396/**
397 * @callback_method_impl{FNPGMPHYSHANDLER,
398 * Access handler callback for MMIO2 dirty page tracing.}
399 *
400 * @remarks The @a pvUser is the MMIO2 index.
401 */
402PGM_ALL_CB2_DECL(VBOXSTRICTRC)
403pgmPhysMmio2WriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
404 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
405{
406 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
407 if (RT_SUCCESS(rcStrict))
408 {
409 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, (uintptr_t)pvUser, GCPhys, ~(RTGCPTR)0);
410 PGM_UNLOCK(pVM);
411 if (rcStrict == VINF_SUCCESS)
412 rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
413 }
414 RT_NOREF(pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
415 return rcStrict;
416}
417
418
419/**
420 * Invalidates the RAM range TLBs.
421 *
422 * @param pVM The cross context VM structure.
423 */
424void pgmPhysInvalidRamRangeTlbs(PVMCC pVM)
425{
426 PGM_LOCK_VOID(pVM);
427 RT_ZERO(pVM->pgm.s.apRamRangesTlbR3);
428 RT_ZERO(pVM->pgm.s.apRamRangesTlbR0);
429 PGM_UNLOCK(pVM);
430}
431
432
433/**
434 * Tests if a value of type RTGCPHYS is negative if the type had been signed
435 * instead of unsigned.
436 *
437 * @returns @c true if negative, @c false if positive or zero.
438 * @param a_GCPhys The value to test.
439 * @todo Move me to iprt/types.h.
440 */
441#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
442
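/** @par Example (illustrative only)
 * The macro simply tests the most significant bit, which the RAM range lookups
 * below rely on to decide which way to descend after an unsigned subtraction:
 * @code
 *      RTGCPHYS off = GCPhys - pRam->GCPhys;  // wraps around if GCPhys < pRam->GCPhys
 *      if (off < pRam->cb)                    // GCPhys is inside this range
 *          ...
 *      else if (RTGCPHYS_IS_NEGATIVE(off))    // GCPhys is below the range -> go left
 *          ...
 *      else                                   // GCPhys is above the range -> go right
 *          ...
 * @endcode
 */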
443
444/**
445 * Slow worker for pgmPhysGetRange.
446 *
447 * @copydoc pgmPhysGetRange
448 */
449PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
450{
451 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
452
453 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
454 while (pRam)
455 {
456 RTGCPHYS off = GCPhys - pRam->GCPhys;
457 if (off < pRam->cb)
458 {
459 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
460 return pRam;
461 }
462 if (RTGCPHYS_IS_NEGATIVE(off))
463 pRam = pRam->CTX_SUFF(pLeft);
464 else
465 pRam = pRam->CTX_SUFF(pRight);
466 }
467 return NULL;
468}
469
470
471/**
472 * Slow worker for pgmPhysGetRangeAtOrAbove.
473 *
474 * @copydoc pgmPhysGetRangeAtOrAbove
475 */
476PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
477{
478 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
479
480 PPGMRAMRANGE pLastLeft = NULL;
481 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
482 while (pRam)
483 {
484 RTGCPHYS off = GCPhys - pRam->GCPhys;
485 if (off < pRam->cb)
486 {
487 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
488 return pRam;
489 }
490 if (RTGCPHYS_IS_NEGATIVE(off))
491 {
492 pLastLeft = pRam;
493 pRam = pRam->CTX_SUFF(pLeft);
494 }
495 else
496 pRam = pRam->CTX_SUFF(pRight);
497 }
498 return pLastLeft;
499}
500
501
502/**
503 * Slow worker for pgmPhysGetPage.
504 *
505 * @copydoc pgmPhysGetPage
506 */
507PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
508{
509 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
510
511 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
512 while (pRam)
513 {
514 RTGCPHYS off = GCPhys - pRam->GCPhys;
515 if (off < pRam->cb)
516 {
517 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
518 return &pRam->aPages[off >> PAGE_SHIFT];
519 }
520
521 if (RTGCPHYS_IS_NEGATIVE(off))
522 pRam = pRam->CTX_SUFF(pLeft);
523 else
524 pRam = pRam->CTX_SUFF(pRight);
525 }
526 return NULL;
527}
528
529
530/**
531 * Slow worker for pgmPhysGetPageEx.
532 *
533 * @copydoc pgmPhysGetPageEx
534 */
535int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
536{
537 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
538
539 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
540 while (pRam)
541 {
542 RTGCPHYS off = GCPhys - pRam->GCPhys;
543 if (off < pRam->cb)
544 {
545 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
546 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
547 return VINF_SUCCESS;
548 }
549
550 if (RTGCPHYS_IS_NEGATIVE(off))
551 pRam = pRam->CTX_SUFF(pLeft);
552 else
553 pRam = pRam->CTX_SUFF(pRight);
554 }
555
556 *ppPage = NULL;
557 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
558}
559
560
561/**
562 * Slow worker for pgmPhysGetPageAndRangeEx.
563 *
564 * @copydoc pgmPhysGetPageAndRangeEx
565 */
566int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
567{
568 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
569
570 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
571 while (pRam)
572 {
573 RTGCPHYS off = GCPhys - pRam->GCPhys;
574 if (off < pRam->cb)
575 {
576 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
577 *ppRam = pRam;
578 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
579 return VINF_SUCCESS;
580 }
581
582 if (RTGCPHYS_IS_NEGATIVE(off))
583 pRam = pRam->CTX_SUFF(pLeft);
584 else
585 pRam = pRam->CTX_SUFF(pRight);
586 }
587
588 *ppRam = NULL;
589 *ppPage = NULL;
590 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
591}
592
593
594/**
595 * Checks if Address Gate 20 is enabled or not.
596 *
597 * @returns true if enabled.
598 * @returns false if disabled.
599 * @param pVCpu The cross context virtual CPU structure.
600 */
601VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
602{
603 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
604 return pVCpu->pgm.s.fA20Enabled;
605}
606
607
608/**
609 * Validates a GC physical address.
610 *
611 * @returns true if valid.
612 * @returns false if invalid.
613 * @param pVM The cross context VM structure.
614 * @param GCPhys The physical address to validate.
615 */
616VMMDECL(bool) PGMPhysIsGCPhysValid(PVMCC pVM, RTGCPHYS GCPhys)
617{
618 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
619 return pPage != NULL;
620}
621
622
623/**
624 * Checks if a GC physical address is a normal page,
625 * i.e. not ROM, MMIO or reserved.
626 *
627 * @returns true if normal.
628 * @returns false if invalid, ROM, MMIO or reserved page.
629 * @param pVM The cross context VM structure.
630 * @param GCPhys The physical address to check.
631 */
632VMMDECL(bool) PGMPhysIsGCPhysNormal(PVMCC pVM, RTGCPHYS GCPhys)
633{
634 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
635 return pPage
636 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
637}
638
639
640/**
641 * Converts a GC physical address to a HC physical address.
642 *
643 * @returns VINF_SUCCESS on success.
644 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
645 * page but has no physical backing.
646 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
647 * GC physical address.
648 *
649 * @param pVM The cross context VM structure.
650 * @param GCPhys The GC physical address to convert.
651 * @param pHCPhys Where to store the HC physical address on success.
652 */
653VMM_INT_DECL(int) PGMPhysGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
654{
655 PGM_LOCK_VOID(pVM);
656 PPGMPAGE pPage;
657 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
658 if (RT_SUCCESS(rc))
659 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
660 PGM_UNLOCK(pVM);
661 return rc;
662}
663
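/** @par Usage sketch (illustrative only)
 * A minimal caller of the conversion above; the logging is just an example.
 * @code
 *      RTHCPHYS HCPhys;
 *      int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
 *      if (RT_SUCCESS(rc))
 *          Log(("GCPhys %RGp -> HCPhys %RHp\n", GCPhys, HCPhys));
 *      else
 *          Assert(rc == VERR_PGM_PHYS_PAGE_RESERVED || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
 * @endcode
 */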
664
665/**
666 * Invalidates all page mapping TLBs.
667 *
668 * @param pVM The cross context VM structure.
669 */
670void pgmPhysInvalidatePageMapTLB(PVMCC pVM)
671{
672 PGM_LOCK_VOID(pVM);
673 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushes);
674
675 /* Clear the R3 & R0 TLBs completely. */
676 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
677 {
678 pVM->pgm.s.PhysTlbR0.aEntries[i].GCPhys = NIL_RTGCPHYS;
679 pVM->pgm.s.PhysTlbR0.aEntries[i].pPage = 0;
680 pVM->pgm.s.PhysTlbR0.aEntries[i].pv = 0;
681 }
682
683 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
684 {
685 pVM->pgm.s.PhysTlbR3.aEntries[i].GCPhys = NIL_RTGCPHYS;
686 pVM->pgm.s.PhysTlbR3.aEntries[i].pPage = 0;
687 pVM->pgm.s.PhysTlbR3.aEntries[i].pMap = 0;
688 pVM->pgm.s.PhysTlbR3.aEntries[i].pv = 0;
689 }
690
691 PGM_UNLOCK(pVM);
692}
693
694
695/**
696 * Invalidates a page mapping TLB entry
697 *
698 * @param pVM The cross context VM structure.
699 * @param GCPhys GCPhys entry to flush
700 */
701void pgmPhysInvalidatePageMapTLBEntry(PVMCC pVM, RTGCPHYS GCPhys)
702{
703 PGM_LOCK_ASSERT_OWNER(pVM);
704
705 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushEntry);
706
707 unsigned const idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
708
709 pVM->pgm.s.PhysTlbR0.aEntries[idx].GCPhys = NIL_RTGCPHYS;
710 pVM->pgm.s.PhysTlbR0.aEntries[idx].pPage = 0;
711 pVM->pgm.s.PhysTlbR0.aEntries[idx].pv = 0;
712
713 pVM->pgm.s.PhysTlbR3.aEntries[idx].GCPhys = NIL_RTGCPHYS;
714 pVM->pgm.s.PhysTlbR3.aEntries[idx].pPage = 0;
715 pVM->pgm.s.PhysTlbR3.aEntries[idx].pMap = 0;
716 pVM->pgm.s.PhysTlbR3.aEntries[idx].pv = 0;
717}
718
719
720/**
721 * Makes sure that there is at least one handy page ready for use.
722 *
723 * This will also take the appropriate actions when reaching water-marks.
724 *
725 * @returns VBox status code.
726 * @retval VINF_SUCCESS on success.
727 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
728 *
729 * @param pVM The cross context VM structure.
730 *
731 * @remarks Must be called from within the PGM critical section. It may
732 * nip back to ring-3/0 in some cases.
733 */
734static int pgmPhysEnsureHandyPage(PVMCC pVM)
735{
736 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
737
738 /*
739 * Do we need to do anything special?
740 */
741#ifdef IN_RING3
742 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
743#else
744 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
745#endif
746 {
747 /*
748 * Allocate pages only if we're out of them, or in ring-3, almost out.
749 */
750#ifdef IN_RING3
751 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
752#else
753 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
754#endif
755 {
756 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
757 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
758#ifdef IN_RING3
759 int rc = PGMR3PhysAllocateHandyPages(pVM);
760#else
761 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
762#endif
763 if (RT_UNLIKELY(rc != VINF_SUCCESS))
764 {
765 if (RT_FAILURE(rc))
766 return rc;
767 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
768 if (!pVM->pgm.s.cHandyPages)
769 {
770 LogRel(("PGM: no more handy pages!\n"));
771 return VERR_EM_NO_MEMORY;
772 }
773 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
774 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
775#ifndef IN_RING3
776 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
777#endif
778 }
779 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
780 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
781 ("%u\n", pVM->pgm.s.cHandyPages),
782 VERR_PGM_HANDY_PAGE_IPE);
783 }
784 else
785 {
786 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
787 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
788#ifndef IN_RING3
789 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
790 {
791 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
792 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
793 }
794#endif
795 }
796 }
797
798 return VINF_SUCCESS;
799}
800
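/** @par Water-mark logic in a nutshell (illustrative only)
 * The thresholds are the PGM_HANDY_PAGES_* values used above; the helper names
 * in this sketch are made up to show the shape of the policy, not real APIs:
 * @code
 *      if (cHandyPages <= PGM_HANDY_PAGES_SET_FF)        // getting low
 *          VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);   // ask for a refill soon
 *      if (cHandyPages <= ThresholdForThisContext())     // R3_ALLOC in ring-3, RZ_ALLOC in ring-0/raw
 *          RefillNow();                                  // PGMR3PhysAllocateHandyPages or a ring-3 call
 *      else if (!fInRing3 && cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
 *          VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);          // make sure we get back to ring-3 shortly
 * @endcode
 */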
801
802
803/**
804 * Replace a zero or shared page with a new page that we can write to.
805 *
806 * @returns The following VBox status codes.
807 * @retval VINF_SUCCESS on success, pPage is modified.
808 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
809 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
810 *
811 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
812 *
813 * @param pVM The cross context VM structure.
814 * @param pPage The physical page tracking structure. This will
815 * be modified on success.
816 * @param GCPhys The address of the page.
817 *
818 * @remarks Must be called from within the PGM critical section. It may
819 * nip back to ring-3/0 in some cases.
820 *
821 * @remarks This function shouldn't really fail, however if it does
822 * it probably means we've screwed up the size of handy pages and/or
823 * the low-water mark. Or, that some device I/O is causing a lot of
824 * pages to be allocated while the host is in a low-memory
825 * condition. This latter should be handled elsewhere and in a more
826 * controlled manner, it's on the @bugref{3170} todo list...
827 */
828int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
829{
830 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
831
832 /*
833 * Prereqs.
834 */
835 PGM_LOCK_ASSERT_OWNER(pVM);
836 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
837 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
838
839# ifdef PGM_WITH_LARGE_PAGES
840 /*
841 * Try allocate a large page if applicable.
842 */
843 if ( PGMIsUsingLargePages(pVM)
844 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
845 && !VM_IS_NEM_ENABLED(pVM)) /** @todo NEM: Implement large pages support. */
846 {
847 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
848 PPGMPAGE pBasePage;
849
850 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
851 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
852 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
853 {
854 rc = pgmPhysAllocLargePage(pVM, GCPhys);
855 if (rc == VINF_SUCCESS)
856 return rc;
857 }
858 /* Mark the base as type page table, so we don't check over and over again. */
859 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
860
861 /* fall back to 4KB pages. */
862 }
863# endif
864
865 /*
866 * Flush any shadow page table mappings of the page.
867 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
868 */
869 bool fFlushTLBs = false;
870 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
871 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
872
873 /*
874 * Ensure that we've got a page handy, take it and use it.
875 */
876 int rc2 = pgmPhysEnsureHandyPage(pVM);
877 if (RT_FAILURE(rc2))
878 {
879 if (fFlushTLBs)
880 PGM_INVL_ALL_VCPU_TLBS(pVM);
881 Assert(rc2 == VERR_EM_NO_MEMORY);
882 return rc2;
883 }
884 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
885 PGM_LOCK_ASSERT_OWNER(pVM);
886 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
887 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
888
889 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
890 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
891 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
892 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
893 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
894 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
895
896 /*
897 * There are one or two actions to be taken the next time we allocate handy pages:
898 * - Tell the GMM (global memory manager) what the page is being used for.
899 * (Speeds up replacement operations - sharing and defragmenting.)
900 * - If the current backing is shared, it must be freed.
901 */
902 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
903 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
904
905 void const *pvSharedPage = NULL;
906 if (PGM_PAGE_IS_SHARED(pPage))
907 {
908 /* Mark this shared page for freeing/dereferencing. */
909 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
910 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
911
912 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
913 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
914 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageReplaceShared));
915 pVM->pgm.s.cSharedPages--;
916
917 /* Grab the address of the page so we can make a copy later on. (safe) */
918 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
919 AssertRC(rc);
920 }
921 else
922 {
923 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
924 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatRZPageReplaceZero);
925 pVM->pgm.s.cZeroPages--;
926 }
927
928 /*
929 * Do the PGMPAGE modifications.
930 */
931 pVM->pgm.s.cPrivatePages++;
932 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
933 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
934 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
935 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
936 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
937
938 /* Copy the shared page contents to the replacement page. */
939 if (pvSharedPage)
940 {
941 /* Get the virtual address of the new page. */
942 PGMPAGEMAPLOCK PgMpLck;
943 void *pvNewPage;
944 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
945 if (RT_SUCCESS(rc))
946 {
947 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo write ASMMemCopyPage */
948 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
949 }
950 }
951
952 if ( fFlushTLBs
953 && rc != VINF_PGM_GCPHYS_ALIASED)
954 PGM_INVL_ALL_VCPU_TLBS(pVM);
955
956 /*
957 * Notify NEM about the mapping change for this page.
958 *
959 * Note! Shadow ROM pages are complicated as they can definitely be
960 * allocated while not visible, so play safe.
961 */
962 if (VM_IS_NEM_ENABLED(pVM))
963 {
964 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
965 if ( enmType != PGMPAGETYPE_ROM_SHADOW
966 || pgmPhysGetPage(pVM, GCPhys) == pPage)
967 {
968 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
969 rc2 = NEMHCNotifyPhysPageAllocated(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, HCPhys,
970 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
971 if (RT_SUCCESS(rc2))
972 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
973 else
974 rc = rc2;
975 }
976 }
977
978 return rc;
979}
980
981#ifdef PGM_WITH_LARGE_PAGES
982
983/**
984 * Replace a 2 MB range of zero pages with new pages that we can write to.
985 *
986 * @returns The following VBox status codes.
987 * @retval VINF_SUCCESS on success, pPage is modified.
988 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
989 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
990 *
991 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
992 *
993 * @param pVM The cross context VM structure.
994 * @param GCPhys The address of the page.
995 *
996 * @remarks Must be called from within the PGM critical section. It may
997 * nip back to ring-3/0 in some cases.
998 */
999int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys)
1000{
1001 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
1002 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
1003 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1004
1005 /*
1006 * Prereqs.
1007 */
1008 PGM_LOCK_ASSERT_OWNER(pVM);
1009 Assert(PGMIsUsingLargePages(pVM));
1010
1011 PPGMPAGE pFirstPage;
1012 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
1013 if ( RT_SUCCESS(rc)
1014 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
1015 {
1016 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
1017
1018 /* Don't call this function for already allocated pages. */
1019 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
1020
1021 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
1022 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
1023 {
1024 /* Lazy approach: check all pages in the 2 MB range.
1025 * The whole range must be ram and unallocated. */
1026 GCPhys = GCPhysBase;
1027 unsigned iPage;
1028 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
1029 {
1030 PPGMPAGE pSubPage;
1031 rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage);
1032 if ( RT_FAILURE(rc)
1033 || PGM_PAGE_GET_TYPE(pSubPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
1034 || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
1035 {
1036 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
1037 break;
1038 }
1039 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
1040 GCPhys += PAGE_SIZE;
1041 }
1042 if (iPage != _2M/PAGE_SIZE)
1043 {
1044 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
1045 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
1046 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
1047 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1048 }
1049
1050 /*
1051 * Do the allocation.
1052 */
1053# ifdef IN_RING3
1054 rc = PGMR3PhysAllocateLargePage(pVM, GCPhysBase);
1055# else
1056 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
1057# endif
1058 if (RT_SUCCESS(rc))
1059 {
1060 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
1061 pVM->pgm.s.cLargePages++;
1062 return VINF_SUCCESS;
1063 }
1064
1065 /* If we fail once, it most likely means the host's memory is too
1066 fragmented; don't bother trying again. */
1067 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
1068 if (rc != VERR_TRY_AGAIN)
1069 PGMSetLargePageUsage(pVM, false);
1070 return rc;
1071 }
1072 }
1073 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1074}
1075
1076
1077/**
1078 * Recheck the entire 2 MB range to see if we can use it again as a large page.
1079 *
1080 * @returns The following VBox status codes.
1081 * @retval VINF_SUCCESS on success, the large page can be used again
1082 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
1083 *
1084 * @param pVM The cross context VM structure.
1085 * @param GCPhys The address of the page.
1086 * @param pLargePage Page structure of the base page
1087 */
1088int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
1089{
1090 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
1091
1092 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1093
1094 GCPhys &= X86_PDE2M_PAE_PG_MASK;
1095
1096 /* Check the base page. */
1097 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
1098 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
1099 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
1100 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1101 {
1102 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
1103 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1104 }
1105
1106 STAM_PROFILE_START(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1107 /* Check all remaining pages in the 2 MB range. */
1108 unsigned i;
1109 GCPhys += PAGE_SIZE;
1110 for (i = 1; i < _2M/PAGE_SIZE; i++)
1111 {
1112 PPGMPAGE pPage;
1113 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1114 AssertRCBreak(rc);
1115
1116 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
1117 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
1118 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
1119 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1120 {
1121 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
1122 break;
1123 }
1124
1125 GCPhys += PAGE_SIZE;
1126 }
1127 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1128
1129 if (i == _2M/PAGE_SIZE)
1130 {
1131 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
1132 pVM->pgm.s.cLargePagesDisabled--;
1133 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
1134 return VINF_SUCCESS;
1135 }
1136
1137 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1138}
1139
1140#endif /* PGM_WITH_LARGE_PAGES */
1141
1142
1143/**
1144 * Deal with a write monitored page.
1145 *
1146 * @returns VBox strict status code.
1147 *
1148 * @param pVM The cross context VM structure.
1149 * @param pPage The physical page tracking structure.
1150 * @param GCPhys The guest physical address of the page.
1151 * PGMPhysReleasePageMappingLock() passes NIL_RTGCPHYS in a
1152 * very unlikely situation where it is okay that we let NEM
1153 * fix the page access in a lazy fashion.
1154 *
1155 * @remarks Called from within the PGM critical section.
1156 */
1157void pgmPhysPageMakeWriteMonitoredWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1158{
1159 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
1160 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1161 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1162 Assert(pVM->pgm.s.cMonitoredPages > 0);
1163 pVM->pgm.s.cMonitoredPages--;
1164 pVM->pgm.s.cWrittenToPages++;
1165
1166#ifdef VBOX_WITH_NATIVE_NEM
1167 /*
1168 * Notify NEM about the protection change so we won't spin forever.
1169 *
1170 * Note! NEM needs to be able to lazily correct page protection as we cannot
1171 * really get it 100% right here, it seems.
1172 */
1173 if (VM_IS_NEM_ENABLED(pVM) && GCPhys != NIL_RTGCPHYS)
1174 {
1175 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1176 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1177 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1178 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
1179 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhys) : NULL,
1180 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1181 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1182 }
1183#else
1184 RT_NOREF(GCPhys);
1185#endif
1186}
1187
1188
1189/**
1190 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
1191 *
1192 * @returns VBox strict status code.
1193 * @retval VINF_SUCCESS on success.
1194 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1195 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1196 *
1197 * @param pVM The cross context VM structure.
1198 * @param pPage The physical page tracking structure.
1199 * @param GCPhys The address of the page.
1200 *
1201 * @remarks Called from within the PGM critical section.
1202 */
1203int pgmPhysPageMakeWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1204{
1205 PGM_LOCK_ASSERT_OWNER(pVM);
1206 switch (PGM_PAGE_GET_STATE(pPage))
1207 {
1208 case PGM_PAGE_STATE_WRITE_MONITORED:
1209 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
1210 RT_FALL_THRU();
1211 default: /* to shut up GCC */
1212 case PGM_PAGE_STATE_ALLOCATED:
1213 return VINF_SUCCESS;
1214
1215 /*
1216 * Zero pages can be dummy pages for MMIO or reserved memory,
1217 * so we need to check the flags before joining cause with
1218 * shared page replacement.
1219 */
1220 case PGM_PAGE_STATE_ZERO:
1221 if (PGM_PAGE_IS_MMIO(pPage))
1222 return VERR_PGM_PHYS_PAGE_RESERVED;
1223 RT_FALL_THRU();
1224 case PGM_PAGE_STATE_SHARED:
1225 return pgmPhysAllocPage(pVM, pPage, GCPhys);
1226
1227 /* Not allowed to write to ballooned pages. */
1228 case PGM_PAGE_STATE_BALLOONED:
1229 return VERR_PGM_PHYS_PAGE_BALLOONED;
1230 }
1231}
1232
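/** @par Typical caller pattern (illustrative only)
 * This mirrors what pgmPhysGCPhys2CCPtrInternalDepr and friends further down do
 * before handing out a writable mapping:
 * @code
 *      if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
 *      {
 *          rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
 *          if (RT_FAILURE(rc))
 *              return rc;          // e.g. VERR_PGM_PHYS_PAGE_RESERVED or _BALLOONED
 *          AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
 *      }
 * @endcode
 */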
1233
1234/**
1235 * Internal usage: Map the page specified by its GMM ID.
1236 *
1237 * This is similar to pgmPhysPageMap
1238 *
1239 * @returns VBox status code.
1240 *
1241 * @param pVM The cross context VM structure.
1242 * @param idPage The Page ID.
1243 * @param HCPhys The physical address (for SUPR0HCPhysToVirt).
1244 * @param ppv Where to store the mapping address.
1245 *
1246 * @remarks Called from within the PGM critical section. The mapping is only
1247 * valid while you are inside this section.
1248 */
1249int pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
1250{
1251 /*
1252 * Validation.
1253 */
1254 PGM_LOCK_ASSERT_OWNER(pVM);
1255 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1256 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
1257 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
1258
1259#ifdef IN_RING0
1260# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1261 return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, ppv);
1262# else
1263 return GMMR0PageIdToVirt(pVM, idPage, ppv);
1264# endif
1265
1266#else
1267 /*
1268 * Find/make Chunk TLB entry for the mapping chunk.
1269 */
1270 PPGMCHUNKR3MAP pMap;
1271 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1272 if (pTlbe->idChunk == idChunk)
1273 {
1274 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1275 pMap = pTlbe->pChunk;
1276 }
1277 else
1278 {
1279 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1280
1281 /*
1282 * Find the chunk, map it if necessary.
1283 */
1284 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1285 if (pMap)
1286 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1287 else
1288 {
1289 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1290 if (RT_FAILURE(rc))
1291 return rc;
1292 }
1293
1294 /*
1295 * Enter it into the Chunk TLB.
1296 */
1297 pTlbe->idChunk = idChunk;
1298 pTlbe->pChunk = pMap;
1299 }
1300
1301 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
1302 return VINF_SUCCESS;
1303#endif
1304}
1305
1306
1307/**
1308 * Maps a page into the current virtual address space so it can be accessed.
1309 *
1310 * @returns VBox status code.
1311 * @retval VINF_SUCCESS on success.
1312 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1313 *
1314 * @param pVM The cross context VM structure.
1315 * @param pPage The physical page tracking structure.
1316 * @param GCPhys The address of the page.
1317 * @param ppMap Where to store the address of the mapping tracking structure.
1318 * @param ppv Where to store the mapping address of the page. The page
1319 * offset is masked off!
1320 *
1321 * @remarks Called from within the PGM critical section.
1322 */
1323static int pgmPhysPageMapCommon(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1324{
1325 PGM_LOCK_ASSERT_OWNER(pVM);
1326 NOREF(GCPhys);
1327
1328 /*
1329 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1330 */
1331 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1332 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1333 {
1334 /* Decode the page id to a page in a MMIO2 ram range. */
1335 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1336 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1337 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1338 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
1339 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1340 pPage->s.idPage, pPage->s.uStateY),
1341 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1342 PPGMREGMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1343 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1344 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1345 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1346 *ppMap = NULL;
1347# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1348 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1349# elif defined(IN_RING0)
1350 *ppv = (uint8_t *)pMmio2Range->pvR0 + ((uintptr_t)iPage << PAGE_SHIFT);
1351 return VINF_SUCCESS;
1352# else
1353 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << PAGE_SHIFT);
1354 return VINF_SUCCESS;
1355# endif
1356 }
1357
1358# ifdef VBOX_WITH_PGM_NEM_MODE
1359 if (pVM->pgm.s.fNemMode)
1360 {
1361# ifdef IN_RING3
1362 /*
1363 * Find the corresponding RAM range and use that to locate the mapping address.
1364 */
1365 /** @todo Use the page ID for some kind of indexing as we do with MMIO2 above. */
1366 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
1367 AssertLogRelMsgReturn(pRam, ("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
1368 size_t const idxPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1369 Assert(pPage == &pRam->aPages[idxPage]);
1370 *ppMap = NULL;
1371 *ppv = (uint8_t *)pRam->pvR3 + (idxPage << PAGE_SHIFT);
1372 return VINF_SUCCESS;
1373# else
1374 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1375# endif
1376 }
1377# endif
1378
1379 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1380 if (idChunk == NIL_GMM_CHUNKID)
1381 {
1382 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1383 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1384 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1385 {
1386 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1387 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1388 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1389 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1390 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1391 }
1392 else
1393 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1394 *ppMap = NULL;
1395 return VINF_SUCCESS;
1396 }
1397
1398# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1399 /*
1400 * Just use the physical address.
1401 */
1402 *ppMap = NULL;
1403 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1404
1405# elif defined(IN_RING0)
1406 /*
1407 * Go by page ID thru GMMR0.
1408 */
1409 *ppMap = NULL;
1410 return GMMR0PageIdToVirt(pVM, PGM_PAGE_GET_PAGEID(pPage), ppv);
1411
1412# else
1413 /*
1414 * Find/make Chunk TLB entry for the mapping chunk.
1415 */
1416 PPGMCHUNKR3MAP pMap;
1417 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1418 if (pTlbe->idChunk == idChunk)
1419 {
1420 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1421 pMap = pTlbe->pChunk;
1422 AssertPtr(pMap->pv);
1423 }
1424 else
1425 {
1426 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1427
1428 /*
1429 * Find the chunk, map it if necessary.
1430 */
1431 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1432 if (pMap)
1433 {
1434 AssertPtr(pMap->pv);
1435 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1436 }
1437 else
1438 {
1439 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1440 if (RT_FAILURE(rc))
1441 return rc;
1442 AssertPtr(pMap->pv);
1443 }
1444
1445 /*
1446 * Enter it into the Chunk TLB.
1447 */
1448 pTlbe->idChunk = idChunk;
1449 pTlbe->pChunk = pMap;
1450 }
1451
1452 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
1453 *ppMap = pMap;
1454 return VINF_SUCCESS;
1455# endif /* !IN_RING0 */
1456}
1457
1458
1459/**
1460 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1461 *
1462 * This is typically used in paths where we cannot use the TLB methods (like ROM
1463 * pages) or where there is no point in using them since we won't get many hits.
1464 *
1465 * @returns VBox strict status code.
1466 * @retval VINF_SUCCESS on success.
1467 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1468 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1469 *
1470 * @param pVM The cross context VM structure.
1471 * @param pPage The physical page tracking structure.
1472 * @param GCPhys The address of the page.
1473 * @param ppv Where to store the mapping address of the page. The page
1474 * offset is masked off!
1475 *
1476 * @remarks Called from within the PGM critical section. The mapping is only
1477 * valid while you are inside this section.
1478 */
1479int pgmPhysPageMakeWritableAndMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1480{
1481 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1482 if (RT_SUCCESS(rc))
1483 {
1484 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1485 PPGMPAGEMAP pMapIgnore;
1486 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1487 if (RT_FAILURE(rc2)) /* preserve rc */
1488 rc = rc2;
1489 }
1490 return rc;
1491}
1492
1493
1494/**
1495 * Maps a page into the current virtual address space so it can be accessed for
1496 * both writing and reading.
1497 *
1498 * This is typically used in paths where we cannot use the TLB methods (like ROM
1499 * pages) or where there is no point in using them since we won't get many hits.
1500 *
1501 * @returns VBox status code.
1502 * @retval VINF_SUCCESS on success.
1503 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1504 *
1505 * @param pVM The cross context VM structure.
1506 * @param pPage The physical page tracking structure. Must be in the
1507 * allocated state.
1508 * @param GCPhys The address of the page.
1509 * @param ppv Where to store the mapping address of the page. The page
1510 * offset is masked off!
1511 *
1512 * @remarks Called from within the PGM critical section. The mapping is only
1513 * valid while you are inside this section.
1514 */
1515int pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1516{
1517 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1518 PPGMPAGEMAP pMapIgnore;
1519 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1520}
1521
1522
1523/**
1524 * Maps a page into the current virtual address space so it can be accessed for
1525 * reading.
1526 *
1527 * This is typically used in paths where we cannot use the TLB methods (like ROM
1528 * pages) or where there is no point in using them since we won't get many hits.
1529 *
1530 * @returns VBox status code.
1531 * @retval VINF_SUCCESS on success.
1532 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1533 *
1534 * @param pVM The cross context VM structure.
1535 * @param pPage The physical page tracking structure.
1536 * @param GCPhys The address of the page.
1537 * @param ppv Where to store the mapping address of the page. The page
1538 * offset is masked off!
1539 *
1540 * @remarks Called from within the PGM critical section. The mapping is only
1541 * valid while you are inside this section.
1542 */
1543int pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1544{
1545 PPGMPAGEMAP pMapIgnore;
1546 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1547}
1548
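/** @par Usage sketch (illustrative only)
 * These helpers hand out mappings that are only valid while the PGM critical
 * section is held, so a caller brackets the access roughly like this (the
 * memset is just an arbitrary example of touching the page):
 * @code
 *      PGM_LOCK_VOID(pVM);
 *      void *pv;
 *      int rc = pgmPhysPageMakeWritableAndMap(pVM, pPage, GCPhys & X86_PTE_PG_MASK, &pv);
 *      if (RT_SUCCESS(rc))
 *          memset(pv, 0, PAGE_SIZE);
 *      PGM_UNLOCK(pVM);
 * @endcode
 */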
1549
1550/**
1551 * Load a guest page into the ring-3 physical TLB.
1552 *
1553 * @returns VBox status code.
1554 * @retval VINF_SUCCESS on success
1555 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1556 * @param pVM The cross context VM structure.
1557 * @param GCPhys The guest physical address in question.
1558 */
1559int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys)
1560{
1561 PGM_LOCK_ASSERT_OWNER(pVM);
1562
1563 /*
1564 * Find the ram range and page and hand it over to the with-page function.
1565 * 99.8% of requests are expected to be in the first range.
1566 */
1567 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1568 if (!pPage)
1569 {
1570 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1571 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1572 }
1573
1574 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1575}
1576
1577
1578/**
1579 * Load a guest page into the ring-3 physical TLB.
1580 *
1581 * @returns VBox status code.
1582 * @retval VINF_SUCCESS on success
1583 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1584 *
1585 * @param pVM The cross context VM structure.
1586 * @param pPage Pointer to the PGMPAGE structure corresponding to
1587 * GCPhys.
1588 * @param GCPhys The guest physical address in question.
1589 */
1590int pgmPhysPageLoadIntoTlbWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1591{
1592 PGM_LOCK_ASSERT_OWNER(pVM);
1593 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1594
1595 /*
1596 * Map the page.
1597 * Make a special case for the zero page as it is kind of special.
1598 */
1599 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1600 if ( !PGM_PAGE_IS_ZERO(pPage)
1601 && !PGM_PAGE_IS_BALLOONED(pPage))
1602 {
1603 void *pv;
1604 PPGMPAGEMAP pMap;
1605 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1606 if (RT_FAILURE(rc))
1607 return rc;
1608# ifndef IN_RING0
1609 pTlbe->pMap = pMap;
1610# endif
1611 pTlbe->pv = pv;
1612 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1613 }
1614 else
1615 {
1616 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1617# ifndef IN_RING0
1618 pTlbe->pMap = NULL;
1619# endif
1620 pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1621 }
1622# ifdef PGM_WITH_PHYS_TLB
1623 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1624 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1625 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1626 else
1627 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1628# else
1629 pTlbe->GCPhys = NIL_RTGCPHYS;
1630# endif
1631 pTlbe->pPage = pPage;
1632 return VINF_SUCCESS;
1633}
1634
1635
1636/**
1637 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1638 * own the PGM lock and therefore not need to lock the mapped page.
1639 *
1640 * @returns VBox status code.
1641 * @retval VINF_SUCCESS on success.
1642 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1643 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1644 *
1645 * @param pVM The cross context VM structure.
1646 * @param GCPhys The guest physical address of the page that should be mapped.
1647 * @param pPage Pointer to the PGMPAGE structure for the page.
1648 * @param ppv Where to store the address corresponding to GCPhys.
1649 *
1650 * @internal
1651 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1652 */
1653int pgmPhysGCPhys2CCPtrInternalDepr(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1654{
1655 int rc;
1656 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1657 PGM_LOCK_ASSERT_OWNER(pVM);
1658 pVM->pgm.s.cDeprecatedPageLocks++;
1659
1660 /*
1661 * Make sure the page is writable.
1662 */
1663 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1664 {
1665 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1666 if (RT_FAILURE(rc))
1667 return rc;
1668 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1669 }
1670 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1671
1672 /*
1673 * Get the mapping address.
1674 */
1675 PPGMPAGEMAPTLBE pTlbe;
1676 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1677 if (RT_FAILURE(rc))
1678 return rc;
1679 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1680 return VINF_SUCCESS;
1681}
1682
1683
1684/**
1685 * Locks a page mapping for writing.
1686 *
1687 * @param pVM The cross context VM structure.
1688 * @param pPage The page.
1689 * @param pTlbe The mapping TLB entry for the page.
1690 * @param pLock The lock structure (output).
1691 */
1692DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1693{
1694# ifndef IN_RING0
1695 PPGMPAGEMAP pMap = pTlbe->pMap;
1696 if (pMap)
1697 pMap->cRefs++;
1698# else
1699 RT_NOREF(pTlbe);
1700# endif
1701
1702 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1703 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1704 {
1705 if (cLocks == 0)
1706 pVM->pgm.s.cWriteLockedPages++;
1707 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1708 }
1709 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1710 {
1711 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1712 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1713# ifndef IN_RING0
1714 if (pMap)
1715 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1716# endif
1717 }
1718
1719 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1720# ifndef IN_RING0
1721 pLock->pvMap = pMap;
1722# else
1723 pLock->pvMap = NULL;
1724# endif
1725}
1726
1727/**
1728 * Locks a page mapping for reading.
1729 *
1730 * @param pVM The cross context VM structure.
1731 * @param pPage The page.
1732 * @param pTlbe The mapping TLB entry for the page.
1733 * @param pLock The lock structure (output).
1734 */
1735DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1736{
1737# ifndef IN_RING0
1738 PPGMPAGEMAP pMap = pTlbe->pMap;
1739 if (pMap)
1740 pMap->cRefs++;
1741# else
1742 RT_NOREF(pTlbe);
1743# endif
1744
1745 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1746 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1747 {
1748 if (cLocks == 0)
1749 pVM->pgm.s.cReadLockedPages++;
1750 PGM_PAGE_INC_READ_LOCKS(pPage);
1751 }
1752 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1753 {
1754 PGM_PAGE_INC_READ_LOCKS(pPage);
1755 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1756# ifndef IN_RING0
1757 if (pMap)
1758 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1759# endif
1760 }
1761
1762 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1763# ifndef IN_RING0
1764 pLock->pvMap = pMap;
1765# else
1766 pLock->pvMap = NULL;
1767# endif
1768}
1769
1770
1771/**
1772 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1773 * own the PGM lock and have access to the page structure.
1774 *
1775 * @returns VBox status code.
1776 * @retval VINF_SUCCESS on success.
1777 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1778 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1779 *
1780 * @param pVM The cross context VM structure.
1781 * @param GCPhys The guest physical address of the page that should be mapped.
1782 * @param pPage Pointer to the PGMPAGE structure for the page.
1783 * @param ppv Where to store the address corresponding to GCPhys.
1784 * @param pLock Where to store the lock information that
1785 * pgmPhysReleaseInternalPageMappingLock needs.
1786 *
1787 * @internal
1788 */
1789int pgmPhysGCPhys2CCPtrInternal(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1790{
1791 int rc;
1792 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1793 PGM_LOCK_ASSERT_OWNER(pVM);
1794
1795 /*
1796 * Make sure the page is writable.
1797 */
1798 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1799 {
1800 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1801 if (RT_FAILURE(rc))
1802 return rc;
1803 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1804 }
1805 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1806
1807 /*
1808 * Do the job.
1809 */
1810 PPGMPAGEMAPTLBE pTlbe;
1811 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1812 if (RT_FAILURE(rc))
1813 return rc;
1814 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1815 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1816 return VINF_SUCCESS;
1817}
1818
1819
1820/**
1821 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1822 * own the PGM lock and have access to the page structure.
1823 *
1824 * @returns VBox status code.
1825 * @retval VINF_SUCCESS on success.
1826 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1827 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1828 *
1829 * @param pVM The cross context VM structure.
1830 * @param GCPhys The guest physical address of the page that should be mapped.
1831 * @param pPage Pointer to the PGMPAGE structure for the page.
1832 * @param ppv Where to store the address corresponding to GCPhys.
1833 * @param pLock Where to store the lock information that
1834 * pgmPhysReleaseInternalPageMappingLock needs.
1835 *
1836 * @internal
1837 */
1838int pgmPhysGCPhys2CCPtrInternalReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1839{
1840 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1841 PGM_LOCK_ASSERT_OWNER(pVM);
1842 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1843
1844 /*
1845 * Do the job.
1846 */
1847 PPGMPAGEMAPTLBE pTlbe;
1848 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1849 if (RT_FAILURE(rc))
1850 return rc;
1851 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1852 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1853 return VINF_SUCCESS;
1854}
1855
1856
1857/**
1858 * Requests the mapping of a guest page into the current context.
1859 *
1860 * This API should only be used for a very short time, as it will consume scarce
1861 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1862 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1863 *
1864 * This API will assume your intention is to write to the page, and will
1865 * therefore replace shared and zero pages. If you do not intend to modify
1866 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1867 *
1868 * @returns VBox status code.
1869 * @retval VINF_SUCCESS on success.
1870 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1871 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1872 *
1873 * @param pVM The cross context VM structure.
1874 * @param GCPhys The guest physical address of the page that should be
1875 * mapped.
1876 * @param ppv Where to store the address corresponding to GCPhys.
1877 * @param pLock Where to store the lock information that
1878 * PGMPhysReleasePageMappingLock needs.
1879 *
1880 * @remarks The caller is responsible for dealing with access handlers.
1881 * @todo Add an informational return code for pages with access handlers?
1882 *
1883 * @remark Avoid calling this API from within critical sections (other than
1884 * the PGM one) because of the deadlock risk. External threads may
1885 * need to delegate jobs to the EMTs.
1886 * @remarks Only one page is mapped! Make no assumption about what's after or
1887 * before the returned page!
1888 * @thread Any thread.
1889 */
1890VMM_INT_DECL(int) PGMPhysGCPhys2CCPtr(PVMCC pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1891{
1892 int rc = PGM_LOCK(pVM);
1893 AssertRCReturn(rc, rc);
1894
1895 /*
1896 * Query the Physical TLB entry for the page (may fail).
1897 */
1898 PPGMPAGEMAPTLBE pTlbe;
1899 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1900 if (RT_SUCCESS(rc))
1901 {
1902 /*
1903 * If the page is shared, the zero page, or being write monitored
1904 * it must be converted to a page that's writable if possible.
1905 */
1906 PPGMPAGE pPage = pTlbe->pPage;
1907 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1908 {
1909 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1910 if (RT_SUCCESS(rc))
1911 {
1912 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1913 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1914 }
1915 }
1916 if (RT_SUCCESS(rc))
1917 {
1918 /*
1919 * Now, just perform the locking and calculate the return address.
1920 */
1921 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1922 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1923 }
1924 }
1925
1926 PGM_UNLOCK(pVM);
1927 return rc;
1928}
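
/*
 * Illustrative usage sketch (comment only, not part of the build): map a
 * guest page for writing, patch it and drop the lock again as soon as
 * possible.  GCPhysExample and abPatch are made-up names.
 *
 *     uint8_t const  abPatch[4] = { 0x90, 0x90, 0x90, 0x90 };
 *     RTGCPHYS const GCPhysExample = 0x1000;
 *     PGMPAGEMAPLOCK Lock;
 *     void          *pv;
 *     int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysExample, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         memcpy(pv, abPatch, sizeof(abPatch));       // stay within the one mapped page
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);  // release ASAP, cache entries are scarce
 *     }
 */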
1929
1930
1931/**
1932 * Requests the mapping of a guest page into the current context.
1933 *
1934 * This API should only be used for a very short time, as it will consume scarce
1935 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1936 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1937 *
1938 * @returns VBox status code.
1939 * @retval VINF_SUCCESS on success.
1940 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1941 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1942 *
1943 * @param pVM The cross context VM structure.
1944 * @param GCPhys The guest physical address of the page that should be
1945 * mapped.
1946 * @param ppv Where to store the address corresponding to GCPhys.
1947 * @param pLock Where to store the lock information that
1948 * PGMPhysReleasePageMappingLock needs.
1949 *
1950 * @remarks The caller is responsible for dealing with access handlers.
1951 * @todo Add an informational return code for pages with access handlers?
1952 *
1953 * @remarks Avoid calling this API from within critical sections (other than
1954 * the PGM one) because of the deadlock risk.
1955 * @remarks Only one page is mapped! Make no assumption about what's after or
1956 * before the returned page!
1957 * @thread Any thread.
1958 */
1959VMM_INT_DECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVMCC pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1960{
1961 int rc = PGM_LOCK(pVM);
1962 AssertRCReturn(rc, rc);
1963
1964 /*
1965 * Query the Physical TLB entry for the page (may fail).
1966 */
1967 PPGMPAGEMAPTLBE pTlbe;
1968 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1969 if (RT_SUCCESS(rc))
1970 {
1971 /* MMIO pages don't have any readable backing. */
1972 PPGMPAGE pPage = pTlbe->pPage;
1973 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1974 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1975 else
1976 {
1977 /*
1978 * Now, just perform the locking and calculate the return address.
1979 */
1980 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1981 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1982 }
1983 }
1984
1985 PGM_UNLOCK(pVM);
1986 return rc;
1987}
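
/*
 * Illustrative usage sketch (comment only, not part of the build): peek at
 * guest memory without forcing shared or zero pages to be replaced.
 * GCPhysExample is a made-up address.
 *
 *     PGMPAGEMAPLOCK Lock;
 *     void const    *pv;
 *     int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysExample, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         uint32_t uValue;
 *         memcpy(&uValue, pv, sizeof(uValue));        // read only; the backing may be shared
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);
 *     }
 */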
1988
1989
1990/**
1991 * Requests the mapping of a guest page given by virtual address into the current context.
1992 *
1993 * This API should only be used for very short term, as it will consume
1994 * scarse resources (R0 and GC) in the mapping cache. When you're done
1995 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1996 *
1997 * This API will assume your intention is to write to the page, and will
1998 * therefore replace shared and zero pages. If you do not intend to modify
1999 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
2000 *
2001 * @returns VBox status code.
2002 * @retval VINF_SUCCESS on success.
2003 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2004 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2005 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2006 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2007 *
2008 * @param pVCpu The cross context virtual CPU structure.
2009 * @param GCPtr The guest virtual address of the page that should be
2010 * mapped.
2011 * @param ppv Where to store the address corresponding to GCPtr.
2012 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2013 *
2014 * @remark Avoid calling this API from within critical sections (other than
2015 * the PGM one) because of the deadlock risk.
2016 * @thread EMT
2017 */
2018VMM_INT_DECL(int) PGMPhysGCPtr2CCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
2019{
2020 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2021 RTGCPHYS GCPhys;
2022 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2023 if (RT_SUCCESS(rc))
2024 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2025 return rc;
2026}
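
/*
 * Illustrative usage sketch (comment only, not part of the build): the
 * EMT-only variant that starts from a guest virtual address.  GCPtrExample
 * is a made-up address.
 *
 *     PGMPAGEMAPLOCK Lock;
 *     void          *pv;
 *     int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrExample, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         *(uint8_t *)pv = 0xcc;                      // e.g. plant a breakpoint byte
 *         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
 *     }
 */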
2027
2028
2029/**
2030 * Requests the mapping of a guest page given by virtual address into the current context.
2031 *
2032 * This API should only be used for a very short time, as it will consume
2033 * scarce resources (R0 and GC) in the mapping cache. When you're done
2034 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2035 *
2036 * @returns VBox status code.
2037 * @retval VINF_SUCCESS on success.
2038 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2039 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2040 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2041 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2042 *
2043 * @param pVCpu The cross context virtual CPU structure.
2044 * @param GCPtr The guest virtual address of the page that should be
2045 * mapped.
2046 * @param ppv Where to store the address corresponding to GCPtr.
2047 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2048 *
2049 * @remark Avoid calling this API from within critical sections (other than
2050 * the PGM one) because of the deadlock risk.
2051 * @thread EMT
2052 */
2053VMM_INT_DECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
2054{
2055 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2056 RTGCPHYS GCPhys;
2057 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2058 if (RT_SUCCESS(rc))
2059 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2060 return rc;
2061}
2062
2063
2064/**
2065 * Release the mapping of a guest page.
2066 *
2067 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
2068 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
2069 *
2070 * @param pVM The cross context VM structure.
2071 * @param pLock The lock structure initialized by the mapping function.
2072 */
2073VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2074{
2075# ifndef IN_RING0
2076 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
2077# endif
2078 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2079 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2080
2081 pLock->uPageAndType = 0;
2082 pLock->pvMap = NULL;
2083
2084 PGM_LOCK_VOID(pVM);
2085 if (fWriteLock)
2086 {
2087 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2088 Assert(cLocks > 0);
2089 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2090 {
2091 if (cLocks == 1)
2092 {
2093 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2094 pVM->pgm.s.cWriteLockedPages--;
2095 }
2096 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2097 }
2098
2099 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2100 { /* probably extremely likely */ }
2101 else
2102 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2103 }
2104 else
2105 {
2106 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2107 Assert(cLocks > 0);
2108 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2109 {
2110 if (cLocks == 1)
2111 {
2112 Assert(pVM->pgm.s.cReadLockedPages > 0);
2113 pVM->pgm.s.cReadLockedPages--;
2114 }
2115 PGM_PAGE_DEC_READ_LOCKS(pPage);
2116 }
2117 }
2118
2119# ifndef IN_RING0
2120 if (pMap)
2121 {
2122 Assert(pMap->cRefs >= 1);
2123 pMap->cRefs--;
2124 }
2125# endif
2126 PGM_UNLOCK(pVM);
2127}
2128
2129
2130#ifdef IN_RING3
2131/**
2132 * Release the mapping of multiple guest pages.
2133 *
2134 * This is the counterpart to PGMR3PhysBulkGCPhys2CCPtrExternal() and
2135 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal().
2136 *
2137 * @param pVM The cross context VM structure.
2138 * @param cPages Number of pages to unlock.
2139 * @param paLocks Array of lock structures initialized by the mapping
2140 * function.
2141 */
2142VMMDECL(void) PGMPhysBulkReleasePageMappingLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
2143{
2144 Assert(cPages > 0);
2145 bool const fWriteLock = (paLocks[0].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2146#ifdef VBOX_STRICT
2147 for (uint32_t i = 1; i < cPages; i++)
2148 {
2149 Assert(fWriteLock == ((paLocks[i].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE));
2150 AssertPtr(paLocks[i].uPageAndType);
2151 }
2152#endif
2153
2154 PGM_LOCK_VOID(pVM);
2155 if (fWriteLock)
2156 {
2157 /*
2158 * Write locks:
2159 */
2160 for (uint32_t i = 0; i < cPages; i++)
2161 {
2162 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2163 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2164 Assert(cLocks > 0);
2165 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2166 {
2167 if (cLocks == 1)
2168 {
2169 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2170 pVM->pgm.s.cWriteLockedPages--;
2171 }
2172 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2173 }
2174
2175 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2176 { /* probably extremely likely */ }
2177 else
2178 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2179
2180 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2181 if (pMap)
2182 {
2183 Assert(pMap->cRefs >= 1);
2184 pMap->cRefs--;
2185 }
2186
2187 /* Yield the lock: */
2188 if ((i & 1023) == 1023 && i + 1 < cPages)
2189 {
2190 PGM_UNLOCK(pVM);
2191 PGM_LOCK_VOID(pVM);
2192 }
2193 }
2194 }
2195 else
2196 {
2197 /*
2198 * Read locks:
2199 */
2200 for (uint32_t i = 0; i < cPages; i++)
2201 {
2202 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2203 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2204 Assert(cLocks > 0);
2205 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2206 {
2207 if (cLocks == 1)
2208 {
2209 Assert(pVM->pgm.s.cReadLockedPages > 0);
2210 pVM->pgm.s.cReadLockedPages--;
2211 }
2212 PGM_PAGE_DEC_READ_LOCKS(pPage);
2213 }
2214
2215 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2216 if (pMap)
2217 {
2218 Assert(pMap->cRefs >= 1);
2219 pMap->cRefs--;
2220 }
2221
2222 /* Yield the lock: */
2223 if ((i & 1023) == 1023 && i + 1 < cPages)
2224 {
2225 PGM_UNLOCK(pVM);
2226 PGM_LOCK_VOID(pVM);
2227 }
2228 }
2229 }
2230 PGM_UNLOCK(pVM);
2231
2232 RT_BZERO(paLocks, sizeof(paLocks[0]) * cPages);
2233}
2234#endif /* IN_RING3 */
2235
2236
2237/**
2238 * Release the internal mapping of a guest page.
2239 *
2240 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
2241 * pgmPhysGCPhys2CCPtrInternalReadOnly.
2242 *
2243 * @param pVM The cross context VM structure.
2244 * @param pLock The lock structure initialized by the mapping function.
2245 *
2246 * @remarks Caller must hold the PGM lock.
2247 */
2248void pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2249{
2250 PGM_LOCK_ASSERT_OWNER(pVM);
2251 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
2252}
2253
2254
2255/**
2256 * Converts a GC physical address to a HC ring-3 pointer.
2257 *
2258 * @returns VINF_SUCCESS on success.
2259 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2260 * page but has no physical backing.
2261 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2262 * GC physical address.
2263 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2264 * a dynamic ram chunk boundary
2265 *
2266 * @param pVM The cross context VM structure.
2267 * @param GCPhys The GC physical address to convert.
2268 * @param pR3Ptr Where to store the R3 pointer on success.
2269 *
2270 * @deprecated Avoid when possible!
2271 */
2272int pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2273{
2274/** @todo this is kind of hacky and needs some more work. */
2275#ifndef DEBUG_sandervl
2276 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
2277#endif
2278
2279 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
2280 PGM_LOCK_VOID(pVM);
2281
2282 PPGMRAMRANGE pRam;
2283 PPGMPAGE pPage;
2284 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2285 if (RT_SUCCESS(rc))
2286 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2287
2288 PGM_UNLOCK(pVM);
2289 Assert(rc <= VINF_SUCCESS);
2290 return rc;
2291}
2292
2293
2294/**
2295 * Converts a guest pointer to a GC physical address.
2296 *
2297 * This uses the current CR3/CR0/CR4 of the guest.
2298 *
2299 * @returns VBox status code.
2300 * @param pVCpu The cross context virtual CPU structure.
2301 * @param GCPtr The guest pointer to convert.
2302 * @param pGCPhys Where to store the GC physical address.
2303 */
2304VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2305{
2306 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
2307 if (pGCPhys && RT_SUCCESS(rc))
2308 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
2309 return rc;
2310}
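
/*
 * Illustrative usage sketch (comment only, not part of the build): translate
 * a guest virtual address with the current paging mode; the page offset of
 * the input is carried over into the result.  GCPtrExample is a made-up
 * address.
 *
 *     RTGCPHYS GCPhys;
 *     int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtrExample, &GCPhys);
 *     if (RT_SUCCESS(rc))
 *         Log(("GCPtr %RGv -> GCPhys %RGp\n", GCPtrExample, GCPhys));
 */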
2311
2312
2313/**
2314 * Converts a guest pointer to a HC physical address.
2315 *
2316 * This uses the current CR3/CR0/CR4 of the guest.
2317 *
2318 * @returns VBox status code.
2319 * @param pVCpu The cross context virtual CPU structure.
2320 * @param GCPtr The guest pointer to convert.
2321 * @param pHCPhys Where to store the HC physical address.
2322 */
2323VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2324{
2325 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2326 RTGCPHYS GCPhys;
2327 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
2328 if (RT_SUCCESS(rc))
2329 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
2330 return rc;
2331}
2332
2333
2334
2335#undef LOG_GROUP
2336#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2337
2338
2339#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2340/**
2341 * Cache PGMPhys memory access
2342 *
2343 * @param pVM The cross context VM structure.
2344 * @param pCache Cache structure pointer
2345 * @param GCPhys GC physical address
2346 * @param pbR3 R3 pointer corresponding to the physical page
2347 *
2348 * @thread EMT.
2349 */
2350static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2351{
2352 uint32_t iCacheIndex;
2353
2354 Assert(VM_IS_EMT(pVM));
2355
2356 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
2357 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
2358
2359 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2360
2361 ASMBitSet(&pCache->aEntries, iCacheIndex);
2362
2363 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2364 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2365}
2366#endif /* IN_RING3 */
2367
2368
2369/**
2370 * Deals with reading from a page with one or more ALL access handlers.
2371 *
2372 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2373 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2374 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2375 *
2376 * @param pVM The cross context VM structure.
2377 * @param pPage The page descriptor.
2378 * @param GCPhys The physical address to start reading at.
2379 * @param pvBuf Where to put the bits we read.
2380 * @param cb How much to read - less than or equal to a page.
2381 * @param enmOrigin The origin of this call.
2382 */
2383static VBOXSTRICTRC pgmPhysReadHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb,
2384 PGMACCESSORIGIN enmOrigin)
2385{
2386 /*
2387 * The most frequent accesses here are MMIO and shadowed ROM.
2388 * The current code ASSUMES all these access handlers cover full pages!
2389 */
2390
2391 /*
2392 * Whatever we do we need the source page, map it first.
2393 */
2394 PGMPAGEMAPLOCK PgMpLck;
2395 const void *pvSrc = NULL;
2396 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2397/** @todo Check how this can work for MMIO pages? */
2398 if (RT_FAILURE(rc))
2399 {
2400 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2401 GCPhys, pPage, rc));
2402 memset(pvBuf, 0xff, cb);
2403 return VINF_SUCCESS;
2404 }
2405
2406 VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
2407
2408 /*
2409 * Deal with any physical handlers.
2410 */
2411 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2412 PPGMPHYSHANDLER pPhys = NULL;
2413 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2414 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2415 {
2416 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2417 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2418 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
2419 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
2420 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2421#ifndef IN_RING3
2422 if (enmOrigin != PGMACCESSORIGIN_IEM)
2423 {
2424 /* Cannot reliably handle informational status codes in this context */
2425 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2426 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2427 }
2428#endif
2429 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler); Assert(pfnHandler);
2430 void *pvUser = pPhys->CTX_SUFF(pvUser);
2431
2432 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
2433 STAM_PROFILE_START(&pPhys->Stat, h);
2434 PGM_LOCK_ASSERT_OWNER(pVM);
2435
2436 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2437 PGM_UNLOCK(pVM);
2438 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, pvUser);
2439 PGM_LOCK_VOID(pVM);
2440
2441#ifdef VBOX_WITH_STATISTICS
2442 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2443 if (pPhys)
2444 STAM_PROFILE_STOP(&pPhys->Stat, h);
2445#else
2446 pPhys = NULL; /* might not be valid anymore. */
2447#endif
2448 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, false),
2449 ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
2450 if ( rcStrict != VINF_PGM_HANDLER_DO_DEFAULT
2451 && !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2452 {
2453 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2454 return rcStrict;
2455 }
2456 }
2457
2458 /*
2459 * Take the default action.
2460 */
2461 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2462 {
2463 memcpy(pvBuf, pvSrc, cb);
2464 rcStrict = VINF_SUCCESS;
2465 }
2466 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2467 return rcStrict;
2468}
2469
2470
2471/**
2472 * Read physical memory.
2473 *
2474 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2475 * want to ignore those.
2476 *
2477 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2478 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2479 * @retval VINF_SUCCESS in all context - read completed.
2480 *
2481 * @retval VINF_EM_OFF in RC and R0 - read completed.
2482 * @retval VINF_EM_SUSPEND in RC and R0 - read completed.
2483 * @retval VINF_EM_RESET in RC and R0 - read completed.
2484 * @retval VINF_EM_HALT in RC and R0 - read completed.
2485 * @retval VINF_SELM_SYNC_GDT in RC only - read completed.
2486 *
2487 * @retval VINF_EM_DBG_STOP in RC and R0 - read completed.
2488 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - read completed.
2489 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2490 *
2491 * @retval VINF_IOM_R3_MMIO_READ in RC and R0.
2492 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2493 *
2494 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2495 *
2496 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2497 * haven't been cleared for strict status codes yet.
2498 *
2499 * @param pVM The cross context VM structure.
2500 * @param GCPhys Physical address start reading from.
2501 * @param pvBuf Where to put the read bits.
2502 * @param cbRead How many bytes to read.
2503 * @param enmOrigin The origin of this call.
2504 */
2505VMMDECL(VBOXSTRICTRC) PGMPhysRead(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2506{
2507 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2508 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2509
2510 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysRead));
2511 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2512
2513 PGM_LOCK_VOID(pVM);
2514
2515 /*
2516 * Copy loop on ram ranges.
2517 */
2518 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2519 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2520 for (;;)
2521 {
2522 /* Inside range or not? */
2523 if (pRam && GCPhys >= pRam->GCPhys)
2524 {
2525 /*
2526 * Must work our way thru this page by page.
2527 */
2528 RTGCPHYS off = GCPhys - pRam->GCPhys;
2529 while (off < pRam->cb)
2530 {
2531 unsigned iPage = off >> PAGE_SHIFT;
2532 PPGMPAGE pPage = &pRam->aPages[iPage];
2533 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2534 if (cb > cbRead)
2535 cb = cbRead;
2536
2537 /*
2538 * Normal page? Get the pointer to it.
2539 */
2540 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2541 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2542 {
2543 /*
2544 * Get the pointer to the page.
2545 */
2546 PGMPAGEMAPLOCK PgMpLck;
2547 const void *pvSrc;
2548 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2549 if (RT_SUCCESS(rc))
2550 {
2551 memcpy(pvBuf, pvSrc, cb);
2552 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2553 }
2554 else
2555 {
2556 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2557 pRam->GCPhys + off, pPage, rc));
2558 memset(pvBuf, 0xff, cb);
2559 }
2560 }
2561 /*
2562 * Have ALL/MMIO access handlers.
2563 */
2564 else
2565 {
2566 VBOXSTRICTRC rcStrict2 = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2567 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2568 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2569 else
2570 {
2571 memset(pvBuf, 0xff, cb);
2572 PGM_UNLOCK(pVM);
2573 return rcStrict2;
2574 }
2575 }
2576
2577 /* next page */
2578 if (cb >= cbRead)
2579 {
2580 PGM_UNLOCK(pVM);
2581 return rcStrict;
2582 }
2583 cbRead -= cb;
2584 off += cb;
2585 pvBuf = (char *)pvBuf + cb;
2586 } /* walk pages in ram range. */
2587
2588 GCPhys = pRam->GCPhysLast + 1;
2589 }
2590 else
2591 {
2592 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2593
2594 /*
2595 * Unassigned address space.
2596 */
2597 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2598 if (cb >= cbRead)
2599 {
2600 memset(pvBuf, 0xff, cbRead);
2601 break;
2602 }
2603 memset(pvBuf, 0xff, cb);
2604
2605 cbRead -= cb;
2606 pvBuf = (char *)pvBuf + cb;
2607 GCPhys += cb;
2608 }
2609
2610 /* Advance range if necessary. */
2611 while (pRam && GCPhys > pRam->GCPhysLast)
2612 pRam = pRam->CTX_SUFF(pNext);
2613 } /* Ram range walk */
2614
2615 PGM_UNLOCK(pVM);
2616 return rcStrict;
2617}
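
/*
 * Illustrative usage sketch (comment only, not part of the build): read a
 * value through the handler-aware path.  In ring-0 the call may return an
 * informational status that must be propagated, so PGM_PHYS_RW_IS_SUCCESS
 * is used rather than plain RT_SUCCESS.  GCPhysExample is a made-up address
 * and PGMACCESSORIGIN_IEM is just one possible origin.
 *
 *     uint64_t u64Value = 0;
 *     VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhysExample, &u64Value, sizeof(u64Value), PGMACCESSORIGIN_IEM);
 *     if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *         Log(("read %#RX64 (rcStrict=%Rrc)\n", u64Value, VBOXSTRICTRC_VAL(rcStrict)));
 */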
2618
2619
2620/**
2621 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2622 *
2623 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2624 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2625 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2626 *
2627 * @param pVM The cross context VM structure.
2628 * @param pPage The page descriptor.
2629 * @param GCPhys The physical address to start writing at.
2630 * @param pvBuf What to write.
2631 * @param cbWrite How much to write - less than or equal to a page.
2632 * @param enmOrigin The origin of this call.
2633 */
2634static VBOXSTRICTRC pgmPhysWriteHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2635 PGMACCESSORIGIN enmOrigin)
2636{
2637 PGMPAGEMAPLOCK PgMpLck;
2638 void *pvDst = NULL;
2639 VBOXSTRICTRC rcStrict;
2640
2641 /*
2642 * Give priority to physical handlers (like #PF does).
2643 *
2644 * Hope for a lonely physical handler first that covers the whole
2645 * write area. This should be a pretty frequent case with MMIO and
2646 * the heavy usage of full page handlers in the page pool.
2647 */
2648 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2649 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2650 if (pCur)
2651 {
2652 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2653#ifndef IN_RING3
2654 if (enmOrigin != PGMACCESSORIGIN_IEM)
2655 /* Cannot reliably handle informational status codes in this context */
2656 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2657#endif
2658 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2659 if (cbRange > cbWrite)
2660 cbRange = cbWrite;
2661
2662 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler));
2663 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2664 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2665 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2666 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2667 else
2668 rcStrict = VINF_SUCCESS;
2669 if (RT_SUCCESS(rcStrict))
2670 {
2671 PPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
2672 PFNPGMPHYSHANDLER const pfnHandler = pCurType->CTX_SUFF(pfnHandler);
2673 void * const pvUser = pCur->CTX_SUFF(pvUser);
2674 STAM_PROFILE_START(&pCur->Stat, h);
2675
2676 /* Most handlers will want to release the PGM lock for deadlock prevention
2677 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2678 dirty page trackers will want to keep it for performance reasons. */
2679 PGM_LOCK_ASSERT_OWNER(pVM);
2680 if (pCurType->fKeepPgmLock)
2681 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2682 else
2683 {
2684 PGM_UNLOCK(pVM);
2685 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2686 PGM_LOCK_VOID(pVM);
2687 }
2688
2689#ifdef VBOX_WITH_STATISTICS
2690 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2691 if (pCur)
2692 STAM_PROFILE_STOP(&pCur->Stat, h);
2693#else
2694 pCur = NULL; /* might not be valid anymore. */
2695#endif
2696 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2697 {
2698 if (pvDst)
2699 memcpy(pvDst, pvBuf, cbRange);
2700 rcStrict = VINF_SUCCESS;
2701 }
2702 else
2703 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
2704 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2705 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pCur ? R3STRING(pCur->pszDesc) : ""));
2706 }
2707 else
2708 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2709 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
2710 if (RT_LIKELY(cbRange == cbWrite) || !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2711 {
2712 if (pvDst)
2713 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2714 return rcStrict;
2715 }
2716
2717 /* more fun to be had below */
2718 cbWrite -= cbRange;
2719 GCPhys += cbRange;
2720 pvBuf = (uint8_t *)pvBuf + cbRange;
2721 pvDst = (uint8_t *)pvDst + cbRange;
2722 }
2723 else /* The handler is somewhere else in the page, deal with it below. */
2724 rcStrict = VINF_SUCCESS;
2725 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2726
2727 /*
2728 * Deal with all the odd ends (this used to deal with virt+phys).
2729 */
2730 Assert(rcStrict != VINF_PGM_HANDLER_DO_DEFAULT);
2731
2732 /* We need a writable destination page. */
2733 if (!pvDst)
2734 {
2735 int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2736 AssertLogRelMsgReturn(RT_SUCCESS(rc2),
2737 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n", GCPhys, pPage, rc2),
2738 rc2);
2739 }
2740
2741 /* The loop state (big + ugly). */
2742 PPGMPHYSHANDLER pPhys = NULL;
2743 uint32_t offPhys = PAGE_SIZE;
2744 uint32_t offPhysLast = PAGE_SIZE;
2745 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2746
2747 /* The loop. */
2748 for (;;)
2749 {
2750 if (fMorePhys && !pPhys)
2751 {
2752 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2753 if (pPhys)
2754 {
2755 offPhys = 0;
2756 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2757 }
2758 else
2759 {
2760 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2761 GCPhys, true /* fAbove */);
2762 if ( pPhys
2763 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2764 {
2765 offPhys = pPhys->Core.Key - GCPhys;
2766 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2767 }
2768 else
2769 {
2770 pPhys = NULL;
2771 fMorePhys = false;
2772 offPhys = offPhysLast = PAGE_SIZE;
2773 }
2774 }
2775 }
2776
2777 /*
2778 * Handle access to space without handlers (that's easy).
2779 */
2780 VBOXSTRICTRC rcStrict2 = VINF_PGM_HANDLER_DO_DEFAULT;
2781 uint32_t cbRange = (uint32_t)cbWrite;
2782
2783 /*
2784 * Physical handler.
2785 */
2786 if (!offPhys)
2787 {
2788#ifndef IN_RING3
2789 if (enmOrigin != PGMACCESSORIGIN_IEM)
2790 /* Cannot reliably handle informational status codes in this context */
2791 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2792#endif
2793 if (cbRange > offPhysLast + 1)
2794 cbRange = offPhysLast + 1;
2795
2796 PPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys);
2797 PFNPGMPHYSHANDLER const pfnHandler = pCurType->CTX_SUFF(pfnHandler);
2798 void * const pvUser = pPhys->CTX_SUFF(pvUser);
2799
2800 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2801 STAM_PROFILE_START(&pPhys->Stat, h);
2802
2803 /* Most handlers will want to release the PGM lock for deadlock prevention
2804 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2805 dirty page trackers will want to keep it for performance reasons. */
2806 PGM_LOCK_ASSERT_OWNER(pVM);
2807 if (pCurType->fKeepPgmLock)
2808 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2809 else
2810 {
2811 PGM_UNLOCK(pVM);
2812 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2813 PGM_LOCK_VOID(pVM);
2814 }
2815
2816#ifdef VBOX_WITH_STATISTICS
2817 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2818 if (pPhys)
2819 STAM_PROFILE_STOP(&pPhys->Stat, h);
2820#else
2821 pPhys = NULL; /* might not be valid anymore. */
2822#endif
2823 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true),
2824 ("rcStrict2=%Rrc (rcStrict=%Rrc) GCPhys=%RGp pPage=%R[pgmpage] %s\n", VBOXSTRICTRC_VAL(rcStrict2),
2825 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pPhys ? R3STRING(pPhys->pszDesc) : ""));
2826 }
2827
2828 /*
2829 * Execute the default action and merge the status codes.
2830 */
2831 if (rcStrict2 == VINF_PGM_HANDLER_DO_DEFAULT)
2832 {
2833 memcpy(pvDst, pvBuf, cbRange);
2834 rcStrict2 = VINF_SUCCESS;
2835 }
2836 else if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2837 {
2838 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2839 return rcStrict2;
2840 }
2841 else
2842 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2843
2844 /*
2845 * Advance if we've got more stuff to do.
2846 */
2847 if (cbRange >= cbWrite)
2848 {
2849 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2850 return rcStrict;
2851 }
2852
2853
2854 cbWrite -= cbRange;
2855 GCPhys += cbRange;
2856 pvBuf = (uint8_t *)pvBuf + cbRange;
2857 pvDst = (uint8_t *)pvDst + cbRange;
2858
2859 offPhys -= cbRange;
2860 offPhysLast -= cbRange;
2861 }
2862}
2863
2864
2865/**
2866 * Write to physical memory.
2867 *
2868 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2869 * want to ignore those.
2870 *
2871 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2872 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2873 * @retval VINF_SUCCESS in all context - write completed.
2874 *
2875 * @retval VINF_EM_OFF in RC and R0 - write completed.
2876 * @retval VINF_EM_SUSPEND in RC and R0 - write completed.
2877 * @retval VINF_EM_RESET in RC and R0 - write completed.
2878 * @retval VINF_EM_HALT in RC and R0 - write completed.
2879 * @retval VINF_SELM_SYNC_GDT in RC only - write completed.
2880 *
2881 * @retval VINF_EM_DBG_STOP in RC and R0 - write completed.
2882 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - write completed.
2883 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2884 *
2885 * @retval VINF_IOM_R3_MMIO_WRITE in RC and R0.
2886 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2887 * @retval VINF_IOM_R3_MMIO_COMMIT_WRITE in RC and R0.
2888 *
2889 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT in RC only - write completed.
2890 * @retval VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT in RC only.
2891 * @retval VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT in RC only.
2892 * @retval VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT in RC only.
2893 * @retval VINF_CSAM_PENDING_ACTION in RC only.
2894 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2895 *
2896 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2897 * haven't been cleared for strict status codes yet.
2898 *
2899 *
2900 * @param pVM The cross context VM structure.
2901 * @param GCPhys Physical address to write to.
2902 * @param pvBuf What to write.
2903 * @param cbWrite How many bytes to write.
2904 * @param enmOrigin Who is calling.
2905 */
2906VMMDECL(VBOXSTRICTRC) PGMPhysWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2907{
2908 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2909 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2910 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2911
2912 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWrite));
2913 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2914
2915 PGM_LOCK_VOID(pVM);
2916
2917 /*
2918 * Copy loop on ram ranges.
2919 */
2920 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2921 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2922 for (;;)
2923 {
2924 /* Inside range or not? */
2925 if (pRam && GCPhys >= pRam->GCPhys)
2926 {
2927 /*
2928 * Must work our way thru this page by page.
2929 */
2930 RTGCPTR off = GCPhys - pRam->GCPhys;
2931 while (off < pRam->cb)
2932 {
2933 RTGCPTR iPage = off >> PAGE_SHIFT;
2934 PPGMPAGE pPage = &pRam->aPages[iPage];
2935 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2936 if (cb > cbWrite)
2937 cb = cbWrite;
2938
2939 /*
2940 * Normal page? Get the pointer to it.
2941 */
2942 if ( !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
2943 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2944 {
2945 PGMPAGEMAPLOCK PgMpLck;
2946 void *pvDst;
2947 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
2948 if (RT_SUCCESS(rc))
2949 {
2950 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2951 memcpy(pvDst, pvBuf, cb);
2952 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2953 }
2954 /* Ignore writes to ballooned pages. */
2955 else if (!PGM_PAGE_IS_BALLOONED(pPage))
2956 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2957 pRam->GCPhys + off, pPage, rc));
2958 }
2959 /*
2960 * Active WRITE or ALL access handlers.
2961 */
2962 else
2963 {
2964 VBOXSTRICTRC rcStrict2 = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2965 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2966 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2967 else
2968 {
2969 PGM_UNLOCK(pVM);
2970 return rcStrict2;
2971 }
2972 }
2973
2974 /* next page */
2975 if (cb >= cbWrite)
2976 {
2977 PGM_UNLOCK(pVM);
2978 return rcStrict;
2979 }
2980
2981 cbWrite -= cb;
2982 off += cb;
2983 pvBuf = (const char *)pvBuf + cb;
2984 } /* walk pages in ram range */
2985
2986 GCPhys = pRam->GCPhysLast + 1;
2987 }
2988 else
2989 {
2990 /*
2991 * Unassigned address space, skip it.
2992 */
2993 if (!pRam)
2994 break;
2995 size_t cb = pRam->GCPhys - GCPhys;
2996 if (cb >= cbWrite)
2997 break;
2998 cbWrite -= cb;
2999 pvBuf = (const char *)pvBuf + cb;
3000 GCPhys += cb;
3001 }
3002
3003 /* Advance range if necessary. */
3004 while (pRam && GCPhys > pRam->GCPhysLast)
3005 pRam = pRam->CTX_SUFF(pNext);
3006 } /* Ram range walk */
3007
3008 PGM_UNLOCK(pVM);
3009 return rcStrict;
3010}
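
/*
 * Illustrative usage sketch (comment only, not part of the build): write a
 * value through the handler-aware path; MMIO and write-monitored pages are
 * handled for the caller.  GCPhysExample is a made-up address and the
 * origin should describe the real caller.
 *
 *     uint32_t const u32Value = UINT32_C(0xdeadbeef);
 *     VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhysExample, &u32Value, sizeof(u32Value), PGMACCESSORIGIN_IEM);
 *     if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *         Log(("write failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 */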
3011
3012
3013/**
3014 * Read from guest physical memory by GC physical address, bypassing
3015 * MMIO and access handlers.
3016 *
3017 * @returns VBox status code.
3018 * @param pVM The cross context VM structure.
3019 * @param pvDst The destination address.
3020 * @param GCPhysSrc The source address (GC physical address).
3021 * @param cb The number of bytes to read.
3022 */
3023VMMDECL(int) PGMPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
3024{
3025 /*
3026 * Treat the first page as a special case.
3027 */
3028 if (!cb)
3029 return VINF_SUCCESS;
3030
3031 /* map the 1st page */
3032 void const *pvSrc;
3033 PGMPAGEMAPLOCK Lock;
3034 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3035 if (RT_FAILURE(rc))
3036 return rc;
3037
3038 /* optimize for the case where access is completely within the first page. */
3039 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
3040 if (RT_LIKELY(cb <= cbPage))
3041 {
3042 memcpy(pvDst, pvSrc, cb);
3043 PGMPhysReleasePageMappingLock(pVM, &Lock);
3044 return VINF_SUCCESS;
3045 }
3046
3047 /* copy to the end of the page. */
3048 memcpy(pvDst, pvSrc, cbPage);
3049 PGMPhysReleasePageMappingLock(pVM, &Lock);
3050 GCPhysSrc += cbPage;
3051 pvDst = (uint8_t *)pvDst + cbPage;
3052 cb -= cbPage;
3053
3054 /*
3055 * Page by page.
3056 */
3057 for (;;)
3058 {
3059 /* map the page */
3060 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3061 if (RT_FAILURE(rc))
3062 return rc;
3063
3064 /* last page? */
3065 if (cb <= PAGE_SIZE)
3066 {
3067 memcpy(pvDst, pvSrc, cb);
3068 PGMPhysReleasePageMappingLock(pVM, &Lock);
3069 return VINF_SUCCESS;
3070 }
3071
3072 /* copy the entire page and advance */
3073 memcpy(pvDst, pvSrc, PAGE_SIZE);
3074 PGMPhysReleasePageMappingLock(pVM, &Lock);
3075 GCPhysSrc += PAGE_SIZE;
3076 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3077 cb -= PAGE_SIZE;
3078 }
3079 /* won't ever get here. */
3080}
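
/*
 * Illustrative usage sketch (comment only, not part of the build): raw read
 * that ignores access handlers, e.g. for inspecting a guest structure from
 * debugger-like code.  GCPhysExample is a made-up address.
 *
 *     uint8_t abHdr[64];
 *     int rc = PGMPhysSimpleReadGCPhys(pVM, abHdr, GCPhysExample, sizeof(abHdr));
 *     AssertRCReturn(rc, rc);
 */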
3081
3082
3083/**
3084 * Write to guest physical memory by GC physical address, bypassing MMIO
3085 * and access handlers.
3088 *
3089 * @returns VBox status code.
3090 * @param pVM The cross context VM structure.
3091 * @param GCPhysDst The GC physical address of the destination.
3092 * @param pvSrc The source buffer.
3093 * @param cb The number of bytes to write.
3094 */
3095VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
3096{
3097 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
3098
3099 /*
3100 * Treat the first page as a special case.
3101 */
3102 if (!cb)
3103 return VINF_SUCCESS;
3104
3105 /* map the 1st page */
3106 void *pvDst;
3107 PGMPAGEMAPLOCK Lock;
3108 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3109 if (RT_FAILURE(rc))
3110 return rc;
3111
3112 /* optimize for the case where access is completely within the first page. */
3113 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
3114 if (RT_LIKELY(cb <= cbPage))
3115 {
3116 memcpy(pvDst, pvSrc, cb);
3117 PGMPhysReleasePageMappingLock(pVM, &Lock);
3118 return VINF_SUCCESS;
3119 }
3120
3121 /* copy to the end of the page. */
3122 memcpy(pvDst, pvSrc, cbPage);
3123 PGMPhysReleasePageMappingLock(pVM, &Lock);
3124 GCPhysDst += cbPage;
3125 pvSrc = (const uint8_t *)pvSrc + cbPage;
3126 cb -= cbPage;
3127
3128 /*
3129 * Page by page.
3130 */
3131 for (;;)
3132 {
3133 /* map the page */
3134 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3135 if (RT_FAILURE(rc))
3136 return rc;
3137
3138 /* last page? */
3139 if (cb <= PAGE_SIZE)
3140 {
3141 memcpy(pvDst, pvSrc, cb);
3142 PGMPhysReleasePageMappingLock(pVM, &Lock);
3143 return VINF_SUCCESS;
3144 }
3145
3146 /* copy the entire page and advance */
3147 memcpy(pvDst, pvSrc, PAGE_SIZE);
3148 PGMPhysReleasePageMappingLock(pVM, &Lock);
3149 GCPhysDst += PAGE_SIZE;
3150 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3151 cb -= PAGE_SIZE;
3152 }
3153 /* won't ever get here. */
3154}
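
/*
 * Illustrative usage sketch (comment only, not part of the build): raw write
 * that bypasses MMIO and access handlers, so it must not be used on
 * addresses a device or the page pool may be monitoring.  GCPhysExample and
 * abBlob are made-up names.
 *
 *     uint8_t const abBlob[16] = { 0 };
 *     int rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysExample, abBlob, sizeof(abBlob));
 *     AssertRC(rc);
 */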
3155
3156
3157/**
3158 * Read from guest physical memory referenced by GC pointer.
3159 *
3160 * This function uses the current CR3/CR0/CR4 of the guest and will
3161 * bypass access handlers and not set any accessed bits.
3162 *
3163 * @returns VBox status code.
3164 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3165 * @param pvDst The destination address.
3166 * @param GCPtrSrc The source address (GC pointer).
3167 * @param cb The number of bytes to read.
3168 */
3169VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3170{
3171 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3172/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3173
3174 /*
3175 * Treat the first page as a special case.
3176 */
3177 if (!cb)
3178 return VINF_SUCCESS;
3179
3180 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleRead));
3181 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3182
3183 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3184 * when many VCPUs are fighting for the lock.
3185 */
3186 PGM_LOCK_VOID(pVM);
3187
3188 /* map the 1st page */
3189 void const *pvSrc;
3190 PGMPAGEMAPLOCK Lock;
3191 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3192 if (RT_FAILURE(rc))
3193 {
3194 PGM_UNLOCK(pVM);
3195 return rc;
3196 }
3197
3198 /* optimize for the case where access is completely within the first page. */
3199 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3200 if (RT_LIKELY(cb <= cbPage))
3201 {
3202 memcpy(pvDst, pvSrc, cb);
3203 PGMPhysReleasePageMappingLock(pVM, &Lock);
3204 PGM_UNLOCK(pVM);
3205 return VINF_SUCCESS;
3206 }
3207
3208 /* copy to the end of the page. */
3209 memcpy(pvDst, pvSrc, cbPage);
3210 PGMPhysReleasePageMappingLock(pVM, &Lock);
3211 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3212 pvDst = (uint8_t *)pvDst + cbPage;
3213 cb -= cbPage;
3214
3215 /*
3216 * Page by page.
3217 */
3218 for (;;)
3219 {
3220 /* map the page */
3221 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3222 if (RT_FAILURE(rc))
3223 {
3224 PGM_UNLOCK(pVM);
3225 return rc;
3226 }
3227
3228 /* last page? */
3229 if (cb <= PAGE_SIZE)
3230 {
3231 memcpy(pvDst, pvSrc, cb);
3232 PGMPhysReleasePageMappingLock(pVM, &Lock);
3233 PGM_UNLOCK(pVM);
3234 return VINF_SUCCESS;
3235 }
3236
3237 /* copy the entire page and advance */
3238 memcpy(pvDst, pvSrc, PAGE_SIZE);
3239 PGMPhysReleasePageMappingLock(pVM, &Lock);
3240 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
3241 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3242 cb -= PAGE_SIZE;
3243 }
3244 /* won't ever get here. */
3245}
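
/*
 * Illustrative usage sketch (comment only, not part of the build): copy a
 * guest value by virtual address without touching the accessed bit; must be
 * called on the EMT of pVCpu.  GCPtrExample is a made-up address.
 *
 *     uint64_t u64Qword = 0;
 *     int rc = PGMPhysSimpleReadGCPtr(pVCpu, &u64Qword, GCPtrExample, sizeof(u64Qword));
 *     if (RT_FAILURE(rc))
 *         return rc;
 */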
3246
3247
3248/**
3249 * Write to guest physical memory referenced by GC pointer.
3250 *
3251 * This function uses the current CR3/CR0/CR4 of the guest and will
3252 * bypass access handlers and not set dirty or accessed bits.
3253 *
3254 * @returns VBox status code.
3255 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3256 * @param GCPtrDst The destination address (GC pointer).
3257 * @param pvSrc The source address.
3258 * @param cb The number of bytes to write.
3259 */
3260VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3261{
3262 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3263 VMCPU_ASSERT_EMT(pVCpu);
3264
3265 /*
3266 * Treat the first page as a special case.
3267 */
3268 if (!cb)
3269 return VINF_SUCCESS;
3270
3271 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWrite));
3272 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3273
3274 /* map the 1st page */
3275 void *pvDst;
3276 PGMPAGEMAPLOCK Lock;
3277 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3278 if (RT_FAILURE(rc))
3279 return rc;
3280
3281 /* optimize for the case where access is completely within the first page. */
3282 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3283 if (RT_LIKELY(cb <= cbPage))
3284 {
3285 memcpy(pvDst, pvSrc, cb);
3286 PGMPhysReleasePageMappingLock(pVM, &Lock);
3287 return VINF_SUCCESS;
3288 }
3289
3290 /* copy to the end of the page. */
3291 memcpy(pvDst, pvSrc, cbPage);
3292 PGMPhysReleasePageMappingLock(pVM, &Lock);
3293 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3294 pvSrc = (const uint8_t *)pvSrc + cbPage;
3295 cb -= cbPage;
3296
3297 /*
3298 * Page by page.
3299 */
3300 for (;;)
3301 {
3302 /* map the page */
3303 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3304 if (RT_FAILURE(rc))
3305 return rc;
3306
3307 /* last page? */
3308 if (cb <= PAGE_SIZE)
3309 {
3310 memcpy(pvDst, pvSrc, cb);
3311 PGMPhysReleasePageMappingLock(pVM, &Lock);
3312 return VINF_SUCCESS;
3313 }
3314
3315 /* copy the entire page and advance */
3316 memcpy(pvDst, pvSrc, PAGE_SIZE);
3317 PGMPhysReleasePageMappingLock(pVM, &Lock);
3318 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3319 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3320 cb -= PAGE_SIZE;
3321 }
3322 /* won't ever get here. */
3323}
3324
3325
3326/**
3327 * Write to guest physical memory referenced by GC pointer and update the PTE.
3328 *
3329 * This function uses the current CR3/CR0/CR4 of the guest and will
3330 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3331 *
3332 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3333 *
3334 * @returns VBox status code.
3335 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3336 * @param GCPtrDst The destination address (GC pointer).
3337 * @param pvSrc The source address.
3338 * @param cb The number of bytes to write.
3339 */
3340VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3341{
3342 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3343 VMCPU_ASSERT_EMT(pVCpu);
3344
3345 /*
3346 * Treat the first page as a special case.
3347 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3348 */
3349 if (!cb)
3350 return VINF_SUCCESS;
3351
3352 /* map the 1st page */
3353 void *pvDst;
3354 PGMPAGEMAPLOCK Lock;
3355 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3356 if (RT_FAILURE(rc))
3357 return rc;
3358
3359 /* optimize for the case where access is completely within the first page. */
3360 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3361 if (RT_LIKELY(cb <= cbPage))
3362 {
3363 memcpy(pvDst, pvSrc, cb);
3364 PGMPhysReleasePageMappingLock(pVM, &Lock);
3365 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3366 return VINF_SUCCESS;
3367 }
3368
3369 /* copy to the end of the page. */
3370 memcpy(pvDst, pvSrc, cbPage);
3371 PGMPhysReleasePageMappingLock(pVM, &Lock);
3372 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3373 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3374 pvSrc = (const uint8_t *)pvSrc + cbPage;
3375 cb -= cbPage;
3376
3377 /*
3378 * Page by page.
3379 */
3380 for (;;)
3381 {
3382 /* map the page */
3383 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3384 if (RT_FAILURE(rc))
3385 return rc;
3386
3387 /* last page? */
3388 if (cb <= PAGE_SIZE)
3389 {
3390 memcpy(pvDst, pvSrc, cb);
3391 PGMPhysReleasePageMappingLock(pVM, &Lock);
3392 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3393 return VINF_SUCCESS;
3394 }
3395
3396 /* copy the entire page and advance */
3397 memcpy(pvDst, pvSrc, PAGE_SIZE);
3398 PGMPhysReleasePageMappingLock(pVM, &Lock);
3399 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3400 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3401 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3402 cb -= PAGE_SIZE;
3403 }
3404 /* won't ever get here. */
3405}
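/*
 * Illustrative sketch (not part of the original file): same call pattern as the
 * PGMPhysSimpleWriteGCPtr example above, but for cases where the guest PTEs
 * must end up with the accessed and dirty bits set, e.g. when completing a
 * store on the guest's behalf.  The value written is hypothetical.
 */
#if 0 /* example only */
static int pgmSampleDirtyWrite(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, uint64_t uValue)
{
    /* Handlers are still bypassed, but X86_PTE_A | X86_PTE_D get set on the PTE(s). */
    return PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, &uValue, sizeof(uValue));
}
#endif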
3406
3407
3408/**
3409 * Read from guest physical memory referenced by GC pointer.
3410 *
3411 * This function uses the current CR3/CR0/CR4 of the guest and will
3412 * respect access handlers and set accessed bits.
3413 *
3414 * @returns Strict VBox status, see PGMPhysRead for details.
3415 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3416 * specified virtual address.
3417 *
3418 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3419 * @param pvDst The destination address.
3420 * @param GCPtrSrc The source address (GC pointer).
3421 * @param cb The number of bytes to read.
3422 * @param enmOrigin Who is calling.
3423 * @thread EMT(pVCpu)
3424 */
3425VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3426{
3427 RTGCPHYS GCPhys;
3428 uint64_t fFlags;
3429 int rc;
3430 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3431 VMCPU_ASSERT_EMT(pVCpu);
3432
3433 /*
3434 * Anything to do?
3435 */
3436 if (!cb)
3437 return VINF_SUCCESS;
3438
3439 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3440
3441 /*
3442 * Optimize reads within a single page.
3443 */
3444 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3445 {
3446 /* Convert virtual to physical address + flags */
3447 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3448 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3449 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3450
3451 /* mark the guest page as accessed. */
3452 if (!(fFlags & X86_PTE_A))
3453 {
3454 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3455 AssertRC(rc);
3456 }
3457
3458 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3459 }
3460
3461 /*
3462 * Page by page.
3463 */
3464 for (;;)
3465 {
3466 /* Convert virtual to physical address + flags */
3467 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3468 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3469 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3470
3471 /* mark the guest page as accessed. */
3472 if (!(fFlags & X86_PTE_A))
3473 {
3474 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3475 AssertRC(rc);
3476 }
3477
3478 /* copy */
3479 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3480 if (cbRead < cb)
3481 {
3482 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3483 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3484 { /* likely */ }
3485 else
3486 return rcStrict;
3487 }
3488 else /* Last page (cbRead is PAGE_SIZE, we only need cb!) */
3489 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3490
3491 /* next */
3492 Assert(cb > cbRead);
3493 cb -= cbRead;
3494 pvDst = (uint8_t *)pvDst + cbRead;
3495 GCPtrSrc += cbRead;
3496 }
3497}
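/*
 * Illustrative usage sketch (not part of the original file): reading guest
 * virtual memory with access handlers respected.  Anything other than
 * VINF_SUCCESS (e.g. VERR_PAGE_TABLE_NOT_PRESENT or a ring-0 deferral status
 * from a handler) is handed straight back for the caller to deal with.  The
 * access origin is left as a parameter rather than assumed here.
 */
#if 0 /* example only */
static VBOXSTRICTRC pgmSampleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb,
                                       PGMACCESSORIGIN enmOrigin)
{
    VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, pvDst, GCPtrSrc, cb, enmOrigin);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    { /* likely */ }
    else
        return rcStrict;
    /* ... consume the bytes in pvDst ... */
    return VINF_SUCCESS;
}
#endif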
3498
3499
3500/**
3501 * Write to guest physical memory referenced by GC pointer.
3502 *
3503 * This function uses the current CR3/CR0/CR4 of the guest and will
3504 * respect access handlers and set dirty and accessed bits.
3505 *
3506 * @returns Strict VBox status, see PGMPhysWrite for details.
3507 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3508 * specified virtual address.
3509 *
3510 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3511 * @param GCPtrDst The destination address (GC pointer).
3512 * @param pvSrc The source address.
3513 * @param cb The number of bytes to write.
3514 * @param enmOrigin Who is calling.
3515 */
3516VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3517{
3518 RTGCPHYS GCPhys;
3519 uint64_t fFlags;
3520 int rc;
3521 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3522 VMCPU_ASSERT_EMT(pVCpu);
3523
3524 /*
3525 * Anything to do?
3526 */
3527 if (!cb)
3528 return VINF_SUCCESS;
3529
3530 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3531
3532 /*
3533 * Optimize writes within a single page.
3534 */
3535 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3536 {
3537 /* Convert virtual to physical address + flags */
3538 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3539 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3540 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3541
3542 /* Mention when we ignore X86_PTE_RW... */
3543 if (!(fFlags & X86_PTE_RW))
3544 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3545
3546 /* Mark the guest page as accessed and dirty if necessary. */
3547 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3548 {
3549 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3550 AssertRC(rc);
3551 }
3552
3553 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3554 }
3555
3556 /*
3557 * Page by page.
3558 */
3559 for (;;)
3560 {
3561 /* Convert virtual to physical address + flags */
3562 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3563 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3564 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3565
3566 /* Mention when we ignore X86_PTE_RW... */
3567 if (!(fFlags & X86_PTE_RW))
3568 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3569
3570 /* Mark the guest page as accessed and dirty if necessary. */
3571 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3572 {
3573 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3574 AssertRC(rc);
3575 }
3576
3577 /* copy */
3578 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3579 if (cbWrite < cb)
3580 {
3581 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3582 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3583 { /* likely */ }
3584 else
3585 return rcStrict;
3586 }
3587 else /* Last page (cbWrite is PAGE_SIZE, we only need cb!) */
3588 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3589
3590 /* next */
3591 Assert(cb > cbWrite);
3592 cb -= cbWrite;
3593 pvSrc = (uint8_t *)pvSrc + cbWrite;
3594 GCPtrDst += cbWrite;
3595 }
3596}
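/*
 * Illustrative sketch (not part of the original file): a guest-virtual
 * read-modify-write cycle built from PGMPhysReadGCPtr and PGMPhysWriteGCPtr.
 * Both calls go through access handlers and update the A/D bits as documented;
 * the flag being OR'ed in is hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC pgmSampleRmwGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrVar, PGMACCESSORIGIN enmOrigin)
{
    uint32_t uVal = 0;
    VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, &uVal, GCPtrVar, sizeof(uVal), enmOrigin);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uVal |= UINT32_C(1);                /* hypothetical modification */
    return PGMPhysWriteGCPtr(pVCpu, GCPtrVar, &uVal, sizeof(uVal), enmOrigin);
}
#endif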
3597
3598
3599/**
3600 * Performs a read of guest virtual memory for instruction emulation.
3601 *
3602 * This will check permissions, raise exceptions and update the access bits.
3603 *
3604 * The current implementation will bypass all access handlers. It may later be
3605 * changed to at least respect MMIO.
3606 *
3607 *
3608 * @returns VBox status code suitable to scheduling.
3609 * @retval VINF_SUCCESS if the read was performed successfully.
3610 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3611 *
3612 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3613 * @param pCtxCore The context core.
3614 * @param pvDst Where to put the bytes we've read.
3615 * @param GCPtrSrc The source address.
3616 * @param cb The number of bytes to read. Not more than a page.
3617 *
3618 * @remark This function will dynamically map physical pages in GC. This may unmap
3619 * mappings done by the caller. Be careful!
3620 */
3621VMMDECL(int) PGMPhysInterpretedRead(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3622{
3623 NOREF(pCtxCore);
3624 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3625 Assert(cb <= PAGE_SIZE);
3626 VMCPU_ASSERT_EMT(pVCpu);
3627
3628/** @todo r=bird: This isn't perfect!
3629 * -# It's not checking for reserved bits being 1.
3630 * -# It's not correctly dealing with the access bit.
3631 * -# It's not respecting MMIO memory or any other access handlers.
3632 */
3633 /*
3634 * 1. Translate virtual to physical. This may fault.
3635 * 2. Map the physical address.
3636 * 3. Do the read operation.
3637 * 4. Set access bits if required.
3638 */
3639 int rc;
3640 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3641 if (cb <= cb1)
3642 {
3643 /*
3644 * Not crossing pages.
3645 */
3646 RTGCPHYS GCPhys;
3647 uint64_t fFlags;
3648 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3649 if (RT_SUCCESS(rc))
3650 {
3651 /** @todo we should check reserved bits ... */
3652 PGMPAGEMAPLOCK PgMpLck;
3653 void const *pvSrc;
3654 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
3655 switch (rc)
3656 {
3657 case VINF_SUCCESS:
3658 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3659 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3660 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3661 break;
3662 case VERR_PGM_PHYS_PAGE_RESERVED:
3663 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3664 memset(pvDst, 0xff, cb);
3665 break;
3666 default:
3667 Assert(RT_FAILURE_NP(rc));
3668 return rc;
3669 }
3670
3671 /** @todo access bit emulation isn't 100% correct. */
3672 if (!(fFlags & X86_PTE_A))
3673 {
3674 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3675 AssertRC(rc);
3676 }
3677 return VINF_SUCCESS;
3678 }
3679 }
3680 else
3681 {
3682 /*
3683 * Crosses pages.
3684 */
3685 size_t cb2 = cb - cb1;
3686 uint64_t fFlags1;
3687 RTGCPHYS GCPhys1;
3688 uint64_t fFlags2;
3689 RTGCPHYS GCPhys2;
3690 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3691 if (RT_SUCCESS(rc))
3692 {
3693 rc = PGMGstGetPage(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3694 if (RT_SUCCESS(rc))
3695 {
3696 /** @todo we should check reserved bits ... */
3697 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3698 PGMPAGEMAPLOCK PgMpLck;
3699 void const *pvSrc1;
3700 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
3701 switch (rc)
3702 {
3703 case VINF_SUCCESS:
3704 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3705 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3706 break;
3707 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3708 memset(pvDst, 0xff, cb1);
3709 break;
3710 default:
3711 Assert(RT_FAILURE_NP(rc));
3712 return rc;
3713 }
3714
3715 void const *pvSrc2;
3716 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
3717 switch (rc)
3718 {
3719 case VINF_SUCCESS:
3720 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3721 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3722 break;
3723 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3724 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3725 break;
3726 default:
3727 Assert(RT_FAILURE_NP(rc));
3728 return rc;
3729 }
3730
3731 if (!(fFlags1 & X86_PTE_A))
3732 {
3733 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3734 AssertRC(rc);
3735 }
3736 if (!(fFlags2 & X86_PTE_A))
3737 {
3738 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3739 AssertRC(rc);
3740 }
3741 return VINF_SUCCESS;
3742 }
3743 }
3744 }
3745
3746 /*
3747 * Raise a #PF.
3748 */
3749 uint32_t uErr;
3750
3751 /* Get the current privilege level. */
3752 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3753 switch (rc)
3754 {
3755 case VINF_SUCCESS:
3756 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3757 break;
3758
3759 case VERR_PAGE_NOT_PRESENT:
3760 case VERR_PAGE_TABLE_NOT_PRESENT:
3761 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3762 break;
3763
3764 default:
3765 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3766 return rc;
3767 }
3768 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3769 rc = TRPMAssertXcptPF(pVCpu, GCPtrSrc, uErr);
3770 if (RT_SUCCESS(rc))
3771 return VINF_EM_RAW_GUEST_TRAP;
3772 return rc;
3773}
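/*
 * Illustrative sketch (not part of the original file): how an instruction
 * emulation path might fetch an operand with PGMPhysInterpretedRead and treat
 * VINF_EM_RAW_GUEST_TRAP as "a #PF is pending, resume the guest".  pCtxCore is
 * unused by the function (NOREF above), so NULL is passed in this sketch.
 */
#if 0 /* example only */
static int pgmSampleFetchOperand(PVMCPUCC pVCpu, RTGCUINTPTR GCPtrOperand, uint64_t *puValue)
{
    int rc = PGMPhysInterpretedRead(pVCpu, NULL /*pCtxCore*/, puValue, GCPtrOperand, sizeof(*puValue));
    if (rc == VINF_EM_RAW_GUEST_TRAP)
        return rc;                      /* #PF queued by TRPMAssertXcptPF; let the caller reenter the guest. */
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
#endif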
3774
3775
3776/**
3777 * Performs a read of guest virtual memory for instruction emulation.
3778 *
3779 * This will check permissions, raise exceptions and update the access bits.
3780 *
3781 * The current implementation will bypass all access handlers. It may later be
3782 * changed to at least respect MMIO.
3783 *
3784 *
3785 * @returns VBox status code suitable to scheduling.
3786 * @retval VINF_SUCCESS if the read was performed successfully.
3787 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3788 *
3789 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3790 * @param pCtxCore The context core.
3791 * @param pvDst Where to put the bytes we've read.
3792 * @param GCPtrSrc The source address.
3793 * @param cb The number of bytes to read. Not more than a page.
3794 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3795 * an appropriate error status will be returned (nothing
3796 * informational at all).
3797 *
3798 *
3799 * @remarks Takes the PGM lock.
3800 * @remarks A page fault on the 2nd page of the access will be raised without
3801 * writing the bits on the first page since we're ASSUMING that the
3802 * caller is emulating an instruction access.
3803 * @remarks This function will dynamically map physical pages in GC. This may
3804 * unmap mappings done by the caller. Be careful!
3805 */
3806VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,
3807 bool fRaiseTrap)
3808{
3809 NOREF(pCtxCore);
3810 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3811 Assert(cb <= PAGE_SIZE);
3812 VMCPU_ASSERT_EMT(pVCpu);
3813
3814 /*
3815 * 1. Translate virtual to physical. This may fault.
3816 * 2. Map the physical address.
3817 * 3. Do the read operation.
3818 * 4. Set access bits if required.
3819 */
3820 int rc;
3821 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3822 if (cb <= cb1)
3823 {
3824 /*
3825 * Not crossing pages.
3826 */
3827 RTGCPHYS GCPhys;
3828 uint64_t fFlags;
3829 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3830 if (RT_SUCCESS(rc))
3831 {
3832 if (1) /** @todo we should check reserved bits ... */
3833 {
3834 const void *pvSrc;
3835 PGMPAGEMAPLOCK Lock;
3836 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3837 switch (rc)
3838 {
3839 case VINF_SUCCESS:
3840 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3841 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3842 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3843 PGMPhysReleasePageMappingLock(pVM, &Lock);
3844 break;
3845 case VERR_PGM_PHYS_PAGE_RESERVED:
3846 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3847 memset(pvDst, 0xff, cb);
3848 break;
3849 default:
3850 AssertMsgFailed(("%Rrc\n", rc));
3851 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3852 return rc;
3853 }
3854
3855 if (!(fFlags & X86_PTE_A))
3856 {
3857 /** @todo access bit emulation isn't 100% correct. */
3858 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3859 AssertRC(rc);
3860 }
3861 return VINF_SUCCESS;
3862 }
3863 }
3864 }
3865 else
3866 {
3867 /*
3868 * Crosses pages.
3869 */
3870 size_t cb2 = cb - cb1;
3871 uint64_t fFlags1;
3872 RTGCPHYS GCPhys1;
3873 uint64_t fFlags2;
3874 RTGCPHYS GCPhys2;
3875 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3876 if (RT_SUCCESS(rc))
3877 {
3878 rc = PGMGstGetPage(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3879 if (RT_SUCCESS(rc))
3880 {
3881 if (1) /** @todo we should check reserved bits ... */
3882 {
3883 const void *pvSrc;
3884 PGMPAGEMAPLOCK Lock;
3885 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3886 switch (rc)
3887 {
3888 case VINF_SUCCESS:
3889 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3890 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3891 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3892 PGMPhysReleasePageMappingLock(pVM, &Lock);
3893 break;
3894 case VERR_PGM_PHYS_PAGE_RESERVED:
3895 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3896 memset(pvDst, 0xff, cb1);
3897 break;
3898 default:
3899 AssertMsgFailed(("%Rrc\n", rc));
3900 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3901 return rc;
3902 }
3903
3904 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3905 switch (rc)
3906 {
3907 case VINF_SUCCESS:
3908 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3909 PGMPhysReleasePageMappingLock(pVM, &Lock);
3910 break;
3911 case VERR_PGM_PHYS_PAGE_RESERVED:
3912 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3913 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3914 break;
3915 default:
3916 AssertMsgFailed(("%Rrc\n", rc));
3917 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3918 return rc;
3919 }
3920
3921 if (!(fFlags1 & X86_PTE_A))
3922 {
3923 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3924 AssertRC(rc);
3925 }
3926 if (!(fFlags2 & X86_PTE_A))
3927 {
3928 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3929 AssertRC(rc);
3930 }
3931 return VINF_SUCCESS;
3932 }
3933 /* sort out which page */
3934 }
3935 else
3936 GCPtrSrc += cb1; /* fault on 2nd page */
3937 }
3938 }
3939
3940 /*
3941 * Raise a #PF if we're allowed to do that.
3942 */
3943 /* Calc the error bits. */
3944 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3945 uint32_t uErr;
3946 switch (rc)
3947 {
3948 case VINF_SUCCESS:
3949 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3950 rc = VERR_ACCESS_DENIED;
3951 break;
3952
3953 case VERR_PAGE_NOT_PRESENT:
3954 case VERR_PAGE_TABLE_NOT_PRESENT:
3955 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3956 break;
3957
3958 default:
3959 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3960 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3961 return rc;
3962 }
3963 if (fRaiseTrap)
3964 {
3965 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3966 rc = TRPMAssertXcptPF(pVCpu, GCPtrSrc, uErr);
3967 if (RT_SUCCESS(rc))
3968 return VINF_EM_RAW_GUEST_TRAP;
3969 return rc;
3970 }
3971 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3972 return rc;
3973}
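/*
 * Illustrative sketch (not part of the original file): probing guest virtual
 * memory without raising the #PF (fRaiseTrap=false).  On failure the function
 * returns a plain error such as VERR_ACCESS_DENIED or VERR_PAGE_NOT_PRESENT,
 * which this hypothetical helper collapses into a yes/no answer.
 */
#if 0 /* example only */
static bool pgmSampleTryPeek(PVMCPUCC pVCpu, RTGCUINTPTR GCPtrSrc, void *pvDst, size_t cb)
{
    Assert(cb <= PAGE_SIZE);
    int rc = PGMPhysInterpretedReadNoHandlers(pVCpu, NULL /*pCtxCore*/, pvDst, GCPtrSrc, cb,
                                              false /*fRaiseTrap*/);
    return RT_SUCCESS(rc);
}
#endif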
3974
3975
3976/**
3977 * Performs a write to guest virtual memory for instruction emulation.
3978 *
3979 * This will check permissions, raise exceptions and update the dirty and access
3980 * bits.
3981 *
3982 * @returns VBox status code suitable to scheduling.
3983 * @retval VINF_SUCCESS if the write was performed successfully.
3984 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3985 *
3986 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3987 * @param pCtxCore The context core.
3988 * @param GCPtrDst The destination address.
3989 * @param pvSrc What to write.
3990 * @param cb The number of bytes to write. Not more than a page.
3991 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3992 * an appropriate error status will be returned (nothing
3993 * informational at all).
3994 *
3995 * @remarks Takes the PGM lock.
3996 * @remarks A page fault on the 2nd page of the access will be raised without
3997 * writing the bits on the first page since we're ASSUMING that the
3998 * caller is emulating an instruction access.
3999 * @remarks This function will dynamically map physical pages in GC. This may
4000 * unmap mappings done by the caller. Be careful!
4001 */
4002VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,
4003 size_t cb, bool fRaiseTrap)
4004{
4005 NOREF(pCtxCore);
4006 Assert(cb <= PAGE_SIZE);
4007 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4008 VMCPU_ASSERT_EMT(pVCpu);
4009
4010 /*
4011 * 1. Translate virtual to physical. This may fault.
4012 * 2. Map the physical address.
4013 * 3. Do the write operation.
4014 * 4. Set access bits if required.
4015 */
4016 /** @todo Since this method is frequently used by EMInterpret or IOM
4017 * upon a write fault to a write-access monitored page, we can
4018 * reuse the guest page table walking from the \#PF code. */
4019 int rc;
4020 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
4021 if (cb <= cb1)
4022 {
4023 /*
4024 * Not crossing pages.
4025 */
4026 RTGCPHYS GCPhys;
4027 uint64_t fFlags;
4028 rc = PGMGstGetPage(pVCpu, GCPtrDst, &fFlags, &GCPhys);
4029 if (RT_SUCCESS(rc))
4030 {
4031 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
4032 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
4033 && CPUMGetGuestCPL(pVCpu) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
4034 {
4035 void *pvDst;
4036 PGMPAGEMAPLOCK Lock;
4037 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
4038 switch (rc)
4039 {
4040 case VINF_SUCCESS:
4041 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
4042 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
4043 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
4044 PGMPhysReleasePageMappingLock(pVM, &Lock);
4045 break;
4046 case VERR_PGM_PHYS_PAGE_RESERVED:
4047 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
4048 /* bit bucket */
4049 break;
4050 default:
4051 AssertMsgFailed(("%Rrc\n", rc));
4052 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4053 return rc;
4054 }
4055
4056 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
4057 {
4058 /** @todo dirty & access bit emulation isn't 100% correct. */
4059 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
4060 AssertRC(rc);
4061 }
4062 return VINF_SUCCESS;
4063 }
4064 rc = VERR_ACCESS_DENIED;
4065 }
4066 }
4067 else
4068 {
4069 /*
4070 * Crosses pages.
4071 */
4072 size_t cb2 = cb - cb1;
4073 uint64_t fFlags1;
4074 RTGCPHYS GCPhys1;
4075 uint64_t fFlags2;
4076 RTGCPHYS GCPhys2;
4077 rc = PGMGstGetPage(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
4078 if (RT_SUCCESS(rc))
4079 {
4080 rc = PGMGstGetPage(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
4081 if (RT_SUCCESS(rc))
4082 {
4083 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
4084 && (fFlags2 & X86_PTE_RW))
4085 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
4086 && CPUMGetGuestCPL(pVCpu) <= 2) )
4087 {
4088 void *pvDst;
4089 PGMPAGEMAPLOCK Lock;
4090 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
4091 switch (rc)
4092 {
4093 case VINF_SUCCESS:
4094 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
4095 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
4096 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
4097 PGMPhysReleasePageMappingLock(pVM, &Lock);
4098 break;
4099 case VERR_PGM_PHYS_PAGE_RESERVED:
4100 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
4101 /* bit bucket */
4102 break;
4103 default:
4104 AssertMsgFailed(("%Rrc\n", rc));
4105 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4106 return rc;
4107 }
4108
4109 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
4110 switch (rc)
4111 {
4112 case VINF_SUCCESS:
4113 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
4114 PGMPhysReleasePageMappingLock(pVM, &Lock);
4115 break;
4116 case VERR_PGM_PHYS_PAGE_RESERVED:
4117 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
4118 /* bit bucket */
4119 break;
4120 default:
4121 AssertMsgFailed(("%Rrc\n", rc));
4122 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4123 return rc;
4124 }
4125
4126 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
4127 {
4128 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
4129 AssertRC(rc);
4130 }
4131 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
4132 {
4133 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
4134 AssertRC(rc);
4135 }
4136 return VINF_SUCCESS;
4137 }
4138 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
4139 GCPtrDst += cb1; /* fault on the 2nd page. */
4140 rc = VERR_ACCESS_DENIED;
4141 }
4142 else
4143 GCPtrDst += cb1; /* fault on the 2nd page. */
4144 }
4145 }
4146
4147 /*
4148 * Raise a #PF if we're allowed to do that.
4149 */
4150 /* Calc the error bits. */
4151 uint32_t uErr;
4152 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
4153 switch (rc)
4154 {
4155 case VINF_SUCCESS:
4156 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
4157 rc = VERR_ACCESS_DENIED;
4158 break;
4159
4160 case VERR_ACCESS_DENIED:
4161 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
4162 break;
4163
4164 case VERR_PAGE_NOT_PRESENT:
4165 case VERR_PAGE_TABLE_NOT_PRESENT:
4166 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
4167 break;
4168
4169 default:
4170 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
4171 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4172 return rc;
4173 }
4174 if (fRaiseTrap)
4175 {
4176 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
4177 rc = TRPMAssertXcptPF(pVCpu, GCPtrDst, uErr);
4178 if (RT_SUCCESS(rc))
4179 return VINF_EM_RAW_GUEST_TRAP;
4180 return rc;
4181 }
4182 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
4183 return rc;
4184}
4185
4186
4187/**
4188 * Return the page type of the specified physical address.
4189 *
4190 * @returns The page type.
4191 * @param pVM The cross context VM structure.
4192 * @param GCPhys Guest physical address
4193 */
4194VMM_INT_DECL(PGMPAGETYPE) PGMPhysGetPageType(PVMCC pVM, RTGCPHYS GCPhys)
4195{
4196 PGM_LOCK_VOID(pVM);
4197 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
4198 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
4199 PGM_UNLOCK(pVM);
4200
4201 return enmPgType;
4202}
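/*
 * Illustrative sketch (not part of the original file): using the page type
 * query to tell whether PGM knows the page at all.  PGMPAGETYPE_INVALID is
 * what the function returns for addresses outside any registered RAM range;
 * the "is it known" policy below is made up for the example.
 */
#if 0 /* example only */
static bool pgmSampleIsKnownPage(PVMCC pVM, RTGCPHYS GCPhys)
{
    return PGMPhysGetPageType(pVM, GCPhys) != PGMPAGETYPE_INVALID;
}
#endif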
4203
4204
4205/**
4206 * Converts a GC physical address to a HC ring-3 pointer, with some
4207 * additional checks.
4208 *
4209 * @returns VBox status code (no informational statuses).
4210 *
4211 * @param pVM The cross context VM structure.
4212 * @param pVCpu The cross context virtual CPU structure of the
4213 * calling EMT.
4214 * @param GCPhys The GC physical address to convert. This API masks
4215 * the A20 line when necessary.
4216 * @param puTlbPhysRev Where to read the physical TLB revision. Needs to
4217 * be done while holding the PGM lock.
4218 * @param ppb Where to store the pointer corresponding to GCPhys
4219 * on success.
4220 * @param pfTlb The TLB flags and revision. We only add stuff.
4221 *
4222 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr and
4223 * PGMPhysIemGCPhys2Ptr.
4224 *
4225 * @thread EMT(pVCpu).
4226 */
4227VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
4228 R3R0PTRTYPE(uint8_t *) *ppb,
4229 uint64_t *pfTlb)
4230{
4231 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
4232 Assert(!(GCPhys & X86_PAGE_OFFSET_MASK));
4233
4234 PGM_LOCK_VOID(pVM);
4235
4236 PPGMRAMRANGE pRam;
4237 PPGMPAGE pPage;
4238 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4239 if (RT_SUCCESS(rc))
4240 {
4241 if (!PGM_PAGE_IS_BALLOONED(pPage))
4242 {
4243 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4244 {
4245 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
4246 {
4247 /*
4248 * No access handler.
4249 */
4250 switch (PGM_PAGE_GET_STATE(pPage))
4251 {
4252 case PGM_PAGE_STATE_ALLOCATED:
4253 *pfTlb |= *puTlbPhysRev;
4254 break;
4255 case PGM_PAGE_STATE_BALLOONED:
4256 AssertFailed();
4257 RT_FALL_THRU();
4258 case PGM_PAGE_STATE_ZERO:
4259 case PGM_PAGE_STATE_SHARED:
4260 case PGM_PAGE_STATE_WRITE_MONITORED:
4261 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
4262 break;
4263 }
4264
4265 PPGMPAGEMAPTLBE pTlbe;
4266 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4267 AssertLogRelRCReturn(rc, rc);
4268 *ppb = (uint8_t *)pTlbe->pv;
4269 }
4270 else if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
4271 {
4272 /*
4273 * MMIO or similar all access handler: Catch all access.
4274 */
4275 *pfTlb |= *puTlbPhysRev
4276 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4277 *ppb = NULL;
4278 }
4279 else
4280 {
4281 /*
4282 * Write access handler: Catch write accesses if active.
4283 */
4284 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
4285 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
4286 else
4287 switch (PGM_PAGE_GET_STATE(pPage))
4288 {
4289 case PGM_PAGE_STATE_ALLOCATED:
4290 *pfTlb |= *puTlbPhysRev;
4291 break;
4292 case PGM_PAGE_STATE_BALLOONED:
4293 AssertFailed();
4294 RT_FALL_THRU();
4295 case PGM_PAGE_STATE_ZERO:
4296 case PGM_PAGE_STATE_SHARED:
4297 case PGM_PAGE_STATE_WRITE_MONITORED:
4298 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
4299 break;
4300 }
4301
4302 PPGMPAGEMAPTLBE pTlbe;
4303 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4304 AssertLogRelRCReturn(rc, rc);
4305 *ppb = (uint8_t *)pTlbe->pv;
4306 }
4307 }
4308 else
4309 {
4310 /* Alias MMIO: For now, we catch all access. */
4311 *pfTlb |= *puTlbPhysRev
4312 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4313 *ppb = NULL;
4314 }
4315 }
4316 else
4317 {
4318 /* Ballooned: Shouldn't get here, but we read zero page via PGMPhysRead and writes go to /dev/null. */
4319 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4320 *ppb = NULL;
4321 }
4322 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 pPage=%R[pgmpage]\n", GCPhys, *ppb, *pfTlb, pPage));
4323 }
4324 else
4325 {
4326 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4327 *ppb = NULL;
4328 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 (rc=%Rrc)\n", GCPhys, *ppb, *pfTlb, rc));
4329 }
4330
4331 PGM_UNLOCK(pVM);
4332 return VINF_SUCCESS;
4333}
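/*
 * Illustrative sketch (not part of the original file): consuming the TLB flags
 * filled in by PGMPhysIemGCPhys2PtrNoLock.  GCPhysPage must be page aligned,
 * and a real caller would also fold the physical TLB revision into its own TLB
 * entry; that bookkeeping is omitted here.
 */
#if 0 /* example only */
static int pgmSampleQueryDirectPtr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysPage,
                                   uint64_t const volatile *puTlbPhysRev,
                                   R3R0PTRTYPE(uint8_t *) *ppbPage, bool *pfWritable)
{
    uint64_t fTlb = 0;
    int rc = PGMPhysIemGCPhys2PtrNoLock(pVM, pVCpu, GCPhysPage, puTlbPhysRev, ppbPage, &fTlb);
    AssertRCReturn(rc, rc);

    /* Pages that cannot be read or mapped directly must go through PGMPhysRead/PGMPhysWrite. */
    if (fTlb & (PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3))
        *ppbPage = NULL;
    *pfWritable = !(fTlb & PGMIEMGCPHYS2PTR_F_NO_WRITE);
    return VINF_SUCCESS;
}
#endif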
4334
4335
4336/**
4337 * Converts a GC physical address to a HC ring-3 pointer, with some
4338 * additional checks.
4339 *
4340 * @returns VBox status code (no informational statuses).
4341 * @retval VINF_SUCCESS on success.
4342 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4343 * access handler of some kind.
4344 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4345 * accesses or is odd in any way.
4346 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4347 *
4348 * @param pVM The cross context VM structure.
4349 * @param pVCpu The cross context virtual CPU structure of the
4350 * calling EMT.
4351 * @param GCPhys The GC physical address to convert. This API masks
4352 * the A20 line when necessary.
4353 * @param fWritable Whether write access is required.
4354 * @param fByPassHandlers Whether to bypass access handlers.
4355 * @param ppv Where to store the pointer corresponding to GCPhys
4356 * on success.
4357 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
4358 *
4359 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
4360 * @thread EMT(pVCpu).
4361 */
4362VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
4363 void **ppv, PPGMPAGEMAPLOCK pLock)
4364{
4365 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
4366
4367 PGM_LOCK_VOID(pVM);
4368
4369 PPGMRAMRANGE pRam;
4370 PPGMPAGE pPage;
4371 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4372 if (RT_SUCCESS(rc))
4373 {
4374 if (PGM_PAGE_IS_BALLOONED(pPage))
4375 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4376 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4377 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4378 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4379 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4380 rc = VINF_SUCCESS;
4381 else
4382 {
4383 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4384 {
4385 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4386 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4387 }
4388 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4389 {
4390 Assert(!fByPassHandlers);
4391 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4392 }
4393 }
4394 if (RT_SUCCESS(rc))
4395 {
4396 int rc2;
4397
4398 /* Make sure what we return is writable. */
4399 if (fWritable)
4400 switch (PGM_PAGE_GET_STATE(pPage))
4401 {
4402 case PGM_PAGE_STATE_ALLOCATED:
4403 break;
4404 case PGM_PAGE_STATE_BALLOONED:
4405 AssertFailed();
4406 break;
4407 case PGM_PAGE_STATE_ZERO:
4408 case PGM_PAGE_STATE_SHARED:
4409 case PGM_PAGE_STATE_WRITE_MONITORED:
4410 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
4411 AssertLogRelRCReturn(rc2, rc2);
4412 break;
4413 }
4414
4415 /* Get a ring-3 mapping of the address. */
4416 PPGMPAGEMAPTLBE pTlbe;
4417 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4418 AssertLogRelRCReturn(rc2, rc2);
4419
4420 /* Lock it and calculate the address. */
4421 if (fWritable)
4422 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
4423 else
4424 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
4425 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4426
4427 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
4428 }
4429 else
4430 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
4431
4432 /* else: handler catching all access, no pointer returned. */
4433 }
4434 else
4435 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
4436
4437 PGM_UNLOCK(pVM);
4438 return rc;
4439}
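/*
 * Illustrative sketch (not part of the original file): mapping a guest physical
 * location for a direct write and releasing the mapping lock again.  The
 * VERR_PGM_PHYS_TLB_* statuses documented above mean "fall back to PGMPhysWrite
 * so handlers run"; the sketch assumes the value does not cross a page boundary.
 */
#if 0 /* example only */
static int pgmSampleDirectWrite(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t uValue)
{
    void          *pv = NULL;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, true /*fWritable*/, false /*fByPassHandlers*/, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(pv, &uValue, sizeof(uValue));
        PGMPhysReleasePageMappingLock(pVM, &Lock);
        return VINF_SUCCESS;
    }
    return rc; /* VERR_PGM_PHYS_TLB_CATCH_WRITE/_CATCH_ALL/_UNASSIGNED: use PGMPhysWrite instead. */
}
#endif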
4440
4441
4442/**
4443 * Checks if the given GCPhys page requires special handling for the given access
4444 * because it's MMIO or otherwise monitored.
4445 *
4446 * @returns VBox status code (no informational statuses).
4447 * @retval VINF_SUCCESS on success.
4448 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4449 * access handler of some kind.
4450 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4451 * accesses or is odd in any way.
4452 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4453 *
4454 * @param pVM The cross context VM structure.
4455 * @param GCPhys The GC physical address to convert. Since this is
4456 * only used for filling the REM TLB, the A20 mask must
4457 * be applied before calling this API.
4458 * @param fWritable Whether write access is required.
4459 * @param fByPassHandlers Whether to bypass access handlers.
4460 *
4461 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
4462 * a stop-gap thing that should be removed once there is a better TLB
4463 * for virtual address accesses.
4464 */
4465VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
4466{
4467 PGM_LOCK_VOID(pVM);
4468 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
4469
4470 PPGMRAMRANGE pRam;
4471 PPGMPAGE pPage;
4472 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4473 if (RT_SUCCESS(rc))
4474 {
4475 if (PGM_PAGE_IS_BALLOONED(pPage))
4476 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4477 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4478 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4479 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4480 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4481 rc = VINF_SUCCESS;
4482 else
4483 {
4484 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4485 {
4486 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4487 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4488 }
4489 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4490 {
4491 Assert(!fByPassHandlers);
4492 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4493 }
4494 }
4495 }
4496
4497 PGM_UNLOCK(pVM);
4498 return rc;
4499}
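/*
 * Illustrative sketch (not part of the original file): pre-flighting a write
 * before building a direct-access TLB entry.  As the documentation above notes,
 * the A20 mask must already have been applied to the address by the caller.
 */
#if 0 /* example only */
static bool pgmSampleCanWriteDirectly(PVMCC pVM, RTGCPHYS GCPhysA20Masked)
{
    int rc = PGMPhysIemQueryAccess(pVM, GCPhysA20Masked, true /*fWritable*/, false /*fByPassHandlers*/);
    return rc == VINF_SUCCESS; /* Any VERR_PGM_PHYS_TLB_* status means handlers, MMIO or unassigned. */
}
#endif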
4500
4501#ifdef VBOX_WITH_NATIVE_NEM
4502
4503/**
4504 * Interface used by NEM to check what to do on a memory access exit.
4505 *
4506 * @returns VBox status code.
4507 * @param pVM The cross context VM structure.
4508 * @param pVCpu The cross context per virtual CPU structure.
4509 * Optional.
4510 * @param GCPhys The guest physical address.
4511 * @param fMakeWritable Whether to try to make the page writable or not. If it
4512 * cannot be made writable, NEM_PAGE_PROT_WRITE won't
4513 * be returned and the return code will be unaffected.
4514 * @param pInfo Where to return the page information. This is
4515 * initialized even on failure.
4516 * @param pfnChecker Page in-sync checker callback. Optional.
4517 * @param pvUser User argument to pass to pfnChecker.
4518 */
4519VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fMakeWritable, PPGMPHYSNEMPAGEINFO pInfo,
4520 PFNPGMPHYSNEMCHECKPAGE pfnChecker, void *pvUser)
4521{
4522 PGM_LOCK_VOID(pVM);
4523
4524 PPGMPAGE pPage;
4525 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
4526 if (RT_SUCCESS(rc))
4527 {
4528 /* Try make it writable if requested. */
4529 pInfo->u2OldNemState = PGM_PAGE_GET_NEM_STATE(pPage);
4530 if (fMakeWritable)
4531 switch (PGM_PAGE_GET_STATE(pPage))
4532 {
4533 case PGM_PAGE_STATE_SHARED:
4534 case PGM_PAGE_STATE_WRITE_MONITORED:
4535 case PGM_PAGE_STATE_ZERO:
4536 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
4537 if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
4538 rc = VINF_SUCCESS;
4539 break;
4540 }
4541
4542 /* Fill in the info. */
4543 pInfo->HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
4544 pInfo->u2NemState = PGM_PAGE_GET_NEM_STATE(pPage);
4545 pInfo->fHasHandlers = PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) ? 1 : 0;
4546 PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
4547 pInfo->enmType = enmType;
4548 pInfo->fNemProt = pgmPhysPageCalcNemProtection(pPage, enmType);
4549 switch (PGM_PAGE_GET_STATE(pPage))
4550 {
4551 case PGM_PAGE_STATE_ALLOCATED:
4552 pInfo->fZeroPage = 0;
4553 break;
4554
4555 case PGM_PAGE_STATE_ZERO:
4556 pInfo->fZeroPage = 1;
4557 break;
4558
4559 case PGM_PAGE_STATE_WRITE_MONITORED:
4560 pInfo->fZeroPage = 0;
4561 break;
4562
4563 case PGM_PAGE_STATE_SHARED:
4564 pInfo->fZeroPage = 0;
4565 break;
4566
4567 case PGM_PAGE_STATE_BALLOONED:
4568 pInfo->fZeroPage = 1;
4569 break;
4570
4571 default:
4572 pInfo->fZeroPage = 1;
4573 AssertFailedStmt(rc = VERR_PGM_PHYS_PAGE_GET_IPE);
4574 }
4575
4576 /* Call the checker and update NEM state. */
4577 if (pfnChecker)
4578 {
4579 rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
4580 PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
4581 }
4582
4583 /* Done. */
4584 PGM_UNLOCK(pVM);
4585 }
4586 else
4587 {
4588 PGM_UNLOCK(pVM);
4589
4590 pInfo->HCPhys = NIL_RTHCPHYS;
4591 pInfo->fNemProt = NEM_PAGE_PROT_NONE;
4592 pInfo->u2NemState = 0;
4593 pInfo->fHasHandlers = 0;
4594 pInfo->fZeroPage = 0;
4595 pInfo->enmType = PGMPAGETYPE_INVALID;
4596 }
4597
4598 return rc;
4599}
4600
4601
4602/**
4603 * NEM helper that performs @a pfnCallback on pages with NEM state @a uMinState
4604 * or higher.
4605 *
4606 * @returns VBox status code from callback.
4607 * @param pVM The cross context VM structure.
4608 * @param pVCpu The cross context per CPU structure. This is
4609 * optional as it's only for passing to the callback.
4610 * @param uMinState The minimum NEM state value to call on.
4611 * @param pfnCallback The callback function.
4612 * @param pvUser User argument for the callback.
4613 */
4614VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uMinState,
4615 PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
4616{
4617 /*
4618 * Just brute force this problem.
4619 */
4620 PGM_LOCK_VOID(pVM);
4621 int rc = VINF_SUCCESS;
4622 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
4623 {
4624 uint32_t const cPages = pRam->cb >> X86_PAGE_SHIFT;
4625 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4626 {
4627 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(&pRam->aPages[iPage]);
4628 if (u2State < uMinState)
4629 { /* likely */ }
4630 else
4631 {
4632 rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
4633 if (RT_SUCCESS(rc))
4634 PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
4635 else
4636 break;
4637 }
4638 }
4639 }
4640 PGM_UNLOCK(pVM);
4641
4642 return rc;
4643}
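/*
 * Illustrative sketch (not part of the original file): a callback for
 * PGMPhysNemEnumPagesByState that merely counts the pages it is invoked on and
 * leaves the NEM state untouched.  The parameter list mirrors how pfnCallback
 * is invoked above; the authoritative prototype is PFNPGMPHYSNEMENUMCALLBACK in
 * the headers, and the state threshold used by the caller below is made up.
 */
#if 0 /* example only */
static DECLCALLBACK(int) pgmSampleNemCountPageCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
                                                       uint8_t *pu2State, void *pvUser)
{
    RT_NOREF(pVM, pVCpu, GCPhys, pu2State);
    *(uint32_t *)pvUser += 1;
    return VINF_SUCCESS;
}

static int pgmSampleCountNemPages(PVMCC pVM, PVMCPUCC pVCpu, uint32_t *pcPages)
{
    *pcPages = 0;
    return PGMPhysNemEnumPagesByState(pVM, pVCpu, 2 /*uMinState, hypothetical*/, pgmSampleNemCountPageCallback, pcPages);
}
#endif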
4644
4645
4646/**
4647 * Helper for setting the NEM state for a range of pages.
4648 *
4649 * @param paPages Array of pages to modify.
4650 * @param cPages How many pages to modify.
4651 * @param u2State The new state value.
4652 */
4653void pgmPhysSetNemStateForPages(PPGMPAGE paPages, RTGCPHYS cPages, uint8_t u2State)
4654{
4655 PPGMPAGE pPage = paPages;
4656 while (cPages-- > 0)
4657 {
4658 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
4659 pPage++;
4660 }
4661}
4662
4663#endif /* VBOX_WITH_NATIVE_NEM */
4664