VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 92780

Last change on this file since 92780 was 92426, checked in by vboxsync, 3 years ago

VMM: Nested VMX: bugref:10092 Refactor PGMGstGetPage and related API and functions to pass more info back to callers on page walk failures.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 142.5 KB
Line 
1/* $Id: PGMAllPhys.cpp 92426 2021-11-15 13:25:47Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM_PHYS
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/iom.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/nem.h>
30#include "PGMInternal.h"
31#include <VBox/vmm/vmcc.h>
32#include "PGMInline.h"
33#include <VBox/param.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/asm-amd64-x86.h>
38#include <VBox/log.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*********************************************************************************************************************************
45* Defined Constants And Macros *
46*********************************************************************************************************************************/
47/** Enable the physical TLB. */
48#define PGM_WITH_PHYS_TLB
49
/** @def PGM_HANDLER_PHYS_IS_VALID_STATUS
 * Checks if valid physical access handler return code (normal handler, not PF).
 *
 * Checks if the given strict status code is one of the expected ones for a
 * physical access handler in the current context.
 *
 * @returns true or false.
 * @param   a_rcStrict      The status code.
 * @param   a_fWrite        Whether it is a write or read being serviced.
 *
 * @remarks We wish to keep the list of statuses here as short as possible.
 *          When changing, please make sure to update the PGMPhysRead,
 *          PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
 */
#ifdef IN_RING3
/* Ring-3 handlers must fully service the access themselves, so only the
   plain success statuses are acceptable here. */
# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
    (   (a_rcStrict) == VINF_SUCCESS \
     || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
#elif defined(IN_RING0)
/* Ring-0 handlers may additionally punt the access to ring-3: IOM deferral
   statuses (read/write direction checked against a_fWrite) plus the EM/debug
   statuses that force a trip out of ring-0. */
#define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
    (   (a_rcStrict) == VINF_SUCCESS \
     || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT \
        \
     || (a_rcStrict) == ((a_fWrite) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ) \
     || (a_rcStrict) == VINF_IOM_R3_MMIO_READ_WRITE \
     || ((a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE && (a_fWrite)) \
        \
     || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR \
     || (a_rcStrict) == VINF_EM_DBG_STOP \
     || (a_rcStrict) == VINF_EM_DBG_EVENT \
     || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
     || (a_rcStrict) == VINF_EM_OFF \
     || (a_rcStrict) == VINF_EM_SUSPEND \
     || (a_rcStrict) == VINF_EM_RESET \
    )
#else
# error "Context?"
#endif
88
/** @def PGM_HANDLER_VIRT_IS_VALID_STATUS
 * Checks if valid virtual access handler return code (normal handler, not PF).
 *
 * Checks if the given strict status code is one of the expected ones for a
 * virtual access handler in the current context.
 *
 * @returns true or false.
 * @param   a_rcStrict      The status code.
 * @param   a_fWrite        Whether it is a write or read being serviced.
 *
 * @remarks We wish to keep the list of statuses here as short as possible.
 *          When changing, please make sure to update the PGMPhysRead,
 *          PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
 */
#ifdef IN_RING3
/* Ring-3: the handler must complete the access itself. */
# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
    (   (a_rcStrict) == VINF_SUCCESS \
     || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
#elif defined(IN_RING0)
/* Ring-0: no status is valid since virtual handlers only exist in ring-3. */
# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
    (false /* no virtual handlers in ring-0! */ )
#else
# error "Context?"
#endif
113
114
115
116#ifndef IN_RING3
117
118/**
119 * @callback_method_impl{FNPGMPHYSHANDLER,
120 * Dummy for forcing ring-3 handling of the access.}
121 */
122DECLEXPORT(VBOXSTRICTRC)
123pgmPhysHandlerRedirectToHC(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
124 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
125{
126 NOREF(pVM); NOREF(pVCpu); NOREF(GCPhys); NOREF(pvPhys); NOREF(pvBuf); NOREF(cbBuf);
127 NOREF(enmAccessType); NOREF(enmOrigin); NOREF(pvUser);
128 return VINF_EM_RAW_EMULATE_INSTR;
129}
130
131
132/**
133 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
134 * Dummy for forcing ring-3 handling of the access.}
135 */
136VMMDECL(VBOXSTRICTRC) pgmPhysPfHandlerRedirectToHC(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
137 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
138{
139 NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(GCPhysFault); NOREF(pvUser);
140 return VINF_EM_RAW_EMULATE_INSTR;
141}
142
143
/**
 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
 *      \#PF access handler callback for guest ROM range write access.}
 *
 * Tries to service trivial ROM writes in place (by skipping the instruction),
 * otherwise defers to ring-3 via instruction emulation.
 *
 * @remarks The @a pvUser argument points to the PGMROMRANGE.
 */
DECLEXPORT(VBOXSTRICTRC) pgmPhysRomWritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
                                                  RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
    int             rc;
    PPGMROMRANGE    pRom  = (PPGMROMRANGE)pvUser;
    /* Page index within the ROM range of the faulting address. */
    uint32_t        iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
    NOREF(uErrorCode); NOREF(pvFault);

    Assert(uErrorCode & X86_TRAP_PF_RW);  /* This shall not be used for read access! */

    Assert(iPage < (pRom->cb >> PAGE_SHIFT));
    switch (pRom->aPages[iPage].enmProt)
    {
        case PGMROMPROT_READ_ROM_WRITE_IGNORE:
        case PGMROMPROT_READ_RAM_WRITE_IGNORE:
        {
            /*
             * If it's a simple instruction which doesn't change the cpu state
             * we will simply skip it. Otherwise we'll have to defer it to REM.
             */
            uint32_t     cbOp;
            PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
            rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
            if (     RT_SUCCESS(rc)
                &&   pDis->uCpuMode == DISCPUMODE_32BIT  /** @todo why does this matter? */
                &&  !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
            {
                switch (pDis->bOpCode)
                {
                    /** @todo Find other instructions we can safely skip, possibly
                     * adding this kind of detection to DIS or EM. */
                    case OP_MOV:
                        /* Write is to be ignored: just advance RIP past the MOV
                           instead of emulating it. */
                        pRegFrame->rip += cbOp;
                        STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteHandled);
                        return VINF_SUCCESS;
                }
            }
            break;
        }

        case PGMROMPROT_READ_RAM_WRITE_RAM:
            /* Writable shadow RAM: lift the write protection for the page and
               let the emulation below perform the actual write. */
            pRom->aPages[iPage].LiveSave.fWrittenTo = true;
            rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
            AssertRC(rc);
            break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */

        case PGMROMPROT_READ_ROM_WRITE_RAM:
            /* Handle it in ring-3 because it's *way* easier there. */
            pRom->aPages[iPage].LiveSave.fWrittenTo = true;
            break;

        default:
            AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
                                   pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
                                  VERR_IPE_NOT_REACHED_DEFAULT_CASE);
    }

    /* Not handled here; have EM emulate the instruction (ends up in ring-3). */
    STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteUnhandled);
    return VINF_EM_RAW_EMULATE_INSTR;
}
210
211#endif /* !IN_RING3 */
212
213
214/**
215 * @callback_method_impl{FNPGMPHYSHANDLER,
216 * Access handler callback for ROM write accesses.}
217 *
218 * @remarks The @a pvUser argument points to the PGMROMRANGE.
219 */
220PGM_ALL_CB2_DECL(VBOXSTRICTRC)
221pgmPhysRomWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
222 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
223{
224 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
225 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
226 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
227 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
228 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
229 NOREF(pVCpu); NOREF(pvPhys); NOREF(enmOrigin);
230
231 if (enmAccessType == PGMACCESSTYPE_READ)
232 {
233 switch (pRomPage->enmProt)
234 {
235 /*
236 * Take the default action.
237 */
238 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
239 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
240 case PGMROMPROT_READ_ROM_WRITE_RAM:
241 case PGMROMPROT_READ_RAM_WRITE_RAM:
242 return VINF_PGM_HANDLER_DO_DEFAULT;
243
244 default:
245 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
246 pRom->aPages[iPage].enmProt, iPage, GCPhys),
247 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
248 }
249 }
250 else
251 {
252 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
253 switch (pRomPage->enmProt)
254 {
255 /*
256 * Ignore writes.
257 */
258 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
259 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
260 return VINF_SUCCESS;
261
262 /*
263 * Write to the RAM page.
264 */
265 case PGMROMPROT_READ_ROM_WRITE_RAM:
266 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
267 {
268 /* This should be impossible now, pvPhys doesn't work cross page anylonger. */
269 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
270
271 /*
272 * Take the lock, do lazy allocation, map the page and copy the data.
273 *
274 * Note that we have to bypass the mapping TLB since it works on
275 * guest physical addresses and entering the shadow page would
276 * kind of screw things up...
277 */
278 PGM_LOCK_VOID(pVM);
279
280 PPGMPAGE pShadowPage = &pRomPage->Shadow;
281 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
282 {
283 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
284 AssertLogRelMsgReturnStmt(pShadowPage, ("%RGp\n", GCPhys), PGM_UNLOCK(pVM), VERR_PGM_PHYS_PAGE_GET_IPE);
285 }
286
287 void *pvDstPage;
288 int rc;
289#if defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)
290 if (PGM_IS_IN_NEM_MODE(pVM) && PGMROMPROT_IS_ROM(pRomPage->enmProt))
291 {
292 pvDstPage = &pRom->pbR3Alternate[GCPhys - pRom->GCPhys];
293 rc = VINF_SUCCESS;
294 }
295 else
296#endif
297 {
298 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
299 if (RT_SUCCESS(rc))
300 pvDstPage = (uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK);
301 }
302 if (RT_SUCCESS(rc))
303 {
304 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
305 pRomPage->LiveSave.fWrittenTo = true;
306
307 AssertMsg( rc == VINF_SUCCESS
308 || ( rc == VINF_PGM_SYNC_CR3
309 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
310 , ("%Rrc\n", rc));
311 rc = VINF_SUCCESS;
312 }
313
314 PGM_UNLOCK(pVM);
315 return rc;
316 }
317
318 default:
319 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
320 pRom->aPages[iPage].enmProt, iPage, GCPhys),
321 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
322 }
323 }
324}
325
326
/**
 * Common worker for pgmPhysMmio2WriteHandler and pgmPhysMmio2WritePfHandler.
 *
 * Marks the MMIO2 range dirty and disables further write tracking for the
 * touched page so subsequent writes hit RAM directly.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   hMmio2  The MMIO2 handle (1-based index into apMmio2Ranges).
 * @param   GCPhys  The guest physical address of the write.
 * @param   GCPtr   The guest linear address of the faulting write, or
 *                  ~(RTGCPTR)0 when called from the non-\#PF handler.
 */
static VBOXSTRICTRC pgmPhysMmio2WriteHandlerCommon(PVMCC pVM, PVMCPUCC pVCpu, uintptr_t hMmio2, RTGCPHYS GCPhys, RTGCPTR GCPtr)
{
    /*
     * Get the MMIO2 range.
     */
    AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), VERR_INTERNAL_ERROR_3);
    AssertReturn(hMmio2 != 0, VERR_INTERNAL_ERROR_3);
    PPGMREGMMIO2RANGE pMmio2 = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2 - 1];  /* handle is 1-based */
    Assert(pMmio2->idMmio2 == hMmio2);
    AssertReturn((pMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES) == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
                 VERR_INTERNAL_ERROR_4);

    /*
     * Get the page and make sure it's an MMIO2 page.
     */
    PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
    AssertReturn(pPage, VINF_EM_RAW_EMULATE_INSTR);
    AssertReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, VINF_EM_RAW_EMULATE_INSTR);

    /*
     * Set the dirty flag so we can avoid scanning all the pages when it isn't dirty.
     * (The PGM_PAGE_HNDL_PHYS_STATE_DISABLED handler state indicates that a single
     * page is dirty, saving the need for additional storage (bitmap).)
     */
    pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_DIRTY;

    /*
     * Disable the handler for this page.
     */
    int rc = PGMHandlerPhysicalPageTempOff(pVM, pMmio2->RamRange.GCPhys, GCPhys & X86_PTE_PG_MASK);
    AssertRC(rc);
#ifndef IN_RING3
    /* In ring-0/raw-mode also make the shadow PTE writable so the faulting
       instruction can be restarted instead of emulated. */
    if (RT_SUCCESS(rc) && GCPtr != ~(RTGCPTR)0)
    {
        rc = PGMShwMakePageWritable(pVCpu, GCPtr, PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
        AssertMsgReturn(rc == VINF_SUCCESS, ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, rc), rc);
    }
#else
    RT_NOREF(pVCpu, GCPtr);
#endif
    return VINF_SUCCESS;
}
372
373
374#ifndef IN_RING3
375/**
376 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
377 * \#PF access handler callback for guest MMIO2 dirty page tracing.}
378 *
379 * @remarks The @a pvUser is the MMIO2 index.
380 */
381DECLEXPORT(VBOXSTRICTRC) pgmPhysMmio2WritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
382 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
383{
384 RT_NOREF(pVCpu, uErrorCode, pRegFrame);
385 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
386 if (RT_SUCCESS(rcStrict))
387 {
388 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, (uintptr_t)pvUser, GCPhysFault, pvFault);
389 PGM_UNLOCK(pVM);
390 }
391 return rcStrict;
392}
393#endif /* !IN_RING3 */
394
395
396/**
397 * @callback_method_impl{FNPGMPHYSHANDLER,
398 * Access handler callback for MMIO2 dirty page tracing.}
399 *
400 * @remarks The @a pvUser is the MMIO2 index.
401 */
402PGM_ALL_CB2_DECL(VBOXSTRICTRC)
403pgmPhysMmio2WriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
404 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
405{
406 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
407 if (RT_SUCCESS(rcStrict))
408 {
409 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, (uintptr_t)pvUser, GCPhys, ~(RTGCPTR)0);
410 PGM_UNLOCK(pVM);
411 if (rcStrict == VINF_SUCCESS)
412 rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
413 }
414 RT_NOREF(pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
415 return rcStrict;
416}
417
418
419/**
420 * Invalidates the RAM range TLBs.
421 *
422 * @param pVM The cross context VM structure.
423 */
424void pgmPhysInvalidRamRangeTlbs(PVMCC pVM)
425{
426 PGM_LOCK_VOID(pVM);
427 RT_ZERO(pVM->pgm.s.apRamRangesTlbR3);
428 RT_ZERO(pVM->pgm.s.apRamRangesTlbR0);
429 PGM_UNLOCK(pVM);
430}
431
432
/**
 * Tests if a value of type RTGCPHYS is negative if the type had been signed
 * instead of unsigned.
 *
 * @returns @c true if negative, @c false if positive or zero.
 * @param   a_GCPhys        The value to test.
 * @todo    Move me to iprt/types.h.
 */
/* Isolates the most significant bit, i.e. what would be the sign bit. */
#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
442
443
444/**
445 * Slow worker for pgmPhysGetRange.
446 *
447 * @copydoc pgmPhysGetRange
448 */
449PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
450{
451 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
452
453 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
454 while (pRam)
455 {
456 RTGCPHYS off = GCPhys - pRam->GCPhys;
457 if (off < pRam->cb)
458 {
459 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
460 return pRam;
461 }
462 if (RTGCPHYS_IS_NEGATIVE(off))
463 pRam = pRam->CTX_SUFF(pLeft);
464 else
465 pRam = pRam->CTX_SUFF(pRight);
466 }
467 return NULL;
468}
469
470
471/**
472 * Slow worker for pgmPhysGetRangeAtOrAbove.
473 *
474 * @copydoc pgmPhysGetRangeAtOrAbove
475 */
476PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
477{
478 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
479
480 PPGMRAMRANGE pLastLeft = NULL;
481 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
482 while (pRam)
483 {
484 RTGCPHYS off = GCPhys - pRam->GCPhys;
485 if (off < pRam->cb)
486 {
487 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
488 return pRam;
489 }
490 if (RTGCPHYS_IS_NEGATIVE(off))
491 {
492 pLastLeft = pRam;
493 pRam = pRam->CTX_SUFF(pLeft);
494 }
495 else
496 pRam = pRam->CTX_SUFF(pRight);
497 }
498 return pLastLeft;
499}
500
501
502/**
503 * Slow worker for pgmPhysGetPage.
504 *
505 * @copydoc pgmPhysGetPage
506 */
507PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
508{
509 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
510
511 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
512 while (pRam)
513 {
514 RTGCPHYS off = GCPhys - pRam->GCPhys;
515 if (off < pRam->cb)
516 {
517 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
518 return &pRam->aPages[off >> PAGE_SHIFT];
519 }
520
521 if (RTGCPHYS_IS_NEGATIVE(off))
522 pRam = pRam->CTX_SUFF(pLeft);
523 else
524 pRam = pRam->CTX_SUFF(pRight);
525 }
526 return NULL;
527}
528
529
530/**
531 * Slow worker for pgmPhysGetPageEx.
532 *
533 * @copydoc pgmPhysGetPageEx
534 */
535int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
536{
537 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
538
539 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
540 while (pRam)
541 {
542 RTGCPHYS off = GCPhys - pRam->GCPhys;
543 if (off < pRam->cb)
544 {
545 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
546 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
547 return VINF_SUCCESS;
548 }
549
550 if (RTGCPHYS_IS_NEGATIVE(off))
551 pRam = pRam->CTX_SUFF(pLeft);
552 else
553 pRam = pRam->CTX_SUFF(pRight);
554 }
555
556 *ppPage = NULL;
557 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
558}
559
560
561/**
562 * Slow worker for pgmPhysGetPageAndRangeEx.
563 *
564 * @copydoc pgmPhysGetPageAndRangeEx
565 */
566int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
567{
568 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
569
570 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
571 while (pRam)
572 {
573 RTGCPHYS off = GCPhys - pRam->GCPhys;
574 if (off < pRam->cb)
575 {
576 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
577 *ppRam = pRam;
578 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
579 return VINF_SUCCESS;
580 }
581
582 if (RTGCPHYS_IS_NEGATIVE(off))
583 pRam = pRam->CTX_SUFF(pLeft);
584 else
585 pRam = pRam->CTX_SUFF(pRight);
586 }
587
588 *ppRam = NULL;
589 *ppPage = NULL;
590 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
591}
592
593
594/**
595 * Checks if Address Gate 20 is enabled or not.
596 *
597 * @returns true if enabled.
598 * @returns false if disabled.
599 * @param pVCpu The cross context virtual CPU structure.
600 */
601VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
602{
603 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
604 return pVCpu->pgm.s.fA20Enabled;
605}
606
607
608/**
609 * Validates a GC physical address.
610 *
611 * @returns true if valid.
612 * @returns false if invalid.
613 * @param pVM The cross context VM structure.
614 * @param GCPhys The physical address to validate.
615 */
616VMMDECL(bool) PGMPhysIsGCPhysValid(PVMCC pVM, RTGCPHYS GCPhys)
617{
618 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
619 return pPage != NULL;
620}
621
622
623/**
624 * Checks if a GC physical address is a normal page,
625 * i.e. not ROM, MMIO or reserved.
626 *
627 * @returns true if normal.
628 * @returns false if invalid, ROM, MMIO or reserved page.
629 * @param pVM The cross context VM structure.
630 * @param GCPhys The physical address to check.
631 */
632VMMDECL(bool) PGMPhysIsGCPhysNormal(PVMCC pVM, RTGCPHYS GCPhys)
633{
634 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
635 return pPage
636 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
637}
638
639
640/**
641 * Converts a GC physical address to a HC physical address.
642 *
643 * @returns VINF_SUCCESS on success.
644 * @returns VERR_PGM_PHYS_PAGE_RESERVED it it's a valid GC physical
645 * page but has no physical backing.
646 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
647 * GC physical address.
648 *
649 * @param pVM The cross context VM structure.
650 * @param GCPhys The GC physical address to convert.
651 * @param pHCPhys Where to store the HC physical address on success.
652 */
653VMM_INT_DECL(int) PGMPhysGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
654{
655 PGM_LOCK_VOID(pVM);
656 PPGMPAGE pPage;
657 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
658 if (RT_SUCCESS(rc))
659 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
660 PGM_UNLOCK(pVM);
661 return rc;
662}
663
664
665/**
666 * Invalidates all page mapping TLBs.
667 *
668 * @param pVM The cross context VM structure.
669 */
670void pgmPhysInvalidatePageMapTLB(PVMCC pVM)
671{
672 PGM_LOCK_VOID(pVM);
673 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushes);
674
675 /* Clear the R3 & R0 TLBs completely. */
676 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
677 {
678 pVM->pgm.s.PhysTlbR0.aEntries[i].GCPhys = NIL_RTGCPHYS;
679 pVM->pgm.s.PhysTlbR0.aEntries[i].pPage = 0;
680 pVM->pgm.s.PhysTlbR0.aEntries[i].pv = 0;
681 }
682
683 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
684 {
685 pVM->pgm.s.PhysTlbR3.aEntries[i].GCPhys = NIL_RTGCPHYS;
686 pVM->pgm.s.PhysTlbR3.aEntries[i].pPage = 0;
687 pVM->pgm.s.PhysTlbR3.aEntries[i].pMap = 0;
688 pVM->pgm.s.PhysTlbR3.aEntries[i].pv = 0;
689 }
690
691 PGM_UNLOCK(pVM);
692}
693
694
695/**
696 * Invalidates a page mapping TLB entry
697 *
698 * @param pVM The cross context VM structure.
699 * @param GCPhys GCPhys entry to flush
700 */
701void pgmPhysInvalidatePageMapTLBEntry(PVMCC pVM, RTGCPHYS GCPhys)
702{
703 PGM_LOCK_ASSERT_OWNER(pVM);
704
705 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushEntry);
706
707 unsigned const idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
708
709 pVM->pgm.s.PhysTlbR0.aEntries[idx].GCPhys = NIL_RTGCPHYS;
710 pVM->pgm.s.PhysTlbR0.aEntries[idx].pPage = 0;
711 pVM->pgm.s.PhysTlbR0.aEntries[idx].pv = 0;
712
713 pVM->pgm.s.PhysTlbR3.aEntries[idx].GCPhys = NIL_RTGCPHYS;
714 pVM->pgm.s.PhysTlbR3.aEntries[idx].pPage = 0;
715 pVM->pgm.s.PhysTlbR3.aEntries[idx].pMap = 0;
716 pVM->pgm.s.PhysTlbR3.aEntries[idx].pv = 0;
717}
718
719
/**
 * Makes sure that there is at least one handy page ready for use.
 *
 * This will also take the appropriate actions when reaching water-marks.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_EM_NO_MEMORY if we're really out of memory.
 *
 * @param   pVM     The cross context VM structure.
 *
 * @remarks Must be called from within the PGM critical section. It may
 *          nip back to ring-3/0 in some cases.
 */
static int pgmPhysEnsureHandyPage(PVMCC pVM)
{
    AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));

    /*
     * Do we need to do anything special?
     * (The water-mark constants differ between ring-3 and ring-0/raw-mode.)
     */
#ifdef IN_RING3
    if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
#else
    if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
#endif
    {
        /*
         * Allocate pages only if we're out of them, or in ring-3, almost out.
         */
#ifdef IN_RING3
        if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
#else
        if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
#endif
        {
            Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
                 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
#ifdef IN_RING3
            int rc = PGMR3PhysAllocateHandyPages(pVM);
#else
            int rc = pgmR0PhysAllocateHandyPages(pVM, VMMGetCpuId(pVM), false /*fRing3*/);
#endif
            if (RT_UNLIKELY(rc != VINF_SUCCESS))
            {
                if (RT_FAILURE(rc))
                    return rc;
                /* Informational statuses other than VINF_EM_NO_MEMORY are unexpected. */
                AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (!pVM->pgm.s.cHandyPages)
                {
                    LogRel(("PGM: no more handy pages!\n"));
                    return VERR_EM_NO_MEMORY;
                }
                /* Some pages remain, so the low-memory force-action flags must be set. */
                Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
                Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
#ifndef IN_RING3
                VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
#endif
            }
            AssertMsgReturn(    pVM->pgm.s.cHandyPages > 0
                            &&  pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
                            ("%u\n", pVM->pgm.s.cHandyPages),
                            VERR_PGM_HANDY_PAGE_IPE);
        }
        else
        {
            /* Not critically low yet: just raise the relevant force-action flags
               so the allocation happens at a convenient point. */
            if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
                VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
#ifndef IN_RING3
            if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
            {
                Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
                VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
            }
#endif
        }
    }

    return VINF_SUCCESS;
}
800
801
802/**
803 * Replace a zero or shared page with new page that we can write to.
804 *
805 * @returns The following VBox status codes.
806 * @retval VINF_SUCCESS on success, pPage is modified.
807 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
808 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
809 *
810 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
811 *
812 * @param pVM The cross context VM structure.
813 * @param pPage The physical page tracking structure. This will
814 * be modified on success.
815 * @param GCPhys The address of the page.
816 *
817 * @remarks Must be called from within the PGM critical section. It may
818 * nip back to ring-3/0 in some cases.
819 *
820 * @remarks This function shouldn't really fail, however if it does
821 * it probably means we've screwed up the size of handy pages and/or
822 * the low-water mark. Or, that some device I/O is causing a lot of
823 * pages to be allocated while while the host is in a low-memory
824 * condition. This latter should be handled elsewhere and in a more
825 * controlled manner, it's on the @bugref{3170} todo list...
826 */
827int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
828{
829 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
830
831 /*
832 * Prereqs.
833 */
834 PGM_LOCK_ASSERT_OWNER(pVM);
835 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
836 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
837
838# ifdef PGM_WITH_LARGE_PAGES
839 /*
840 * Try allocate a large page if applicable.
841 */
842 if ( PGMIsUsingLargePages(pVM)
843 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
844 && !VM_IS_NEM_ENABLED(pVM)) /** @todo NEM: Implement large pages support. */
845 {
846 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
847 PPGMPAGE pBasePage;
848
849 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
850 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
851 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
852 {
853 rc = pgmPhysAllocLargePage(pVM, GCPhys);
854 if (rc == VINF_SUCCESS)
855 return rc;
856 }
857 /* Mark the base as type page table, so we don't check over and over again. */
858 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
859
860 /* fall back to 4KB pages. */
861 }
862# endif
863
864 /*
865 * Flush any shadow page table mappings of the page.
866 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
867 */
868 bool fFlushTLBs = false;
869 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
870 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
871
872 /*
873 * Ensure that we've got a page handy, take it and use it.
874 */
875 int rc2 = pgmPhysEnsureHandyPage(pVM);
876 if (RT_FAILURE(rc2))
877 {
878 if (fFlushTLBs)
879 PGM_INVL_ALL_VCPU_TLBS(pVM);
880 Assert(rc2 == VERR_EM_NO_MEMORY);
881 return rc2;
882 }
883 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
884 PGM_LOCK_ASSERT_OWNER(pVM);
885 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
886 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
887
888 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
889 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
890 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_GMMPAGEDESC_PHYS);
891 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
892 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
893 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
894
895 /*
896 * There are one or two action to be taken the next time we allocate handy pages:
897 * - Tell the GMM (global memory manager) what the page is being used for.
898 * (Speeds up replacement operations - sharing and defragmenting.)
899 * - If the current backing is shared, it must be freed.
900 */
901 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
902 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
903
904 void const *pvSharedPage = NULL;
905 if (PGM_PAGE_IS_SHARED(pPage))
906 {
907 /* Mark this shared page for freeing/dereferencing. */
908 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
909 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
910
911 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
912 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
913 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageReplaceShared));
914 pVM->pgm.s.cSharedPages--;
915
916 /* Grab the address of the page so we can make a copy later on. (safe) */
917 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
918 AssertRC(rc);
919 }
920 else
921 {
922 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
923 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatRZPageReplaceZero);
924 pVM->pgm.s.cZeroPages--;
925 }
926
927 /*
928 * Do the PGMPAGE modifications.
929 */
930 pVM->pgm.s.cPrivatePages++;
931 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
932 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
933 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
934 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
935 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
936
937 /* Copy the shared page contents to the replacement page. */
938 if (pvSharedPage)
939 {
940 /* Get the virtual address of the new page. */
941 PGMPAGEMAPLOCK PgMpLck;
942 void *pvNewPage;
943 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
944 if (RT_SUCCESS(rc))
945 {
946 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo todo write ASMMemCopyPage */
947 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
948 }
949 }
950
951 if ( fFlushTLBs
952 && rc != VINF_PGM_GCPHYS_ALIASED)
953 PGM_INVL_ALL_VCPU_TLBS(pVM);
954
955 /*
956 * Notify NEM about the mapping change for this page.
957 *
958 * Note! Shadow ROM pages are complicated as they can definitely be
959 * allocated while not visible, so play safe.
960 */
961 if (VM_IS_NEM_ENABLED(pVM))
962 {
963 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
964 if ( enmType != PGMPAGETYPE_ROM_SHADOW
965 || pgmPhysGetPage(pVM, GCPhys) == pPage)
966 {
967 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
968 rc2 = NEMHCNotifyPhysPageAllocated(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, HCPhys,
969 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
970 if (RT_SUCCESS(rc))
971 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
972 else
973 rc = rc2;
974 }
975 }
976
977 return rc;
978}
979
980#ifdef PGM_WITH_LARGE_PAGES
981
/**
 * Replace a 2 MB range of zero pages with new pages that we can write to.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success, pPage is modified.
 * @retval  VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
 * @retval  VERR_EM_NO_MEMORY if we're totally out of memory.
 * @retval  VERR_PGM_INVALID_LARGE_PAGE_RANGE if the 2 MB range cannot be
 *          backed by a large page (wrong types/states or already checked).
 *
 * @todo    Propagate VERR_EM_NO_MEMORY up the call tree.
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The address of the page.
 *
 * @remarks Must be called from within the PGM critical section. It may block
 *          on GMM and host mutexes/locks, leaving HM context.
 */
int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys)
{
    /* Round down to the 2 MB boundary covering GCPhys. */
    RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
    LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
    Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */

    /*
     * Check Prereqs.
     */
    PGM_LOCK_ASSERT_OWNER(pVM);
    Assert(PGMIsUsingLargePages(pVM));

    /*
     * All the pages must be unallocated RAM pages, i.e. mapping the ZERO page.
     */
    PPGMPAGE pFirstPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
    if (    RT_SUCCESS(rc)
        &&  PGM_PAGE_GET_TYPE(pFirstPage)  == PGMPAGETYPE_RAM
        &&  PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
    {
        /*
         * Further they should have PDE type set to PGM_PAGE_PDE_TYPE_DONTCARE,
         * since they are unallocated.  (PGM_PAGE_PDE_TYPE_PT means an earlier
         * check already rejected this range - see the failure path below.)
         */
        unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
        Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
        if (uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE)
        {
            /*
             * Now, make sure all the other pages in the 2 MB is in the same state.
             */
            GCPhys = GCPhysBase;
            unsigned cLeft = _2M / PAGE_SIZE;
            while (cLeft-- > 0)
            {
                PPGMPAGE pSubPage = pgmPhysGetPage(pVM, GCPhys);
                if (    pSubPage
                    &&  PGM_PAGE_GET_TYPE(pSubPage)  == PGMPAGETYPE_RAM      /* Anything other than ram implies monitoring. */
                    &&  PGM_PAGE_GET_STATE(pSubPage) == PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
                {
                    Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
                    GCPhys += PAGE_SIZE;
                }
                else
                {
                    LogFlow(("pgmPhysAllocLargePage: Found page %RGp with wrong attributes (type=%d; state=%d); cancel check.\n",
                             GCPhys, pSubPage ? PGM_PAGE_GET_TYPE(pSubPage) : -1, pSubPage ? PGM_PAGE_GET_STATE(pSubPage) : -1));

                    /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
                    STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
                    PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
                    return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
                }
            }

            /*
             * Do the allocation.  Ring-3 must round-trip to ring-0 for this.
             */
# ifdef IN_RING3
            rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE, GCPhysBase, NULL);
# elif defined(IN_RING0)
            rc = pgmR0PhysAllocateLargePage(pVM, VMMGetCpuId(pVM), GCPhysBase);
# else
#  error "Port me"
# endif
            if (RT_SUCCESS(rc))
            {
                Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
                pVM->pgm.s.cLargePages++;
                return VINF_SUCCESS;
            }

            /* If we fail once, it most likely means the host's memory is too
               fragmented; don't bother trying again. */
            LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
            return rc;
        }
    }
    return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
}
1079
1080
1081/**
1082 * Recheck the entire 2 MB range to see if we can use it again as a large page.
1083 *
1084 * @returns The following VBox status codes.
1085 * @retval VINF_SUCCESS on success, the large page can be used again
1086 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
1087 *
1088 * @param pVM The cross context VM structure.
1089 * @param GCPhys The address of the page.
1090 * @param pLargePage Page structure of the base page
1091 */
1092int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
1093{
1094 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
1095
1096 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1097
1098 GCPhys &= X86_PDE2M_PAE_PG_MASK;
1099
1100 /* Check the base page. */
1101 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
1102 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
1103 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
1104 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1105 {
1106 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
1107 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1108 }
1109
1110 STAM_PROFILE_START(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1111 /* Check all remaining pages in the 2 MB range. */
1112 unsigned i;
1113 GCPhys += PAGE_SIZE;
1114 for (i = 1; i < _2M/PAGE_SIZE; i++)
1115 {
1116 PPGMPAGE pPage;
1117 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1118 AssertRCBreak(rc);
1119
1120 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
1121 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
1122 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
1123 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1124 {
1125 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
1126 break;
1127 }
1128
1129 GCPhys += PAGE_SIZE;
1130 }
1131 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1132
1133 if (i == _2M/PAGE_SIZE)
1134 {
1135 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
1136 pVM->pgm.s.cLargePagesDisabled--;
1137 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
1138 return VINF_SUCCESS;
1139 }
1140
1141 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1142}
1143
1144#endif /* PGM_WITH_LARGE_PAGES */
1145
1146
/**
 * Deal with a write monitored page.
 *
 * Clears the write-monitored state, marks the page written-to / allocated and
 * updates the relevant counters.  Returns nothing.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pPage       The physical page tracking structure.
 * @param   GCPhys      The guest physical address of the page.
 *                      PGMPhysReleasePageMappingLock() passes NIL_RTGCPHYS in a
 *                      very unlikely situation where it is okay that we let NEM
 *                      fix the page access in a lazy fashion.
 *
 * @remarks Called from within the PGM critical section.
 */
void pgmPhysPageMakeWriteMonitoredWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
{
    Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
    PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
    Assert(pVM->pgm.s.cMonitoredPages > 0);
    pVM->pgm.s.cMonitoredPages--;
    pVM->pgm.s.cWrittenToPages++;

#ifdef VBOX_WITH_NATIVE_NEM
    /*
     * Notify NEM about the protection change so we won't spin forever.
     *
     * Note! NEM needs to be able to lazily correct page protection as we
     *       cannot really get it 100% right here it seems.  The page pool
     *       does this too.
     */
    if (VM_IS_NEM_ENABLED(pVM) && GCPhys != NIL_RTGCPHYS)
    {
        uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
        PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
        PPGMRAMRANGE pRam   = pgmPhysGetRange(pVM, GCPhys);
        NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
                                       pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhys) : NULL,
                                       pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
        PGM_PAGE_SET_NEM_STATE(pPage, u2State);
    }
#else
    RT_NOREF(GCPhys);
#endif
}
1191
1192
1193/**
1194 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
1195 *
1196 * @returns VBox strict status code.
1197 * @retval VINF_SUCCESS on success.
1198 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1199 * @retval VERR_PGM_PHYS_PAGE_RESERVED it it's a valid page but has no physical backing.
1200 *
1201 * @param pVM The cross context VM structure.
1202 * @param pPage The physical page tracking structure.
1203 * @param GCPhys The address of the page.
1204 *
1205 * @remarks Called from within the PGM critical section.
1206 */
1207int pgmPhysPageMakeWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1208{
1209 PGM_LOCK_ASSERT_OWNER(pVM);
1210 switch (PGM_PAGE_GET_STATE(pPage))
1211 {
1212 case PGM_PAGE_STATE_WRITE_MONITORED:
1213 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
1214 RT_FALL_THRU();
1215 default: /* to shut up GCC */
1216 case PGM_PAGE_STATE_ALLOCATED:
1217 return VINF_SUCCESS;
1218
1219 /*
1220 * Zero pages can be dummy pages for MMIO or reserved memory,
1221 * so we need to check the flags before joining cause with
1222 * shared page replacement.
1223 */
1224 case PGM_PAGE_STATE_ZERO:
1225 if (PGM_PAGE_IS_MMIO(pPage))
1226 return VERR_PGM_PHYS_PAGE_RESERVED;
1227 RT_FALL_THRU();
1228 case PGM_PAGE_STATE_SHARED:
1229 return pgmPhysAllocPage(pVM, pPage, GCPhys);
1230
1231 /* Not allowed to write to ballooned pages. */
1232 case PGM_PAGE_STATE_BALLOONED:
1233 return VERR_PGM_PHYS_PAGE_BALLOONED;
1234 }
1235}
1236
1237
/**
 * Internal usage: Map the page specified by its GMM ID.
 *
 * This is similar to pgmPhysPageMap.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   idPage      The Page ID.
 * @param   HCPhys      The physical address (for SUPR0HCPhysToVirt).
 * @param   ppv         Where to store the mapping address.
 *
 * @remarks Called from within the PGM critical section.  The mapping is only
 *          valid while you are inside this section.
 */
int pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
{
    /*
     * Validation.
     */
    PGM_LOCK_ASSERT_OWNER(pVM);
    AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
    AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);

#ifdef IN_RING0
# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
    /* Ring-0 with a linear mapping of host physical memory: translate directly. */
    return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, ppv);
# else
    return GMMR0PageIdToVirt(pVM, idPage, ppv);
# endif

#else
    /*
     * Find/make Chunk TLB entry for the mapping chunk.
     */
    PPGMCHUNKR3MAP pMap;
    PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
    if (pTlbe->idChunk == idChunk)
    {
        STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
        pMap = pTlbe->pChunk;
    }
    else
    {
        STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));

        /*
         * Find the chunk, map it if necessary.
         */
        pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
        if (pMap)
            pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;  /* LRU bookkeeping for chunk unmapping. */
        else
        {
            int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
            if (RT_FAILURE(rc))
                return rc;
        }

        /*
         * Enter it into the Chunk TLB.
         */
        pTlbe->idChunk = idChunk;
        pTlbe->pChunk = pMap;
    }

    /* Index into the chunk mapping by the page's position within the chunk. */
    *ppv = (uint8_t *)pMap->pv + ((idPage &GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
    return VINF_SUCCESS;
#endif
}
1309
1310
/**
 * Maps a page into the current virtual address space so it can be accessed.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pPage       The physical page tracking structure.
 * @param   GCPhys      The address of the page.
 * @param   ppMap       Where to store the address of the mapping tracking structure.
 * @param   ppv         Where to store the mapping address of the page. The page
 *                      offset is masked off!
 *
 * @remarks Called from within the PGM critical section.
 */
static int pgmPhysPageMapCommon(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
{
    PGM_LOCK_ASSERT_OWNER(pVM);
    NOREF(GCPhys);

    /*
     * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
     */
    if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
        || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
    {
        /* Decode the page id to a page in a MMIO2 ram range. */
        uint8_t  idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
        uint32_t iPage   = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
        AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
                              ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
                               RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
                               pPage->s.idPage, pPage->s.uStateY),
                              VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
        PPGMREGMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
        AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
        AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
        AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
        *ppMap = NULL;
# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
        return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
# elif defined(IN_RING0)
        *ppv = (uint8_t *)pMmio2Range->pvR0 + ((uintptr_t)iPage << PAGE_SHIFT);
        return VINF_SUCCESS;
# else
        *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << PAGE_SHIFT);
        return VINF_SUCCESS;
# endif
    }

# ifdef VBOX_WITH_PGM_NEM_MODE
    if (pVM->pgm.s.fNemMode)
    {
#  ifdef IN_RING3
        /*
         * Find the corresponding RAM range and use that to locate the mapping address.
         */
        /** @todo Use the page ID for some kind of indexing as we do with MMIO2 above. */
        PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
        AssertLogRelMsgReturn(pRam, ("%RTGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
        size_t const idxPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
        Assert(pPage == &pRam->aPages[idxPage]);
        *ppMap = NULL;
        *ppv   = (uint8_t *)pRam->pvR3 + (idxPage << PAGE_SHIFT);
        return VINF_SUCCESS;
#  else
        AssertFailedReturn(VERR_INTERNAL_ERROR_2);
#  endif
    }
# endif

    const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
    if (idChunk == NIL_GMM_CHUNKID)
    {
        AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
                        VERR_PGM_PHYS_PAGE_MAP_IPE_1);
        /* Both branches resolve to the shared ZERO page; the non-aliased case
           just gets extra sanity checks. */
        if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
        {
            AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
                            VERR_PGM_PHYS_PAGE_MAP_IPE_3);
            AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage)== pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
                            VERR_PGM_PHYS_PAGE_MAP_IPE_4);
            *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
        }
        else
            *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
        *ppMap = NULL;
        return VINF_SUCCESS;
    }

# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
    /*
     * Just use the physical address.
     */
    *ppMap = NULL;
    return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);

# elif defined(IN_RING0)
    /*
     * Go by page ID thru GMMR0.
     */
    *ppMap = NULL;
    return GMMR0PageIdToVirt(pVM, PGM_PAGE_GET_PAGEID(pPage), ppv);

# else
    /*
     * Find/make Chunk TLB entry for the mapping chunk.
     */
    PPGMCHUNKR3MAP pMap;
    PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
    if (pTlbe->idChunk == idChunk)
    {
        STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
        pMap = pTlbe->pChunk;
        AssertPtr(pMap->pv);
    }
    else
    {
        STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));

        /*
         * Find the chunk, map it if necessary.
         */
        pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
        if (pMap)
        {
            AssertPtr(pMap->pv);
            pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;  /* LRU bookkeeping for chunk unmapping. */
        }
        else
        {
            int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
            if (RT_FAILURE(rc))
                return rc;
            AssertPtr(pMap->pv);
        }

        /*
         * Enter it into the Chunk TLB.
         */
        pTlbe->idChunk = idChunk;
        pTlbe->pChunk = pMap;
    }

    *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
    *ppMap = pMap;
    return VINF_SUCCESS;
# endif /* !IN_RING0 */
}
1461
1462
1463/**
1464 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1465 *
1466 * This is typically used is paths where we cannot use the TLB methods (like ROM
1467 * pages) or where there is no point in using them since we won't get many hits.
1468 *
1469 * @returns VBox strict status code.
1470 * @retval VINF_SUCCESS on success.
1471 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1472 * @retval VERR_PGM_PHYS_PAGE_RESERVED it it's a valid page but has no physical backing.
1473 *
1474 * @param pVM The cross context VM structure.
1475 * @param pPage The physical page tracking structure.
1476 * @param GCPhys The address of the page.
1477 * @param ppv Where to store the mapping address of the page. The page
1478 * offset is masked off!
1479 *
1480 * @remarks Called from within the PGM critical section. The mapping is only
1481 * valid while you are inside section.
1482 */
1483int pgmPhysPageMakeWritableAndMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1484{
1485 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1486 if (RT_SUCCESS(rc))
1487 {
1488 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1489 PPGMPAGEMAP pMapIgnore;
1490 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1491 if (RT_FAILURE(rc2)) /* preserve rc */
1492 rc = rc2;
1493 }
1494 return rc;
1495}
1496
1497
1498/**
1499 * Maps a page into the current virtual address space so it can be accessed for
1500 * both writing and reading.
1501 *
1502 * This is typically used is paths where we cannot use the TLB methods (like ROM
1503 * pages) or where there is no point in using them since we won't get many hits.
1504 *
1505 * @returns VBox status code.
1506 * @retval VINF_SUCCESS on success.
1507 * @retval VERR_PGM_PHYS_PAGE_RESERVED it it's a valid page but has no physical backing.
1508 *
1509 * @param pVM The cross context VM structure.
1510 * @param pPage The physical page tracking structure. Must be in the
1511 * allocated state.
1512 * @param GCPhys The address of the page.
1513 * @param ppv Where to store the mapping address of the page. The page
1514 * offset is masked off!
1515 *
1516 * @remarks Called from within the PGM critical section. The mapping is only
1517 * valid while you are inside section.
1518 */
1519int pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1520{
1521 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1522 PPGMPAGEMAP pMapIgnore;
1523 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1524}
1525
1526
1527/**
1528 * Maps a page into the current virtual address space so it can be accessed for
1529 * reading.
1530 *
1531 * This is typically used is paths where we cannot use the TLB methods (like ROM
1532 * pages) or where there is no point in using them since we won't get many hits.
1533 *
1534 * @returns VBox status code.
1535 * @retval VINF_SUCCESS on success.
1536 * @retval VERR_PGM_PHYS_PAGE_RESERVED it it's a valid page but has no physical backing.
1537 *
1538 * @param pVM The cross context VM structure.
1539 * @param pPage The physical page tracking structure.
1540 * @param GCPhys The address of the page.
1541 * @param ppv Where to store the mapping address of the page. The page
1542 * offset is masked off!
1543 *
1544 * @remarks Called from within the PGM critical section. The mapping is only
1545 * valid while you are inside this section.
1546 */
1547int pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1548{
1549 PPGMPAGEMAP pMapIgnore;
1550 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1551}
1552
1553
1554/**
1555 * Load a guest page into the ring-3 physical TLB.
1556 *
1557 * @returns VBox status code.
1558 * @retval VINF_SUCCESS on success
1559 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1560 * @param pVM The cross context VM structure.
1561 * @param GCPhys The guest physical address in question.
1562 */
1563int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys)
1564{
1565 PGM_LOCK_ASSERT_OWNER(pVM);
1566
1567 /*
1568 * Find the ram range and page and hand it over to the with-page function.
1569 * 99.8% of requests are expected to be in the first range.
1570 */
1571 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1572 if (!pPage)
1573 {
1574 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1575 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1576 }
1577
1578 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1579}
1580
1581
/**
 * Load a guest page into the ring-3 physical TLB.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pPage       Pointer to the PGMPAGE structure corresponding to
 *                      GCPhys.
 * @param   GCPhys      The guest physical address in question.
 */
int pgmPhysPageLoadIntoTlbWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
{
    PGM_LOCK_ASSERT_OWNER(pVM);
    STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));

    /*
     * Map the page.
     * Make a special case for the zero page as it is kind of special.
     */
    PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    if (   !PGM_PAGE_IS_ZERO(pPage)
        && !PGM_PAGE_IS_BALLOONED(pPage))
    {
        void *pv;
        PPGMPAGEMAP pMap;
        int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
        if (RT_FAILURE(rc))
            return rc;
# ifndef IN_RING0
        pTlbe->pMap = pMap;
# endif
        pTlbe->pv = pv;
        Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
    }
    else
    {
        /* Zero and ballooned pages are all backed by the single shared ZERO page. */
        AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
# ifndef IN_RING0
        pTlbe->pMap = NULL;
# endif
        pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
    }
# ifdef PGM_WITH_PHYS_TLB
    if (    PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
        ||  PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
        pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
    else
        pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
# else
    pTlbe->GCPhys = NIL_RTGCPHYS;
# endif
    pTlbe->pPage = pPage;
    return VINF_SUCCESS;
}
1638
1639
1640/**
1641 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1642 * own the PGM lock and therefore not need to lock the mapped page.
1643 *
1644 * @returns VBox status code.
1645 * @retval VINF_SUCCESS on success.
1646 * @retval VERR_PGM_PHYS_PAGE_RESERVED it it's a valid page but has no physical backing.
1647 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1648 *
1649 * @param pVM The cross context VM structure.
1650 * @param GCPhys The guest physical address of the page that should be mapped.
1651 * @param pPage Pointer to the PGMPAGE structure for the page.
1652 * @param ppv Where to store the address corresponding to GCPhys.
1653 *
1654 * @internal
1655 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1656 */
1657int pgmPhysGCPhys2CCPtrInternalDepr(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1658{
1659 int rc;
1660 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1661 PGM_LOCK_ASSERT_OWNER(pVM);
1662 pVM->pgm.s.cDeprecatedPageLocks++;
1663
1664 /*
1665 * Make sure the page is writable.
1666 */
1667 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1668 {
1669 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1670 if (RT_FAILURE(rc))
1671 return rc;
1672 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1673 }
1674 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1675
1676 /*
1677 * Get the mapping address.
1678 */
1679 PPGMPAGEMAPTLBE pTlbe;
1680 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1681 if (RT_FAILURE(rc))
1682 return rc;
1683 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1684 return VINF_SUCCESS;
1685}
1686
1687
/**
 * Locks a page mapping for writing.
 *
 * Bumps the page's write-lock count and, in ring-3, the chunk mapping
 * reference count, recording both in the caller's lock structure.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pPage       The page.
 * @param   pTlbe       The mapping TLB entry for the page.
 * @param   pLock       The lock structure (output).
 */
DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
{
# ifndef IN_RING0
    PPGMPAGEMAP pMap = pTlbe->pMap;
    if (pMap)
        pMap->cRefs++;
# else
    RT_NOREF(pTlbe);
# endif

    /* The lock count saturates at PGM_PAGE_MAX_LOCKS: once there, it is never
       incremented (or decremented) again, leaving the page permanently locked. */
    unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
    if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
    {
        if (cLocks == 0)
            pVM->pgm.s.cWriteLockedPages++;
        PGM_PAGE_INC_WRITE_LOCKS(pPage);
    }
    else if (cLocks != PGM_PAGE_MAX_LOCKS)
    {
        PGM_PAGE_INC_WRITE_LOCKS(pPage);
        AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
# ifndef IN_RING0
        if (pMap)
            pMap->cRefs++; /* Extra ref to prevent it from going away. */
# endif
    }

    pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
# ifndef IN_RING0
    pLock->pvMap = pMap;
# else
    pLock->pvMap = NULL;
# endif
}
1730
/**
 * Locks a page mapping for reading.
 *
 * Bumps the page's read-lock count and, in ring-3, the chunk mapping
 * reference count, recording both in the caller's lock structure.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pPage       The page.
 * @param   pTlbe       The mapping TLB entry for the page.
 * @param   pLock       The lock structure (output).
 */
DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
{
# ifndef IN_RING0
    PPGMPAGEMAP pMap = pTlbe->pMap;
    if (pMap)
        pMap->cRefs++;
# else
    RT_NOREF(pTlbe);
# endif

    /* The lock count saturates at PGM_PAGE_MAX_LOCKS: once there, it is never
       incremented (or decremented) again, leaving the page permanently locked. */
    unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
    if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
    {
        if (cLocks == 0)
            pVM->pgm.s.cReadLockedPages++;
        PGM_PAGE_INC_READ_LOCKS(pPage);
    }
    else if (cLocks != PGM_PAGE_MAX_LOCKS)
    {
        PGM_PAGE_INC_READ_LOCKS(pPage);
        AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
# ifndef IN_RING0
        if (pMap)
            pMap->cRefs++; /* Extra ref to prevent it from going away. */
# endif
    }

    pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
# ifndef IN_RING0
    pLock->pvMap = pMap;
# else
    pLock->pvMap = NULL;
# endif
}
1773
1774
1775/**
1776 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1777 * own the PGM lock and have access to the page structure.
1778 *
1779 * @returns VBox status code.
1780 * @retval VINF_SUCCESS on success.
1781 * @retval VERR_PGM_PHYS_PAGE_RESERVED it it's a valid page but has no physical backing.
1782 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1783 *
1784 * @param pVM The cross context VM structure.
1785 * @param GCPhys The guest physical address of the page that should be mapped.
1786 * @param pPage Pointer to the PGMPAGE structure for the page.
1787 * @param ppv Where to store the address corresponding to GCPhys.
1788 * @param pLock Where to store the lock information that
1789 * pgmPhysReleaseInternalPageMappingLock needs.
1790 *
1791 * @internal
1792 */
1793int pgmPhysGCPhys2CCPtrInternal(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1794{
1795 int rc;
1796 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1797 PGM_LOCK_ASSERT_OWNER(pVM);
1798
1799 /*
1800 * Make sure the page is writable.
1801 */
1802 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1803 {
1804 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1805 if (RT_FAILURE(rc))
1806 return rc;
1807 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1808 }
1809 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1810
1811 /*
1812 * Do the job.
1813 */
1814 PPGMPAGEMAPTLBE pTlbe;
1815 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1816 if (RT_FAILURE(rc))
1817 return rc;
1818 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1819 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1820 return VINF_SUCCESS;
1821}
1822
1823
1824/**
1825 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1826 * own the PGM lock and have access to the page structure.
1827 *
1828 * @returns VBox status code.
1829 * @retval VINF_SUCCESS on success.
1830 * @retval VERR_PGM_PHYS_PAGE_RESERVED it it's a valid page but has no physical backing.
1831 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1832 *
1833 * @param pVM The cross context VM structure.
1834 * @param GCPhys The guest physical address of the page that should be mapped.
1835 * @param pPage Pointer to the PGMPAGE structure for the page.
1836 * @param ppv Where to store the address corresponding to GCPhys.
1837 * @param pLock Where to store the lock information that
1838 * pgmPhysReleaseInternalPageMappingLock needs.
1839 *
1840 * @internal
1841 */
1842int pgmPhysGCPhys2CCPtrInternalReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1843{
1844 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1845 PGM_LOCK_ASSERT_OWNER(pVM);
1846 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1847
1848 /*
1849 * Do the job.
1850 */
1851 PPGMPAGEMAPTLBE pTlbe;
1852 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1853 if (RT_FAILURE(rc))
1854 return rc;
1855 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1856 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1857 return VINF_SUCCESS;
1858}
1859
1860
1861/**
1862 * Requests the mapping of a guest page into the current context.
1863 *
1864 * This API should only be used for very short term, as it will consume scarse
1865 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1866 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1867 *
1868 * This API will assume your intention is to write to the page, and will
1869 * therefore replace shared and zero pages. If you do not intend to modify
1870 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1871 *
1872 * @returns VBox status code.
1873 * @retval VINF_SUCCESS on success.
1874 * @retval VERR_PGM_PHYS_PAGE_RESERVED it it's a valid page but has no physical backing.
1875 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1876 *
1877 * @param pVM The cross context VM structure.
1878 * @param GCPhys The guest physical address of the page that should be
1879 * mapped.
1880 * @param ppv Where to store the address corresponding to GCPhys.
1881 * @param pLock Where to store the lock information that
1882 * PGMPhysReleasePageMappingLock needs.
1883 *
1884 * @remarks The caller is responsible for dealing with access handlers.
1885 * @todo Add an informational return code for pages with access handlers?
1886 *
1887 * @remark Avoid calling this API from within critical sections (other than
1888 * the PGM one) because of the deadlock risk. External threads may
1889 * need to delegate jobs to the EMTs.
1890 * @remarks Only one page is mapped! Make no assumption about what's after or
1891 * before the returned page!
1892 * @thread Any thread.
1893 */
1894VMM_INT_DECL(int) PGMPhysGCPhys2CCPtr(PVMCC pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1895{
1896 int rc = PGM_LOCK(pVM);
1897 AssertRCReturn(rc, rc);
1898
1899 /*
1900 * Query the Physical TLB entry for the page (may fail).
1901 */
1902 PPGMPAGEMAPTLBE pTlbe;
1903 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1904 if (RT_SUCCESS(rc))
1905 {
1906 /*
1907 * If the page is shared, the zero page, or being write monitored
1908 * it must be converted to a page that's writable if possible.
1909 */
1910 PPGMPAGE pPage = pTlbe->pPage;
1911 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1912 {
1913 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1914 if (RT_SUCCESS(rc))
1915 {
1916 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1917 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1918 }
1919 }
1920 if (RT_SUCCESS(rc))
1921 {
1922 /*
1923 * Now, just perform the locking and calculate the return address.
1924 */
1925 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1926 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1927 }
1928 }
1929
1930 PGM_UNLOCK(pVM);
1931 return rc;
1932}
1933
1934
1935/**
1936 * Requests the mapping of a guest page into the current context.
1937 *
1938 * This API should only be used for very short term, as it will consume scarse
1939 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1940 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1941 *
1942 * @returns VBox status code.
1943 * @retval VINF_SUCCESS on success.
1944 * @retval VERR_PGM_PHYS_PAGE_RESERVED it it's a valid page but has no physical backing.
1945 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1946 *
1947 * @param pVM The cross context VM structure.
1948 * @param GCPhys The guest physical address of the page that should be
1949 * mapped.
1950 * @param ppv Where to store the address corresponding to GCPhys.
1951 * @param pLock Where to store the lock information that
1952 * PGMPhysReleasePageMappingLock needs.
1953 *
1954 * @remarks The caller is responsible for dealing with access handlers.
1955 * @todo Add an informational return code for pages with access handlers?
1956 *
1957 * @remarks Avoid calling this API from within critical sections (other than
1958 * the PGM one) because of the deadlock risk.
1959 * @remarks Only one page is mapped! Make no assumption about what's after or
1960 * before the returned page!
1961 * @thread Any thread.
1962 */
1963VMM_INT_DECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVMCC pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1964{
1965 int rc = PGM_LOCK(pVM);
1966 AssertRCReturn(rc, rc);
1967
1968 /*
1969 * Query the Physical TLB entry for the page (may fail).
1970 */
1971 PPGMPAGEMAPTLBE pTlbe;
1972 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1973 if (RT_SUCCESS(rc))
1974 {
1975 /* MMIO pages doesn't have any readable backing. */
1976 PPGMPAGE pPage = pTlbe->pPage;
1977 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1978 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1979 else
1980 {
1981 /*
1982 * Now, just perform the locking and calculate the return address.
1983 */
1984 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1985 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1986 }
1987 }
1988
1989 PGM_UNLOCK(pVM);
1990 return rc;
1991}
1992
1993
1994/**
1995 * Requests the mapping of a guest page given by virtual address into the current context.
1996 *
1997 * This API should only be used for very short term, as it will consume
1998 * scarse resources (R0 and GC) in the mapping cache. When you're done
1999 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2000 *
2001 * This API will assume your intention is to write to the page, and will
2002 * therefore replace shared and zero pages. If you do not intend to modify
2003 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
2004 *
2005 * @returns VBox status code.
2006 * @retval VINF_SUCCESS on success.
2007 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2008 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2009 * @retval VERR_PGM_PHYS_PAGE_RESERVED it it's a valid page but has no physical backing.
2010 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2011 *
2012 * @param pVCpu The cross context virtual CPU structure.
2013 * @param GCPtr The guest physical address of the page that should be
2014 * mapped.
2015 * @param ppv Where to store the address corresponding to GCPhys.
2016 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2017 *
2018 * @remark Avoid calling this API from within critical sections (other than
2019 * the PGM one) because of the deadlock risk.
2020 * @thread EMT
2021 */
2022VMM_INT_DECL(int) PGMPhysGCPtr2CCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
2023{
2024 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2025 RTGCPHYS GCPhys;
2026 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2027 if (RT_SUCCESS(rc))
2028 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2029 return rc;
2030}
2031
2032
2033/**
2034 * Requests the mapping of a guest page given by virtual address into the current context.
2035 *
2036 * This API should only be used for very short term, as it will consume
2037 * scarse resources (R0 and GC) in the mapping cache. When you're done
2038 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2039 *
2040 * @returns VBox status code.
2041 * @retval VINF_SUCCESS on success.
2042 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2043 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2044 * @retval VERR_PGM_PHYS_PAGE_RESERVED it it's a valid page but has no physical backing.
2045 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2046 *
2047 * @param pVCpu The cross context virtual CPU structure.
2048 * @param GCPtr The guest physical address of the page that should be
2049 * mapped.
2050 * @param ppv Where to store the address corresponding to GCPtr.
2051 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2052 *
2053 * @remark Avoid calling this API from within critical sections (other than
2054 * the PGM one) because of the deadlock risk.
2055 * @thread EMT
2056 */
2057VMM_INT_DECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
2058{
2059 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2060 RTGCPHYS GCPhys;
2061 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2062 if (RT_SUCCESS(rc))
2063 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2064 return rc;
2065}
2066
2067
/**
 * Release the mapping of a guest page.
 *
 * This is the counter part of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly
 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
 *
 * Decrements the page's read or write lock count (as recorded in the lock
 * structure), updates the VM-wide locked-page statistics, restores write
 * monitoring if it was pending, and drops the ring-3 chunk mapping reference.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pLock       The lock structure initialized by the mapping function.
 *                      Cleared on return so it cannot be released twice.
 */
VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
{
# ifndef IN_RING0
    PPGMPAGEMAP pMap       = (PPGMPAGEMAP)pLock->pvMap;
# endif
    /* uPageAndType packs the PGMPAGE pointer and the lock type in one field. */
    PPGMPAGE    pPage      = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
    bool        fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;

    /* Invalidate the lock structure before doing the work (defensive). */
    pLock->uPageAndType = 0;
    pLock->pvMap = NULL;

    PGM_LOCK_VOID(pVM);
    if (fWriteLock)
    {
        unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
        Assert(cLocks > 0);
        /* When the count has saturated at PGM_PAGE_MAX_LOCKS it is left as-is
           (the page stays "locked" forever rather than underflowing). */
        if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
        {
            if (cLocks == 1)
            {
                Assert(pVM->pgm.s.cWriteLockedPages > 0);
                pVM->pgm.s.cWriteLockedPages--;
            }
            PGM_PAGE_DEC_WRITE_LOCKS(pPage);
        }

        /* If write monitoring kicked in while we held the lock, make the page
           writable again now. */
        if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
        { /* probably extremely likely */ }
        else
            pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
    }
    else
    {
        unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
        Assert(cLocks > 0);
        if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
        {
            if (cLocks == 1)
            {
                Assert(pVM->pgm.s.cReadLockedPages > 0);
                pVM->pgm.s.cReadLockedPages--;
            }
            PGM_PAGE_DEC_READ_LOCKS(pPage);
        }
    }

# ifndef IN_RING0
    /* Drop the reference on the ring-3 chunk mapping, if any. */
    if (pMap)
    {
        Assert(pMap->cRefs >= 1);
        pMap->cRefs--;
    }
# endif
    PGM_UNLOCK(pVM);
}
2132
2133
#ifdef IN_RING3
/**
 * Release the mapping of multiple guest pages.
 *
 * This is the counter part to PGMR3PhysBulkGCPhys2CCPtrExternal() and
 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal().
 *
 * All locks in the array must be of the same type (the type of the first
 * entry decides; this is asserted in strict builds).  The per-lock work
 * mirrors PGMPhysReleasePageMappingLock; the PGM lock is yielded every
 * 1024 pages to keep lock hold times bounded.  The lock array is zeroed
 * on return so the locks cannot be released twice.
 *
 * @param   pVM         The cross context VM structure.
 * @param   cPages      Number of pages to unlock.
 * @param   paLocks     Array of locks lock structure initialized by the mapping
 *                      function.
 */
VMMDECL(void) PGMPhysBulkReleasePageMappingLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
{
    Assert(cPages > 0);
    bool const fWriteLock = (paLocks[0].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
#ifdef VBOX_STRICT
    /* All entries must carry the same lock type and a valid page pointer. */
    for (uint32_t i = 1; i < cPages; i++)
    {
        Assert(fWriteLock == ((paLocks[i].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE));
        AssertPtr(paLocks[i].uPageAndType);
    }
#endif

    PGM_LOCK_VOID(pVM);
    if (fWriteLock)
    {
        /*
         * Write locks:
         */
        for (uint32_t i = 0; i < cPages; i++)
        {
            PPGMPAGE pPage  = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
            unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
            Assert(cLocks > 0);
            /* Saturated lock counts (PGM_PAGE_MAX_LOCKS) are left untouched. */
            if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
            {
                if (cLocks == 1)
                {
                    Assert(pVM->pgm.s.cWriteLockedPages > 0);
                    pVM->pgm.s.cWriteLockedPages--;
                }
                PGM_PAGE_DEC_WRITE_LOCKS(pPage);
            }

            /* Restore write monitoring if it became pending while locked. */
            if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
            { /* probably extremely likely */ }
            else
                pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);

            /* Drop the reference on the ring-3 chunk mapping, if any. */
            PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
            if (pMap)
            {
                Assert(pMap->cRefs >= 1);
                pMap->cRefs--;
            }

            /* Yield the lock: */
            if ((i & 1023) == 1023 && i + 1 < cPages)
            {
                PGM_UNLOCK(pVM);
                PGM_LOCK_VOID(pVM);
            }
        }
    }
    else
    {
        /*
         * Read locks:
         */
        for (uint32_t i = 0; i < cPages; i++)
        {
            PPGMPAGE pPage  = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
            unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
            Assert(cLocks > 0);
            if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
            {
                if (cLocks == 1)
                {
                    Assert(pVM->pgm.s.cReadLockedPages > 0);
                    pVM->pgm.s.cReadLockedPages--;
                }
                PGM_PAGE_DEC_READ_LOCKS(pPage);
            }

            /* Drop the reference on the ring-3 chunk mapping, if any. */
            PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
            if (pMap)
            {
                Assert(pMap->cRefs >= 1);
                pMap->cRefs--;
            }

            /* Yield the lock: */
            if ((i & 1023) == 1023 && i + 1 < cPages)
            {
                PGM_UNLOCK(pVM);
                PGM_LOCK_VOID(pVM);
            }
        }
    }
    PGM_UNLOCK(pVM);

    /* Invalidate all the lock structures (defends against double release). */
    RT_BZERO(paLocks, sizeof(paLocks[0]) * cPages);
}
#endif /* IN_RING3 */
2239
2240
2241/**
2242 * Release the internal mapping of a guest page.
2243 *
2244 * This is the counter part of pgmPhysGCPhys2CCPtrInternalEx and
2245 * pgmPhysGCPhys2CCPtrInternalReadOnly.
2246 *
2247 * @param pVM The cross context VM structure.
2248 * @param pLock The lock structure initialized by the mapping function.
2249 *
2250 * @remarks Caller must hold the PGM lock.
2251 */
2252void pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2253{
2254 PGM_LOCK_ASSERT_OWNER(pVM);
2255 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
2256}
2257
2258
2259/**
2260 * Converts a GC physical address to a HC ring-3 pointer.
2261 *
2262 * @returns VINF_SUCCESS on success.
2263 * @returns VERR_PGM_PHYS_PAGE_RESERVED it it's a valid GC physical
2264 * page but has no physical backing.
2265 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2266 * GC physical address.
2267 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2268 * a dynamic ram chunk boundary
2269 *
2270 * @param pVM The cross context VM structure.
2271 * @param GCPhys The GC physical address to convert.
2272 * @param pR3Ptr Where to store the R3 pointer on success.
2273 *
2274 * @deprecated Avoid when possible!
2275 */
2276int pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2277{
2278/** @todo this is kind of hacky and needs some more work. */
2279#ifndef DEBUG_sandervl
2280 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
2281#endif
2282
2283 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
2284 PGM_LOCK_VOID(pVM);
2285
2286 PPGMRAMRANGE pRam;
2287 PPGMPAGE pPage;
2288 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2289 if (RT_SUCCESS(rc))
2290 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2291
2292 PGM_UNLOCK(pVM);
2293 Assert(rc <= VINF_SUCCESS);
2294 return rc;
2295}
2296
2297
2298/**
2299 * Converts a guest pointer to a GC physical address.
2300 *
2301 * This uses the current CR3/CR0/CR4 of the guest.
2302 *
2303 * @returns VBox status code.
2304 * @param pVCpu The cross context virtual CPU structure.
2305 * @param GCPtr The guest pointer to convert.
2306 * @param pGCPhys Where to store the GC physical address.
2307 */
2308VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2309{
2310 PGMPTWALK Walk;
2311 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2312 if (pGCPhys && RT_SUCCESS(rc))
2313 *pGCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK);
2314 return rc;
2315}
2316
2317
2318/**
2319 * Converts a guest pointer to a HC physical address.
2320 *
2321 * This uses the current CR3/CR0/CR4 of the guest.
2322 *
2323 * @returns VBox status code.
2324 * @param pVCpu The cross context virtual CPU structure.
2325 * @param GCPtr The guest pointer to convert.
2326 * @param pHCPhys Where to store the HC physical address.
2327 */
2328VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2329{
2330 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2331 PGMPTWALK Walk;
2332 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2333 if (RT_SUCCESS(rc))
2334 rc = PGMPhysGCPhys2HCPhys(pVM, Walk.GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
2335 return rc;
2336}
2337
2338
2339
2340#undef LOG_GROUP
2341#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2342
2343
#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
/**
 * Cache PGMPhys memory access
 *
 * @param   pVM             The cross context VM structure.
 * @param   pCache          Cache structure pointer
 * @param   GCPhys          GC physical address
 * @param   pbR3            Ring-3 pointer corresponding to physical page
 *
 * @thread  EMT.
 */
static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
{
    Assert(VM_IS_EMT(pVM));

    /* Align both addresses down to their page boundaries. */
    GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
    pbR3   = (uint8_t *)PAGE_ADDRESS(pbR3);

    /* Direct-mapped cache: the page number selects the slot. */
    uint32_t const iCacheIndex = (GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK;
    ASMBitSet(&pCache->aEntries, iCacheIndex);
    pCache->Entry[iCacheIndex].GCPhys = GCPhys;
    pCache->Entry[iCacheIndex].pbR3   = pbR3;
}
#endif /* IN_RING3 */
2372
2373
/**
 * Deals with reading from a page with one or more ALL access handlers.
 *
 * Maps the source page read-only, invokes any registered physical access
 * handler for the address (dropping the PGM lock around the callout), and
 * finally performs the default memcpy if the handler asked for it.
 *
 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
 *          See PGM_HANDLER_PHYS_IS_VALID_STATUS and
 *          PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pPage       The page descriptor.
 * @param   GCPhys      The physical address to start reading at.
 * @param   pvBuf       Where to put the bits we read.
 * @param   cb          How much to read - less or equal to a page.
 * @param   enmOrigin   The origin of this call.
 */
static VBOXSTRICTRC pgmPhysReadHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb,
                                       PGMACCESSORIGIN enmOrigin)
{
    /*
     * The most frequent access here is MMIO and shadowed ROM.
     * The current code ASSUMES all these access handlers covers full pages!
     */

    /*
     * Whatever we do we need the source page, map it first.
     */
    PGMPAGEMAPLOCK PgMpLck;
    const void    *pvSrc = NULL;
    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
/** @todo Check how this can work for MMIO pages? */
    if (RT_FAILURE(rc))
    {
        /* Mapping failure is not fatal for reads: feed the caller 0xff bytes
           and report success so emulation can continue. */
        AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
                               GCPhys, pPage, rc));
        memset(pvBuf, 0xff, cb);
        return VINF_SUCCESS;
    }

    VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;

    /*
     * Deal with any physical handlers.
     */
    PVMCPUCC pVCpu = VMMGetCpu(pVM);
    PPGMPHYSHANDLER pPhys = NULL;
    if (   PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
        || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
    {
        pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
        AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
        Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
        /* ALL handlers are assumed to cover whole pages (see above). */
        Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
        Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
#ifndef IN_RING3
        if (enmOrigin != PGMACCESSORIGIN_IEM)
        {
            /* Cannot reliably handle informational status codes in this context */
            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
            return VERR_PGM_PHYS_WR_HIT_HANDLER;
        }
#endif
        PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler); Assert(pfnHandler);
        void *pvUser = pPhys->CTX_SUFF(pvUser);

        Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
        STAM_PROFILE_START(&pPhys->Stat, h);
        PGM_LOCK_ASSERT_OWNER(pVM);

        /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
        PGM_UNLOCK(pVM);
        rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, pvUser);
        PGM_LOCK_VOID(pVM);

#ifdef VBOX_WITH_STATISTICS
        /* Re-lookup: the handler registration may have changed while the PGM
           lock was released above. */
        pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
        if (pPhys)
            STAM_PROFILE_STOP(&pPhys->Stat, h);
#else
        pPhys = NULL; /* might not be valid anymore. */
#endif
        AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, false),
                        ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
        if (   rcStrict != VINF_PGM_HANDLER_DO_DEFAULT
            && !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
        {
            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
            return rcStrict;
        }
    }

    /*
     * Take the default action.
     */
    if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
    {
        memcpy(pvBuf, pvSrc, cb);
        rcStrict = VINF_SUCCESS;
    }
    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    return rcStrict;
}
2474
2475
/**
 * Read physical memory.
 *
 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
 * want to ignore those.
 *
 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
 *          code in ring-3.  Use PGM_PHYS_RW_IS_SUCCESS to check.
 * @retval  VINF_SUCCESS in all context - read completed.
 *
 * @retval  VINF_EM_OFF in RC and R0 - read completed.
 * @retval  VINF_EM_SUSPEND in RC and R0 - read completed.
 * @retval  VINF_EM_RESET in RC and R0 - read completed.
 * @retval  VINF_EM_HALT in RC and R0 - read completed.
 * @retval  VINF_SELM_SYNC_GDT in RC only - read completed.
 *
 * @retval  VINF_EM_DBG_STOP in RC and R0 - read completed.
 * @retval  VINF_EM_DBG_BREAKPOINT in RC and R0 - read completed.
 * @retval  VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
 *
 * @retval  VINF_IOM_R3_MMIO_READ in RC and R0.
 * @retval  VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
 *
 * @retval  VINF_PATM_CHECK_PATCH_PAGE in RC only.
 *
 * @retval  VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
 *          haven't been cleared for strict status codes yet.
 *
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          Physical address start reading from.
 * @param   pvBuf           Where to put the read bits.
 * @param   cbRead          How many bytes to read.
 * @param   enmOrigin       The origin of this call.
 */
VMMDECL(VBOXSTRICTRC) PGMPhysRead(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
{
    AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
    LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));

    STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysRead));
    STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysReadBytes), cbRead);

    PGM_LOCK_VOID(pVM);

    /*
     * Copy loop on ram ranges.
     */
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
    for (;;)
    {
        /* Inside range or not? */
        if (pRam && GCPhys >= pRam->GCPhys)
        {
            /*
             * Must work our way thru this page by page.
             */
            RTGCPHYS off = GCPhys - pRam->GCPhys;
            while (off < pRam->cb)
            {
                unsigned iPage = off >> PAGE_SHIFT;
                PPGMPAGE pPage = &pRam->aPages[iPage];
                /* cb = bytes to copy from this page (clipped to request size). */
                size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
                if (cb > cbRead)
                    cb = cbRead;

                /*
                 * Normal page? Get the pointer to it.
                 */
                if (   !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
                    && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
                {
                    /*
                     * Get the pointer to the page.
                     */
                    PGMPAGEMAPLOCK PgMpLck;
                    const void    *pvSrc;
                    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
                    if (RT_SUCCESS(rc))
                    {
                        memcpy(pvBuf, pvSrc, cb);
                        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                    }
                    else
                    {
                        /* Non-fatal: return 0xff bytes for unmappable pages. */
                        AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
                                               pRam->GCPhys + off, pPage, rc));
                        memset(pvBuf, 0xff, cb);
                    }
                }
                /*
                 * Have ALL/MMIO access handlers.
                 */
                else
                {
                    VBOXSTRICTRC rcStrict2 = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
                    if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
                        PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
                    else
                    {
                        /* Handler refused: abort the read; the unread tail of
                           the buffer for this page is filled with 0xff. */
                        memset(pvBuf, 0xff, cb);
                        PGM_UNLOCK(pVM);
                        return rcStrict2;
                    }
                }

                /* next page */
                if (cb >= cbRead)
                {
                    PGM_UNLOCK(pVM);
                    return rcStrict;
                }
                cbRead -= cb;
                off    += cb;
                pvBuf   = (char *)pvBuf + cb;
            } /* walk pages in ram range. */

            GCPhys = pRam->GCPhysLast + 1;
        }
        else
        {
            LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));

            /*
             * Unassigned address space.
             */
            size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
            if (cb >= cbRead)
            {
                memset(pvBuf, 0xff, cbRead);
                break;
            }
            memset(pvBuf, 0xff, cb);

            cbRead -= cb;
            pvBuf   = (char *)pvBuf + cb;
            GCPhys += cb;
        }

        /* Advance range if necessary. */
        while (pRam && GCPhys > pRam->GCPhysLast)
            pRam = pRam->CTX_SUFF(pNext);
    } /* Ram range walk */

    PGM_UNLOCK(pVM);
    return rcStrict;
}
2623
2624
2625/**
2626 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2627 *
2628 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2629 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2630 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2631 *
2632 * @param pVM The cross context VM structure.
2633 * @param pPage The page descriptor.
2634 * @param GCPhys The physical address to start writing at.
2635 * @param pvBuf What to write.
2636 * @param cbWrite How much to write - less or equal to a page.
2637 * @param enmOrigin The origin of this call.
2638 */
2639static VBOXSTRICTRC pgmPhysWriteHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2640 PGMACCESSORIGIN enmOrigin)
2641{
2642 PGMPAGEMAPLOCK PgMpLck;
2643 void *pvDst = NULL;
2644 VBOXSTRICTRC rcStrict;
2645
2646 /*
2647 * Give priority to physical handlers (like #PF does).
2648 *
2649 * Hope for a lonely physical handler first that covers the whole
2650 * write area. This should be a pretty frequent case with MMIO and
2651 * the heavy usage of full page handlers in the page pool.
2652 */
2653 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2654 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2655 if (pCur)
2656 {
2657 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2658#ifndef IN_RING3
2659 if (enmOrigin != PGMACCESSORIGIN_IEM)
2660 /* Cannot reliably handle informational status codes in this context */
2661 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2662#endif
2663 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2664 if (cbRange > cbWrite)
2665 cbRange = cbWrite;
2666
2667 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler));
2668 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2669 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2670 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2671 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2672 else
2673 rcStrict = VINF_SUCCESS;
2674 if (RT_SUCCESS(rcStrict))
2675 {
2676 PPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
2677 PFNPGMPHYSHANDLER const pfnHandler = pCurType->CTX_SUFF(pfnHandler);
2678 void * const pvUser = pCur->CTX_SUFF(pvUser);
2679 STAM_PROFILE_START(&pCur->Stat, h);
2680
2681 /* Most handlers will want to release the PGM lock for deadlock prevention
2682 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2683 dirty page trackers will want to keep it for performance reasons. */
2684 PGM_LOCK_ASSERT_OWNER(pVM);
2685 if (pCurType->fKeepPgmLock)
2686 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2687 else
2688 {
2689 PGM_UNLOCK(pVM);
2690 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2691 PGM_LOCK_VOID(pVM);
2692 }
2693
2694#ifdef VBOX_WITH_STATISTICS
2695 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2696 if (pCur)
2697 STAM_PROFILE_STOP(&pCur->Stat, h);
2698#else
2699 pCur = NULL; /* might not be valid anymore. */
2700#endif
2701 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2702 {
2703 if (pvDst)
2704 memcpy(pvDst, pvBuf, cbRange);
2705 rcStrict = VINF_SUCCESS;
2706 }
2707 else
2708 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
2709 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2710 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pCur ? R3STRING(pCur->pszDesc) : ""));
2711 }
2712 else
2713 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2714 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
2715 if (RT_LIKELY(cbRange == cbWrite) || !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2716 {
2717 if (pvDst)
2718 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2719 return rcStrict;
2720 }
2721
2722 /* more fun to be had below */
2723 cbWrite -= cbRange;
2724 GCPhys += cbRange;
2725 pvBuf = (uint8_t *)pvBuf + cbRange;
2726 pvDst = (uint8_t *)pvDst + cbRange;
2727 }
2728 else /* The handler is somewhere else in the page, deal with it below. */
2729 rcStrict = VINF_SUCCESS;
2730 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2731
2732 /*
2733 * Deal with all the odd ends (used to be deal with virt+phys).
2734 */
2735 Assert(rcStrict != VINF_PGM_HANDLER_DO_DEFAULT);
2736
2737 /* We need a writable destination page. */
2738 if (!pvDst)
2739 {
2740 int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2741 AssertLogRelMsgReturn(RT_SUCCESS(rc2),
2742 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n", GCPhys, pPage, rc2),
2743 rc2);
2744 }
2745
2746 /* The loop state (big + ugly). */
2747 PPGMPHYSHANDLER pPhys = NULL;
2748 uint32_t offPhys = PAGE_SIZE;
2749 uint32_t offPhysLast = PAGE_SIZE;
2750 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2751
2752 /* The loop. */
2753 for (;;)
2754 {
2755 if (fMorePhys && !pPhys)
2756 {
2757 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2758 if (pPhys)
2759 {
2760 offPhys = 0;
2761 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2762 }
2763 else
2764 {
2765 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2766 GCPhys, true /* fAbove */);
2767 if ( pPhys
2768 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2769 {
2770 offPhys = pPhys->Core.Key - GCPhys;
2771 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2772 }
2773 else
2774 {
2775 pPhys = NULL;
2776 fMorePhys = false;
2777 offPhys = offPhysLast = PAGE_SIZE;
2778 }
2779 }
2780 }
2781
2782 /*
2783 * Handle access to space without handlers (that's easy).
2784 */
2785 VBOXSTRICTRC rcStrict2 = VINF_PGM_HANDLER_DO_DEFAULT;
2786 uint32_t cbRange = (uint32_t)cbWrite;
2787
2788 /*
2789 * Physical handler.
2790 */
2791 if (!offPhys)
2792 {
2793#ifndef IN_RING3
2794 if (enmOrigin != PGMACCESSORIGIN_IEM)
2795 /* Cannot reliably handle informational status codes in this context */
2796 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2797#endif
2798 if (cbRange > offPhysLast + 1)
2799 cbRange = offPhysLast + 1;
2800
2801 PPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys);
2802 PFNPGMPHYSHANDLER const pfnHandler = pCurType->CTX_SUFF(pfnHandler);
2803 void * const pvUser = pPhys->CTX_SUFF(pvUser);
2804
2805 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2806 STAM_PROFILE_START(&pPhys->Stat, h);
2807
2808 /* Most handlers will want to release the PGM lock for deadlock prevention
2809 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2810 dirty page trackers will want to keep it for performance reasons. */
2811 PGM_LOCK_ASSERT_OWNER(pVM);
2812 if (pCurType->fKeepPgmLock)
2813 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2814 else
2815 {
2816 PGM_UNLOCK(pVM);
2817 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2818 PGM_LOCK_VOID(pVM);
2819 }
2820
2821#ifdef VBOX_WITH_STATISTICS
2822 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2823 if (pPhys)
2824 STAM_PROFILE_STOP(&pPhys->Stat, h);
2825#else
2826 pPhys = NULL; /* might not be valid anymore. */
2827#endif
2828 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true),
2829 ("rcStrict2=%Rrc (rcStrict=%Rrc) GCPhys=%RGp pPage=%R[pgmpage] %s\n", VBOXSTRICTRC_VAL(rcStrict2),
2830 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pPhys ? R3STRING(pPhys->pszDesc) : ""));
2831 }
2832
2833 /*
2834 * Execute the default action and merge the status codes.
2835 */
2836 if (rcStrict2 == VINF_PGM_HANDLER_DO_DEFAULT)
2837 {
2838 memcpy(pvDst, pvBuf, cbRange);
2839 rcStrict2 = VINF_SUCCESS;
2840 }
2841 else if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2842 {
2843 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2844 return rcStrict2;
2845 }
2846 else
2847 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2848
2849 /*
2850 * Advance if we've got more stuff to do.
2851 */
2852 if (cbRange >= cbWrite)
2853 {
2854 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2855 return rcStrict;
2856 }
2857
2858
2859 cbWrite -= cbRange;
2860 GCPhys += cbRange;
2861 pvBuf = (uint8_t *)pvBuf + cbRange;
2862 pvDst = (uint8_t *)pvDst + cbRange;
2863
2864 offPhys -= cbRange;
2865 offPhysLast -= cbRange;
2866 }
2867}
2868
2869
2870/**
2871 * Write to physical memory.
2872 *
2873 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2874 * want to ignore those.
2875 *
2876 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2877 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2878 * @retval VINF_SUCCESS in all context - write completed.
2879 *
2880 * @retval VINF_EM_OFF in RC and R0 - write completed.
2881 * @retval VINF_EM_SUSPEND in RC and R0 - write completed.
2882 * @retval VINF_EM_RESET in RC and R0 - write completed.
2883 * @retval VINF_EM_HALT in RC and R0 - write completed.
2884 * @retval VINF_SELM_SYNC_GDT in RC only - write completed.
2885 *
2886 * @retval VINF_EM_DBG_STOP in RC and R0 - write completed.
2887 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - write completed.
2888 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2889 *
2890 * @retval VINF_IOM_R3_MMIO_WRITE in RC and R0.
2891 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2892 * @retval VINF_IOM_R3_MMIO_COMMIT_WRITE in RC and R0.
2893 *
2894 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT in RC only - write completed.
2895 * @retval VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT in RC only.
2896 * @retval VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT in RC only.
2897 * @retval VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT in RC only.
2898 * @retval VINF_CSAM_PENDING_ACTION in RC only.
2899 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2900 *
2901 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2902 * haven't been cleared for strict status codes yet.
2903 *
2904 *
2905 * @param pVM The cross context VM structure.
2906 * @param GCPhys Physical address to write to.
2907 * @param pvBuf What to write.
2908 * @param cbWrite How many bytes to write.
2909 * @param enmOrigin Who is calling.
2910 */
VMMDECL(VBOXSTRICTRC) PGMPhysWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
{
    /* Writes after saved-state writing has started indicate a caller bug. */
    AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
    AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
    LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));

    STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWrite));
    STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);

    PGM_LOCK_VOID(pVM);

    /*
     * Copy loop on ram ranges.
     */
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
    for (;;)
    {
        /* Inside range or not? */
        if (pRam && GCPhys >= pRam->GCPhys)
        {
            /*
             * Must work our way thru this page by page.
             */
            RTGCPTR off = GCPhys - pRam->GCPhys;
            while (off < pRam->cb)
            {
                RTGCPTR iPage = off >> PAGE_SHIFT;
                PPGMPAGE pPage = &pRam->aPages[iPage];
                /* Clip the write so it does not cross the page boundary. */
                size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
                if (cb > cbWrite)
                    cb = cbWrite;

                /*
                 * Normal page? Get the pointer to it.
                 */
                if (   !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
                    && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
                {
                    PGMPAGEMAPLOCK PgMpLck;
                    void *pvDst;
                    int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
                    if (RT_SUCCESS(rc))
                    {
                        Assert(!PGM_PAGE_IS_BALLOONED(pPage));
                        memcpy(pvDst, pvBuf, cb);
                        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                    }
                    /* Ignore writes to ballooned pages. */
                    else if (!PGM_PAGE_IS_BALLOONED(pPage))
                        AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
                                               pRam->GCPhys + off, pPage, rc));
                }
                /*
                 * Active WRITE or ALL access handlers.
                 */
                else
                {
                    VBOXSTRICTRC rcStrict2 = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
                    if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
                        PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
                    else
                    {
                        /* Non-success status from the handler aborts the whole write. */
                        PGM_UNLOCK(pVM);
                        return rcStrict2;
                    }
                }

                /* next page */
                if (cb >= cbWrite)
                {
                    PGM_UNLOCK(pVM);
                    return rcStrict;
                }

                cbWrite -= cb;
                off += cb;
                pvBuf = (const char *)pvBuf + cb;
            } /* walk pages in ram range */

            /* Continue with the address right after this range. */
            GCPhys = pRam->GCPhysLast + 1;
        }
        else
        {
            /*
             * Unassigned address space, skip it.
             */
            if (!pRam)
                break;
            size_t cb = pRam->GCPhys - GCPhys;
            if (cb >= cbWrite)
                break;
            cbWrite -= cb;
            pvBuf = (const char *)pvBuf + cb;
            GCPhys += cb;
        }

        /* Advance range if necessary. */
        while (pRam && GCPhys > pRam->GCPhysLast)
            pRam = pRam->CTX_SUFF(pNext);
    } /* Ram range walk */

    PGM_UNLOCK(pVM);
    return rcStrict;
}
3016
3017
3018/**
3019 * Read from guest physical memory by GC physical address, bypassing
3020 * MMIO and access handlers.
3021 *
3022 * @returns VBox status code.
3023 * @param pVM The cross context VM structure.
3024 * @param pvDst The destination address.
3025 * @param GCPhysSrc The source address (GC physical address).
3026 * @param cb The number of bytes to read.
3027 */
3028VMMDECL(int) PGMPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
3029{
3030 /*
3031 * Treat the first page as a special case.
3032 */
3033 if (!cb)
3034 return VINF_SUCCESS;
3035
3036 /* map the 1st page */
3037 void const *pvSrc;
3038 PGMPAGEMAPLOCK Lock;
3039 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3040 if (RT_FAILURE(rc))
3041 return rc;
3042
3043 /* optimize for the case where access is completely within the first page. */
3044 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
3045 if (RT_LIKELY(cb <= cbPage))
3046 {
3047 memcpy(pvDst, pvSrc, cb);
3048 PGMPhysReleasePageMappingLock(pVM, &Lock);
3049 return VINF_SUCCESS;
3050 }
3051
3052 /* copy to the end of the page. */
3053 memcpy(pvDst, pvSrc, cbPage);
3054 PGMPhysReleasePageMappingLock(pVM, &Lock);
3055 GCPhysSrc += cbPage;
3056 pvDst = (uint8_t *)pvDst + cbPage;
3057 cb -= cbPage;
3058
3059 /*
3060 * Page by page.
3061 */
3062 for (;;)
3063 {
3064 /* map the page */
3065 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3066 if (RT_FAILURE(rc))
3067 return rc;
3068
3069 /* last page? */
3070 if (cb <= PAGE_SIZE)
3071 {
3072 memcpy(pvDst, pvSrc, cb);
3073 PGMPhysReleasePageMappingLock(pVM, &Lock);
3074 return VINF_SUCCESS;
3075 }
3076
3077 /* copy the entire page and advance */
3078 memcpy(pvDst, pvSrc, PAGE_SIZE);
3079 PGMPhysReleasePageMappingLock(pVM, &Lock);
3080 GCPhysSrc += PAGE_SIZE;
3081 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3082 cb -= PAGE_SIZE;
3083 }
3084 /* won't ever get here. */
3085}
3086
3087
3088/**
 * Write to guest physical memory referenced by GC physical address.
3091 *
3092 * This will bypass MMIO and access handlers.
3093 *
3094 * @returns VBox status code.
3095 * @param pVM The cross context VM structure.
3096 * @param GCPhysDst The GC physical address of the destination.
3097 * @param pvSrc The source buffer.
3098 * @param cb The number of bytes to write.
3099 */
3100VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
3101{
3102 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
3103
3104 /*
3105 * Treat the first page as a special case.
3106 */
3107 if (!cb)
3108 return VINF_SUCCESS;
3109
3110 /* map the 1st page */
3111 void *pvDst;
3112 PGMPAGEMAPLOCK Lock;
3113 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3114 if (RT_FAILURE(rc))
3115 return rc;
3116
3117 /* optimize for the case where access is completely within the first page. */
3118 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
3119 if (RT_LIKELY(cb <= cbPage))
3120 {
3121 memcpy(pvDst, pvSrc, cb);
3122 PGMPhysReleasePageMappingLock(pVM, &Lock);
3123 return VINF_SUCCESS;
3124 }
3125
3126 /* copy to the end of the page. */
3127 memcpy(pvDst, pvSrc, cbPage);
3128 PGMPhysReleasePageMappingLock(pVM, &Lock);
3129 GCPhysDst += cbPage;
3130 pvSrc = (const uint8_t *)pvSrc + cbPage;
3131 cb -= cbPage;
3132
3133 /*
3134 * Page by page.
3135 */
3136 for (;;)
3137 {
3138 /* map the page */
3139 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3140 if (RT_FAILURE(rc))
3141 return rc;
3142
3143 /* last page? */
3144 if (cb <= PAGE_SIZE)
3145 {
3146 memcpy(pvDst, pvSrc, cb);
3147 PGMPhysReleasePageMappingLock(pVM, &Lock);
3148 return VINF_SUCCESS;
3149 }
3150
3151 /* copy the entire page and advance */
3152 memcpy(pvDst, pvSrc, PAGE_SIZE);
3153 PGMPhysReleasePageMappingLock(pVM, &Lock);
3154 GCPhysDst += PAGE_SIZE;
3155 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3156 cb -= PAGE_SIZE;
3157 }
3158 /* won't ever get here. */
3159}
3160
3161
3162/**
3163 * Read from guest physical memory referenced by GC pointer.
3164 *
3165 * This function uses the current CR3/CR0/CR4 of the guest and will
3166 * bypass access handlers and not set any accessed bits.
3167 *
3168 * @returns VBox status code.
3169 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3170 * @param pvDst The destination address.
3171 * @param GCPtrSrc The source address (GC pointer).
3172 * @param cb The number of bytes to read.
3173 */
3174VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3175{
3176 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3177/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3178
3179 /*
3180 * Treat the first page as a special case.
3181 */
3182 if (!cb)
3183 return VINF_SUCCESS;
3184
3185 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleRead));
3186 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3187
3188 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3189 * when many VCPUs are fighting for the lock.
3190 */
3191 PGM_LOCK_VOID(pVM);
3192
3193 /* map the 1st page */
3194 void const *pvSrc;
3195 PGMPAGEMAPLOCK Lock;
3196 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3197 if (RT_FAILURE(rc))
3198 {
3199 PGM_UNLOCK(pVM);
3200 return rc;
3201 }
3202
3203 /* optimize for the case where access is completely within the first page. */
3204 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3205 if (RT_LIKELY(cb <= cbPage))
3206 {
3207 memcpy(pvDst, pvSrc, cb);
3208 PGMPhysReleasePageMappingLock(pVM, &Lock);
3209 PGM_UNLOCK(pVM);
3210 return VINF_SUCCESS;
3211 }
3212
3213 /* copy to the end of the page. */
3214 memcpy(pvDst, pvSrc, cbPage);
3215 PGMPhysReleasePageMappingLock(pVM, &Lock);
3216 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3217 pvDst = (uint8_t *)pvDst + cbPage;
3218 cb -= cbPage;
3219
3220 /*
3221 * Page by page.
3222 */
3223 for (;;)
3224 {
3225 /* map the page */
3226 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3227 if (RT_FAILURE(rc))
3228 {
3229 PGM_UNLOCK(pVM);
3230 return rc;
3231 }
3232
3233 /* last page? */
3234 if (cb <= PAGE_SIZE)
3235 {
3236 memcpy(pvDst, pvSrc, cb);
3237 PGMPhysReleasePageMappingLock(pVM, &Lock);
3238 PGM_UNLOCK(pVM);
3239 return VINF_SUCCESS;
3240 }
3241
3242 /* copy the entire page and advance */
3243 memcpy(pvDst, pvSrc, PAGE_SIZE);
3244 PGMPhysReleasePageMappingLock(pVM, &Lock);
3245 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
3246 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3247 cb -= PAGE_SIZE;
3248 }
3249 /* won't ever get here. */
3250}
3251
3252
3253/**
3254 * Write to guest physical memory referenced by GC pointer.
3255 *
3256 * This function uses the current CR3/CR0/CR4 of the guest and will
3257 * bypass access handlers and not set dirty or accessed bits.
3258 *
3259 * @returns VBox status code.
3260 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3261 * @param GCPtrDst The destination address (GC pointer).
3262 * @param pvSrc The source address.
3263 * @param cb The number of bytes to write.
3264 */
3265VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3266{
3267 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3268 VMCPU_ASSERT_EMT(pVCpu);
3269
3270 /*
3271 * Treat the first page as a special case.
3272 */
3273 if (!cb)
3274 return VINF_SUCCESS;
3275
3276 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWrite));
3277 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3278
3279 /* map the 1st page */
3280 void *pvDst;
3281 PGMPAGEMAPLOCK Lock;
3282 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3283 if (RT_FAILURE(rc))
3284 return rc;
3285
3286 /* optimize for the case where access is completely within the first page. */
3287 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3288 if (RT_LIKELY(cb <= cbPage))
3289 {
3290 memcpy(pvDst, pvSrc, cb);
3291 PGMPhysReleasePageMappingLock(pVM, &Lock);
3292 return VINF_SUCCESS;
3293 }
3294
3295 /* copy to the end of the page. */
3296 memcpy(pvDst, pvSrc, cbPage);
3297 PGMPhysReleasePageMappingLock(pVM, &Lock);
3298 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3299 pvSrc = (const uint8_t *)pvSrc + cbPage;
3300 cb -= cbPage;
3301
3302 /*
3303 * Page by page.
3304 */
3305 for (;;)
3306 {
3307 /* map the page */
3308 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3309 if (RT_FAILURE(rc))
3310 return rc;
3311
3312 /* last page? */
3313 if (cb <= PAGE_SIZE)
3314 {
3315 memcpy(pvDst, pvSrc, cb);
3316 PGMPhysReleasePageMappingLock(pVM, &Lock);
3317 return VINF_SUCCESS;
3318 }
3319
3320 /* copy the entire page and advance */
3321 memcpy(pvDst, pvSrc, PAGE_SIZE);
3322 PGMPhysReleasePageMappingLock(pVM, &Lock);
3323 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3324 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3325 cb -= PAGE_SIZE;
3326 }
3327 /* won't ever get here. */
3328}
3329
3330
3331/**
3332 * Write to guest physical memory referenced by GC pointer and update the PTE.
3333 *
3334 * This function uses the current CR3/CR0/CR4 of the guest and will
3335 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3336 *
3337 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3338 *
3339 * @returns VBox status code.
3340 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3341 * @param GCPtrDst The destination address (GC pointer).
3342 * @param pvSrc The source address.
3343 * @param cb The number of bytes to write.
3344 */
3345VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3346{
3347 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3348 VMCPU_ASSERT_EMT(pVCpu);
3349
3350 /*
3351 * Treat the first page as a special case.
3352 * Btw. this is the same code as in PGMPhyssimpleWriteGCPtr excep for the PGMGstModifyPage.
3353 */
3354 if (!cb)
3355 return VINF_SUCCESS;
3356
3357 /* map the 1st page */
3358 void *pvDst;
3359 PGMPAGEMAPLOCK Lock;
3360 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3361 if (RT_FAILURE(rc))
3362 return rc;
3363
3364 /* optimize for the case where access is completely within the first page. */
3365 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3366 if (RT_LIKELY(cb <= cbPage))
3367 {
3368 memcpy(pvDst, pvSrc, cb);
3369 PGMPhysReleasePageMappingLock(pVM, &Lock);
3370 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3371 return VINF_SUCCESS;
3372 }
3373
3374 /* copy to the end of the page. */
3375 memcpy(pvDst, pvSrc, cbPage);
3376 PGMPhysReleasePageMappingLock(pVM, &Lock);
3377 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3378 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3379 pvSrc = (const uint8_t *)pvSrc + cbPage;
3380 cb -= cbPage;
3381
3382 /*
3383 * Page by page.
3384 */
3385 for (;;)
3386 {
3387 /* map the page */
3388 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3389 if (RT_FAILURE(rc))
3390 return rc;
3391
3392 /* last page? */
3393 if (cb <= PAGE_SIZE)
3394 {
3395 memcpy(pvDst, pvSrc, cb);
3396 PGMPhysReleasePageMappingLock(pVM, &Lock);
3397 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3398 return VINF_SUCCESS;
3399 }
3400
3401 /* copy the entire page and advance */
3402 memcpy(pvDst, pvSrc, PAGE_SIZE);
3403 PGMPhysReleasePageMappingLock(pVM, &Lock);
3404 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3405 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3406 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3407 cb -= PAGE_SIZE;
3408 }
3409 /* won't ever get here. */
3410}
3411
3412
3413/**
3414 * Read from guest physical memory referenced by GC pointer.
3415 *
3416 * This function uses the current CR3/CR0/CR4 of the guest and will
3417 * respect access handlers and set accessed bits.
3418 *
3419 * @returns Strict VBox status, see PGMPhysRead for details.
3420 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3421 * specified virtual address.
3422 *
3423 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3424 * @param pvDst The destination address.
3425 * @param GCPtrSrc The source address (GC pointer).
3426 * @param cb The number of bytes to read.
3427 * @param enmOrigin Who is calling.
3428 * @thread EMT(pVCpu)
3429 */
3430VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3431{
3432 int rc;
3433 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3434 VMCPU_ASSERT_EMT(pVCpu);
3435
3436 /*
3437 * Anything to do?
3438 */
3439 if (!cb)
3440 return VINF_SUCCESS;
3441
3442 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3443
3444 /*
3445 * Optimize reads within a single page.
3446 */
3447 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3448 {
3449 /* Convert virtual to physical address + flags */
3450 PGMPTWALK Walk;
3451 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3452 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3453 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3454
3455 /* mark the guest page as accessed. */
3456 if (!(Walk.fEffective & X86_PTE_A))
3457 {
3458 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3459 AssertRC(rc);
3460 }
3461
3462 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3463 }
3464
3465 /*
3466 * Page by page.
3467 */
3468 for (;;)
3469 {
3470 /* Convert virtual to physical address + flags */
3471 PGMPTWALK Walk;
3472 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3473 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3474 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3475
3476 /* mark the guest page as accessed. */
3477 if (!(Walk.fEffective & X86_PTE_A))
3478 {
3479 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3480 AssertRC(rc);
3481 }
3482
3483 /* copy */
3484 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3485 if (cbRead < cb)
3486 {
3487 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3488 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3489 { /* likely */ }
3490 else
3491 return rcStrict;
3492 }
3493 else /* Last page (cbRead is PAGE_SIZE, we only need cb!) */
3494 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3495
3496 /* next */
3497 Assert(cb > cbRead);
3498 cb -= cbRead;
3499 pvDst = (uint8_t *)pvDst + cbRead;
3500 GCPtrSrc += cbRead;
3501 }
3502}
3503
3504
3505/**
3506 * Write to guest physical memory referenced by GC pointer.
3507 *
3508 * This function uses the current CR3/CR0/CR4 of the guest and will
3509 * respect access handlers and set dirty and accessed bits.
3510 *
3511 * @returns Strict VBox status, see PGMPhysWrite for details.
3512 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3513 * specified virtual address.
3514 *
3515 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3516 * @param GCPtrDst The destination address (GC pointer).
3517 * @param pvSrc The source address.
3518 * @param cb The number of bytes to write.
3519 * @param enmOrigin Who is calling.
3520 */
3521VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3522{
3523 int rc;
3524 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3525 VMCPU_ASSERT_EMT(pVCpu);
3526
3527 /*
3528 * Anything to do?
3529 */
3530 if (!cb)
3531 return VINF_SUCCESS;
3532
3533 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3534
3535 /*
3536 * Optimize writes within a single page.
3537 */
3538 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3539 {
3540 /* Convert virtual to physical address + flags */
3541 PGMPTWALK Walk;
3542 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3543 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3544 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3545
3546 /* Mention when we ignore X86_PTE_RW... */
3547 if (!(Walk.fEffective & X86_PTE_RW))
3548 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3549
3550 /* Mark the guest page as accessed and dirty if necessary. */
3551 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3552 {
3553 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3554 AssertRC(rc);
3555 }
3556
3557 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3558 }
3559
3560 /*
3561 * Page by page.
3562 */
3563 for (;;)
3564 {
3565 /* Convert virtual to physical address + flags */
3566 PGMPTWALK Walk;
3567 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3568 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3569 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3570
3571 /* Mention when we ignore X86_PTE_RW... */
3572 if (!(Walk.fEffective & X86_PTE_RW))
3573 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3574
3575 /* Mark the guest page as accessed and dirty if necessary. */
3576 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3577 {
3578 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3579 AssertRC(rc);
3580 }
3581
3582 /* copy */
3583 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3584 if (cbWrite < cb)
3585 {
3586 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3587 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3588 { /* likely */ }
3589 else
3590 return rcStrict;
3591 }
3592 else /* Last page (cbWrite is PAGE_SIZE, we only need cb!) */
3593 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3594
3595 /* next */
3596 Assert(cb > cbWrite);
3597 cb -= cbWrite;
3598 pvSrc = (uint8_t *)pvSrc + cbWrite;
3599 GCPtrDst += cbWrite;
3600 }
3601}
3602
3603
3604/**
3605 * Return the page type of the specified physical address.
3606 *
3607 * @returns The page type.
3608 * @param pVM The cross context VM structure.
3609 * @param GCPhys Guest physical address
3610 */
3611VMM_INT_DECL(PGMPAGETYPE) PGMPhysGetPageType(PVMCC pVM, RTGCPHYS GCPhys)
3612{
3613 PGM_LOCK_VOID(pVM);
3614 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3615 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
3616 PGM_UNLOCK(pVM);
3617
3618 return enmPgType;
3619}
3620
3621
3622/**
3623 * Converts a GC physical address to a HC ring-3 pointer, with some
3624 * additional checks.
3625 *
3626 * @returns VBox status code (no informational statuses).
3627 *
3628 * @param pVM The cross context VM structure.
3629 * @param pVCpu The cross context virtual CPU structure of the
3630 * calling EMT.
3631 * @param GCPhys The GC physical address to convert. This API mask
3632 * the A20 line when necessary.
3633 * @param puTlbPhysRev Where to read the physical TLB revision. Needs to
3634 * be done while holding the PGM lock.
3635 * @param ppb Where to store the pointer corresponding to GCPhys
3636 * on success.
3637 * @param pfTlb The TLB flags and revision. We only add stuff.
3638 *
3639 * @remarks This is more or a less a copy of PGMR3PhysTlbGCPhys2Ptr and
3640 * PGMPhysIemGCPhys2Ptr.
3641 *
3642 * @thread EMT(pVCpu).
3643 */
VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
                                             R3R0PTRTYPE(uint8_t *) *ppb,
                                             uint64_t *pfTlb)
{
    PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
    Assert(!(GCPhys & X86_PAGE_OFFSET_MASK));

    PGM_LOCK_VOID(pVM);

    PPGMRAMRANGE pRam;
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
    if (RT_SUCCESS(rc))
    {
        if (!PGM_PAGE_IS_BALLOONED(pPage))
        {
            if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
            {
                if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
                {
                    /*
                     * No access handler.
                     */
                    /* Non-ALLOCATED states (zero/shared/write-monitored) must not
                       be written through directly, so flag them no-write. */
                    switch (PGM_PAGE_GET_STATE(pPage))
                    {
                        case PGM_PAGE_STATE_ALLOCATED:
                            *pfTlb |= *puTlbPhysRev;
                            break;
                        case PGM_PAGE_STATE_BALLOONED:
                            AssertFailed();
                            RT_FALL_THRU();
                        case PGM_PAGE_STATE_ZERO:
                        case PGM_PAGE_STATE_SHARED:
                        case PGM_PAGE_STATE_WRITE_MONITORED:
                            *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
                            break;
                    }

                    /* NOTE(review): AssertLogRelRCReturn below returns while the
                       PGM lock is still held - presumably failure is fatal here;
                       confirm this is intentional. */
                    PPGMPAGEMAPTLBE pTlbe;
                    rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
                    AssertLogRelRCReturn(rc, rc);
                    *ppb = (uint8_t *)pTlbe->pv;
                }
                else if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
                {
                    /*
                     * MMIO or similar all access handler: Catch all access.
                     */
                    *pfTlb |= *puTlbPhysRev
                            | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
                    *ppb = NULL;
                }
                else
                {
                    /*
                     * Write access handler: Catch write accesses if active.
                     */
                    if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
                        *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
                    else
                        /* Inactive handler: same per-state no-write rules as the
                           no-handler case above. */
                        switch (PGM_PAGE_GET_STATE(pPage))
                        {
                            case PGM_PAGE_STATE_ALLOCATED:
                                *pfTlb |= *puTlbPhysRev;
                                break;
                            case PGM_PAGE_STATE_BALLOONED:
                                AssertFailed();
                                RT_FALL_THRU();
                            case PGM_PAGE_STATE_ZERO:
                            case PGM_PAGE_STATE_SHARED:
                            case PGM_PAGE_STATE_WRITE_MONITORED:
                                *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
                                break;
                        }

                    PPGMPAGEMAPTLBE pTlbe;
                    rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
                    AssertLogRelRCReturn(rc, rc);
                    *ppb = (uint8_t *)pTlbe->pv;
                }
            }
            else
            {
                /* Alias MMIO: For now, we catch all access. */
                *pfTlb |= *puTlbPhysRev
                        | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
                *ppb = NULL;
            }
        }
        else
        {
            /* Ballooned: Shouldn't get here, but we read zero page via PGMPhysRead and writes goes to /dev/null. */
            *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
            *ppb = NULL;
        }
        Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 pPage=%R[pgmpage]\n", GCPhys, *ppb, *pfTlb, pPage));
    }
    else
    {
        /* No page at this address: deny everything. */
        *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
        *ppb = NULL;
        Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 (rc=%Rrc)\n", GCPhys, *ppb, *pfTlb, rc));
    }

    PGM_UNLOCK(pVM);
    return VINF_SUCCESS;
}
3751
3752
/**
 * Converts a GC physical address to a HC ring-3 pointer, with some
 * additional checks.
 *
 * @returns VBox status code (no informational statuses).
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
 *          access handler of some kind.
 * @retval  VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
 *          accesses or is odd in any way.
 * @retval  VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling EMT.
 * @param   GCPhys          The GC physical address to convert.  This API masks
 *                          the A20 line when necessary.
 * @param   fWritable       Whether write access is required.
 * @param   fByPassHandlers Whether to bypass access handlers.
 * @param   ppv             Where to store the pointer corresponding to GCPhys
 *                          on success.
 * @param   pLock           Where to store the page mapping lock taken by
 *                          pgmPhysPageMapLockForWriting/Reading; the caller is
 *                          responsible for releasing it (presumably via
 *                          PGMPhysReleasePageMappingLock -- confirm with callers).
 *
 * @remarks This is more or a less a copy of PGMR3PhysTlbGCPhys2Ptr.
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
                                       void **ppv, PPGMPAGEMAPLOCK pLock)
{
    PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);

    PGM_LOCK_VOID(pVM);

    PPGMRAMRANGE pRam;
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
    if (RT_SUCCESS(rc))
    {
        /*
         * Classify the page: decide whether a direct pointer may be handed out
         * or whether the caller must go through access handlers instead.
         */
        if (PGM_PAGE_IS_BALLOONED(pPage))
            rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
        else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
            rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
        else if (   !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
                 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
            rc = VINF_SUCCESS;
        else
        {
            if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
            {
                Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
                rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
            }
            else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
            {
                /* Write-monitoring handler and write access requested: caller
                   must take the handler path so the write gets intercepted. */
                Assert(!fByPassHandlers);
                rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
            }
        }
        if (RT_SUCCESS(rc))
        {
            int rc2;

            /* Make sure what we return is writable. */
            if (fWritable)
                switch (PGM_PAGE_GET_STATE(pPage))
                {
                    case PGM_PAGE_STATE_ALLOCATED:
                        break;
                    case PGM_PAGE_STATE_BALLOONED:
                        AssertFailed(); /* ballooned pages were filtered out above */
                        break;
                    case PGM_PAGE_STATE_ZERO:
                    case PGM_PAGE_STATE_SHARED:
                    case PGM_PAGE_STATE_WRITE_MONITORED:
                        /* Give the page private, writable backing before mapping it.
                           NOTE(review): AssertLogRelRCReturn returns with the PGM
                           lock still held on failure -- pre-existing behavior. */
                        rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
                        AssertLogRelRCReturn(rc2, rc2);
                        break;
                }

            /* Get a ring-3 mapping of the address. */
            PPGMPAGEMAPTLBE pTlbe;
            rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
            AssertLogRelRCReturn(rc2, rc2);

            /* Lock it and calculate the address. */
            if (fWritable)
                pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
            else
                pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
            /* Combine the page mapping with the offset within the page. */
            *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));

            Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
        }
        else
            Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));

        /* else: handler catching all access, no pointer returned. */
    }
    else
        rc = VERR_PGM_PHYS_TLB_UNASSIGNED;

    PGM_UNLOCK(pVM);
    return rc;
}
3857
3858
3859/**
3860 * Checks if the give GCPhys page requires special handling for the given access
3861 * because it's MMIO or otherwise monitored.
3862 *
3863 * @returns VBox status code (no informational statuses).
3864 * @retval VINF_SUCCESS on success.
3865 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3866 * access handler of some kind.
3867 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3868 * accesses or is odd in any way.
3869 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3870 *
3871 * @param pVM The cross context VM structure.
3872 * @param GCPhys The GC physical address to convert. Since this is
3873 * only used for filling the REM TLB, the A20 mask must
3874 * be applied before calling this API.
3875 * @param fWritable Whether write access is required.
3876 * @param fByPassHandlers Whether to bypass access handlers.
3877 *
3878 * @remarks This is a watered down version PGMPhysIemGCPhys2Ptr and really just
3879 * a stop gap thing that should be removed once there is a better TLB
3880 * for virtual address accesses.
3881 */
3882VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
3883{
3884 PGM_LOCK_VOID(pVM);
3885 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
3886
3887 PPGMRAMRANGE pRam;
3888 PPGMPAGE pPage;
3889 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3890 if (RT_SUCCESS(rc))
3891 {
3892 if (PGM_PAGE_IS_BALLOONED(pPage))
3893 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3894 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3895 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3896 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3897 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3898 rc = VINF_SUCCESS;
3899 else
3900 {
3901 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3902 {
3903 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3904 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3905 }
3906 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
3907 {
3908 Assert(!fByPassHandlers);
3909 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3910 }
3911 }
3912 }
3913
3914 PGM_UNLOCK(pVM);
3915 return rc;
3916}
3917
3918#ifdef VBOX_WITH_NATIVE_NEM
3919
/**
 * Interface used by NEM to check what to do on a memory access exit.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per virtual CPU structure.
 *                          Optional.
 * @param   GCPhys          The guest physical address.
 * @param   fMakeWritable   Whether to try make the page writable or not.  If it
 *                          cannot be made writable, NEM_PAGE_PROT_WRITE won't
 *                          be returned and the return code will be unaffected
 * @param   pInfo           Where to return the page information.  This is
 *                          initialized even on failure.
 * @param   pfnChecker      Page in-sync checker callback.  Optional.
 * @param   pvUser          User argument to pass to pfnChecker.
 */
VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fMakeWritable, PPGMPHYSNEMPAGEINFO pInfo,
                                            PFNPGMPHYSNEMCHECKPAGE pfnChecker, void *pvUser)
{
    PGM_LOCK_VOID(pVM);

    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
    if (RT_SUCCESS(rc))
    {
        /* Try make it writable if requested. */
        pInfo->u2OldNemState = PGM_PAGE_GET_NEM_STATE(pPage);
        if (fMakeWritable)
            switch (PGM_PAGE_GET_STATE(pPage))
            {
                case PGM_PAGE_STATE_SHARED:
                case PGM_PAGE_STATE_WRITE_MONITORED:
                case PGM_PAGE_STATE_ZERO:
                    rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
                    /* Pages that cannot be made writable are not an error here;
                       per the contract above, the write bit simply stays clear. */
                    if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
                        rc = VINF_SUCCESS;
                    break;
            }

        /* Fill in the info. */
        pInfo->HCPhys       = PGM_PAGE_GET_HCPHYS(pPage);
        pInfo->u2NemState   = PGM_PAGE_GET_NEM_STATE(pPage);
        pInfo->fHasHandlers = PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) ? 1 : 0;
        PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
        pInfo->enmType      = enmType;
        pInfo->fNemProt     = pgmPhysPageCalcNemProtection(pPage, enmType);
        /* fZeroPage: whether the page currently reads as all zeros (no private
           allocated backing). */
        switch (PGM_PAGE_GET_STATE(pPage))
        {
            case PGM_PAGE_STATE_ALLOCATED:
                pInfo->fZeroPage = 0;
                break;

            case PGM_PAGE_STATE_ZERO:
                pInfo->fZeroPage = 1;
                break;

            case PGM_PAGE_STATE_WRITE_MONITORED:
                pInfo->fZeroPage = 0;
                break;

            case PGM_PAGE_STATE_SHARED:
                pInfo->fZeroPage = 0;
                break;

            case PGM_PAGE_STATE_BALLOONED:
                /* Ballooned pages read as zeros. */
                pInfo->fZeroPage = 1;
                break;

            default:
                pInfo->fZeroPage = 1;
                AssertFailedStmt(rc = VERR_PGM_PHYS_PAGE_GET_IPE);
        }

        /* Call the checker and update NEM state. */
        if (pfnChecker)
        {
            /* The checker may modify pInfo->u2NemState; persist it afterwards. */
            rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
            PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
        }

        /* Done. */
        PGM_UNLOCK(pVM);
    }
    else
    {
        PGM_UNLOCK(pVM);

        /* Lookup failed: still initialize the whole info structure as promised. */
        pInfo->HCPhys       = NIL_RTHCPHYS;
        pInfo->fNemProt     = NEM_PAGE_PROT_NONE;
        pInfo->u2NemState   = 0;
        pInfo->fHasHandlers = 0;
        pInfo->fZeroPage    = 0;
        pInfo->enmType      = PGMPAGETYPE_INVALID;
    }

    return rc;
}
4017
4018
4019/**
4020 * NEM helper that performs @a pfnCallback on pages with NEM state @a uMinState
4021 * or higher.
4022 *
4023 * @returns VBox status code from callback.
4024 * @param pVM The cross context VM structure.
4025 * @param pVCpu The cross context per CPU structure. This is
4026 * optional as its only for passing to callback.
4027 * @param uMinState The minimum NEM state value to call on.
4028 * @param pfnCallback The callback function.
4029 * @param pvUser User argument for the callback.
4030 */
4031VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uMinState,
4032 PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
4033{
4034 /*
4035 * Just brute force this problem.
4036 */
4037 PGM_LOCK_VOID(pVM);
4038 int rc = VINF_SUCCESS;
4039 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
4040 {
4041 uint32_t const cPages = pRam->cb >> X86_PAGE_SHIFT;
4042 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4043 {
4044 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(&pRam->aPages[iPage]);
4045 if (u2State < uMinState)
4046 { /* likely */ }
4047 else
4048 {
4049 rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
4050 if (RT_SUCCESS(rc))
4051 PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
4052 else
4053 break;
4054 }
4055 }
4056 }
4057 PGM_UNLOCK(pVM);
4058
4059 return rc;
4060}
4061
4062
4063/**
4064 * Helper for setting the NEM state for a range of pages.
4065 *
4066 * @param paPages Array of pages to modify.
4067 * @param cPages How many pages to modify.
4068 * @param u2State The new state value.
4069 */
4070void pgmPhysSetNemStateForPages(PPGMPAGE paPages, RTGCPHYS cPages, uint8_t u2State)
4071{
4072 PPGMPAGE pPage = paPages;
4073 while (cPages-- > 0)
4074 {
4075 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
4076 pPage++;
4077 }
4078}
4079
4080#endif /* VBOX_WITH_NATIVE_NEM */
4081
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette