VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@86667

Last change on this file since 86667 was 86473, checked in by vboxsync, 4 years ago

VMM/PGM: Working on eliminating page table bitfield use. bugref:9841 bugref:9746

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 167.2 KB
1/* $Id: PGMAllPhys.cpp 86473 2020-10-07 17:30:25Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM_PHYS
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/iom.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/nem.h>
30#include "PGMInternal.h"
31#include <VBox/vmm/vmcc.h>
32#include "PGMInline.h"
33#include <VBox/param.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/asm-amd64-x86.h>
38#include <VBox/log.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*********************************************************************************************************************************
45* Defined Constants And Macros *
46*********************************************************************************************************************************/
47/** Enable the physical TLB. */
48#define PGM_WITH_PHYS_TLB
49
50/** @def PGM_HANDLER_PHYS_IS_VALID_STATUS
51 * Checks if valid physical access handler return code (normal handler, not PF).
52 *
53 * Checks if the given strict status code is one of the expected ones for a
54 * physical access handler in the current context.
55 *
56 * @returns true or false.
57 * @param a_rcStrict The status code.
58 * @param a_fWrite Whether it is a write or read being serviced.
59 *
60 * @remarks We wish to keep the list of statuses here as short as possible.
61 * When changing, please make sure to update the PGMPhysRead,
62 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
63 */
64#ifdef IN_RING3
65# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
66 ( (a_rcStrict) == VINF_SUCCESS \
67 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
68#elif defined(IN_RING0)
69#define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
70 ( (a_rcStrict) == VINF_SUCCESS \
71 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT \
72 \
73 || (a_rcStrict) == ((a_fWrite) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ) \
74 || (a_rcStrict) == VINF_IOM_R3_MMIO_READ_WRITE \
75 || ((a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE && (a_fWrite)) \
76 \
77 || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR \
78 || (a_rcStrict) == VINF_EM_DBG_STOP \
79 || (a_rcStrict) == VINF_EM_DBG_EVENT \
80 || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
81 || (a_rcStrict) == VINF_EM_OFF \
82 || (a_rcStrict) == VINF_EM_SUSPEND \
83 || (a_rcStrict) == VINF_EM_RESET \
84 )
85#else
86# error "Context?"
87#endif
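/* Illustrative usage sketch (local names such as rcStrict2 and pfnHandler are
 * placeholders, not taken from this file): after invoking a physical access
 * handler, a caller can assert that the returned strict status is expected:
 *
 *     VBOXSTRICTRC rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf,
 *                                         PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
 *     AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true /*fWrite*/),
 *                     ("rcStrict2=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict2), GCPhys));
 */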
88
89/** @def PGM_HANDLER_VIRT_IS_VALID_STATUS
90 * Checks if valid virtual access handler return code (normal handler, not PF).
91 *
92 * Checks if the given strict status code is one of the expected ones for a
93 * virtual access handler in the current context.
94 *
95 * @returns true or false.
96 * @param a_rcStrict The status code.
97 * @param a_fWrite Whether it is a write or read being serviced.
98 *
99 * @remarks We wish to keep the list of statuses here as short as possible.
100 * When changing, please make sure to update the PGMPhysRead,
101 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
102 */
103#ifdef IN_RING3
104# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
105 ( (a_rcStrict) == VINF_SUCCESS \
106 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
107#elif defined(IN_RING0)
108# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
109 (false /* no virtual handlers in ring-0! */ )
110#else
111# error "Context?"
112#endif
113
114
115
116#ifndef IN_RING3
117
118/**
119 * @callback_method_impl{FNPGMPHYSHANDLER,
120 * Dummy for forcing ring-3 handling of the access.}
121 */
122DECLEXPORT(VBOXSTRICTRC)
123pgmPhysHandlerRedirectToHC(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
124 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
125{
126 NOREF(pVM); NOREF(pVCpu); NOREF(GCPhys); NOREF(pvPhys); NOREF(pvBuf); NOREF(cbBuf);
127 NOREF(enmAccessType); NOREF(enmOrigin); NOREF(pvUser);
128 return VINF_EM_RAW_EMULATE_INSTR;
129}
130
131
132/**
133 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
134 * Dummy for forcing ring-3 handling of the access.}
135 */
136VMMDECL(VBOXSTRICTRC) pgmPhysPfHandlerRedirectToHC(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
137 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
138{
139 NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(GCPhysFault); NOREF(pvUser);
140 return VINF_EM_RAW_EMULATE_INSTR;
141}
142
143
144/**
145 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
146 * \#PF access handler callback for guest ROM range write access.}
147 *
148 * @remarks The @a pvUser argument points to the PGMROMRANGE.
149 */
150DECLEXPORT(VBOXSTRICTRC) pgmPhysRomWritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
151 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
152{
153 int rc;
154 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
155 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
156 NOREF(uErrorCode); NOREF(pvFault);
157
158 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
159
160 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
161 switch (pRom->aPages[iPage].enmProt)
162 {
163 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
164 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
165 {
166 /*
167 * If it's a simple instruction which doesn't change the cpu state
168 * we will simply skip it. Otherwise we'll have to defer it to REM.
169 */
170 uint32_t cbOp;
171 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
172 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
173 if ( RT_SUCCESS(rc)
174 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
175 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
176 {
177 switch (pDis->bOpCode)
178 {
179 /** @todo Find other instructions we can safely skip, possibly
180 * adding this kind of detection to DIS or EM. */
181 case OP_MOV:
182 pRegFrame->rip += cbOp;
183 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteHandled);
184 return VINF_SUCCESS;
185 }
186 }
187 break;
188 }
189
190 case PGMROMPROT_READ_RAM_WRITE_RAM:
191 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
192 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
193 AssertRC(rc);
194 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
195
196 case PGMROMPROT_READ_ROM_WRITE_RAM:
197 /* Handle it in ring-3 because it's *way* easier there. */
198 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
199 break;
200
201 default:
202 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
203 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
204 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
205 }
206
207 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
208 return VINF_EM_RAW_EMULATE_INSTR;
209}
210
211#endif /* !IN_RING3 */
212
213
214/**
215 * @callback_method_impl{FNPGMPHYSHANDLER,
216 * Access handler callback for ROM write accesses.}
217 *
218 * @remarks The @a pvUser argument points to the PGMROMRANGE.
219 */
220PGM_ALL_CB2_DECL(VBOXSTRICTRC)
221pgmPhysRomWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
222 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
223{
224 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
225 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
226 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
227 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
228 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
229 NOREF(pVCpu); NOREF(pvPhys); NOREF(enmOrigin);
230
231 if (enmAccessType == PGMACCESSTYPE_READ)
232 {
233 switch (pRomPage->enmProt)
234 {
235 /*
236 * Take the default action.
237 */
238 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
239 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
240 case PGMROMPROT_READ_ROM_WRITE_RAM:
241 case PGMROMPROT_READ_RAM_WRITE_RAM:
242 return VINF_PGM_HANDLER_DO_DEFAULT;
243
244 default:
245 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
246 pRom->aPages[iPage].enmProt, iPage, GCPhys),
247 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
248 }
249 }
250 else
251 {
252 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
253 switch (pRomPage->enmProt)
254 {
255 /*
256 * Ignore writes.
257 */
258 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
259 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
260 return VINF_SUCCESS;
261
262 /*
263 * Write to the RAM page.
264 */
265 case PGMROMPROT_READ_ROM_WRITE_RAM:
266 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
267 {
268 /* This should be impossible now, pvPhys no longer works across page boundaries. */
269 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
270
271 /*
272 * Take the lock, do lazy allocation, map the page and copy the data.
273 *
274 * Note that we have to bypass the mapping TLB since it works on
275 * guest physical addresses and entering the shadow page would
276 * kind of screw things up...
277 */
278 int rc = pgmLock(pVM);
279 AssertRC(rc);
280
281 PPGMPAGE pShadowPage = &pRomPage->Shadow;
282 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
283 {
284 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
285 AssertLogRelReturn(pShadowPage, VERR_PGM_PHYS_PAGE_GET_IPE);
286 }
287
288 void *pvDstPage;
289 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
290 if (RT_SUCCESS(rc))
291 {
292 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
293 pRomPage->LiveSave.fWrittenTo = true;
294
295 AssertMsg( rc == VINF_SUCCESS
296 || ( rc == VINF_PGM_SYNC_CR3
297 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
298 , ("%Rrc\n", rc));
299 rc = VINF_SUCCESS;
300 }
301
302 pgmUnlock(pVM);
303 return rc;
304 }
305
306 default:
307 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
308 pRom->aPages[iPage].enmProt, iPage, GCPhys),
309 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
310 }
311 }
312}
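/* Quick reference, inferred from the handler above and the PGMROMPROT_* names
 * (illustrative summary only):
 *
 *     protection mode                    reads served from    writes go to
 *     PGMROMPROT_READ_ROM_WRITE_IGNORE   ROM                  discarded
 *     PGMROMPROT_READ_RAM_WRITE_IGNORE   shadow RAM copy      discarded
 *     PGMROMPROT_READ_ROM_WRITE_RAM      ROM                  shadow RAM copy
 *     PGMROMPROT_READ_RAM_WRITE_RAM      shadow RAM copy      shadow RAM copy
 */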
313
314
315/**
316 * Invalidates the RAM range TLBs.
317 *
318 * @param pVM The cross context VM structure.
319 */
320void pgmPhysInvalidRamRangeTlbs(PVMCC pVM)
321{
322 pgmLock(pVM);
323 RT_ZERO(pVM->pgm.s.apRamRangesTlbR3);
324 RT_ZERO(pVM->pgm.s.apRamRangesTlbR0);
325 pgmUnlock(pVM);
326}
327
328
329/**
330 * Tests if a value of type RTGCPHYS is negative if the type had been signed
331 * instead of unsigned.
332 *
333 * @returns @c true if negative, @c false if positive or zero.
334 * @param a_GCPhys The value to test.
335 * @todo Move me to iprt/types.h.
336 */
337#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
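/* Example (illustrative): with a 64-bit RTGCPHYS the macro simply tests bit 63,
 * so for off = GCPhys - pRam->GCPhys computed with unsigned wrap-around:
 *     GCPhys >= pRam->GCPhys  ->  bit 63 clear  ->  "positive"  ->  descend right
 *     GCPhys <  pRam->GCPhys  ->  bit 63 set    ->  "negative"  ->  descend left
 * which is how the AVL lookups below steer their search (assuming physical
 * addresses stay well below 2^63, as they do in practice).
 */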
338
339
340/**
341 * Slow worker for pgmPhysGetRange.
342 *
343 * @copydoc pgmPhysGetRange
344 */
345PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
346{
347 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
348
349 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
350 while (pRam)
351 {
352 RTGCPHYS off = GCPhys - pRam->GCPhys;
353 if (off < pRam->cb)
354 {
355 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
356 return pRam;
357 }
358 if (RTGCPHYS_IS_NEGATIVE(off))
359 pRam = pRam->CTX_SUFF(pLeft);
360 else
361 pRam = pRam->CTX_SUFF(pRight);
362 }
363 return NULL;
364}
365
366
367/**
368 * Slow worker for pgmPhysGetRangeAtOrAbove.
369 *
370 * @copydoc pgmPhysGetRangeAtOrAbove
371 */
372PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
373{
374 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
375
376 PPGMRAMRANGE pLastLeft = NULL;
377 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
378 while (pRam)
379 {
380 RTGCPHYS off = GCPhys - pRam->GCPhys;
381 if (off < pRam->cb)
382 {
383 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
384 return pRam;
385 }
386 if (RTGCPHYS_IS_NEGATIVE(off))
387 {
388 pLastLeft = pRam;
389 pRam = pRam->CTX_SUFF(pLeft);
390 }
391 else
392 pRam = pRam->CTX_SUFF(pRight);
393 }
394 return pLastLeft;
395}
396
397
398/**
399 * Slow worker for pgmPhysGetPage.
400 *
401 * @copydoc pgmPhysGetPage
402 */
403PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
404{
405 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
406
407 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
408 while (pRam)
409 {
410 RTGCPHYS off = GCPhys - pRam->GCPhys;
411 if (off < pRam->cb)
412 {
413 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
414 return &pRam->aPages[off >> PAGE_SHIFT];
415 }
416
417 if (RTGCPHYS_IS_NEGATIVE(off))
418 pRam = pRam->CTX_SUFF(pLeft);
419 else
420 pRam = pRam->CTX_SUFF(pRight);
421 }
422 return NULL;
423}
424
425
426/**
427 * Slow worker for pgmPhysGetPageEx.
428 *
429 * @copydoc pgmPhysGetPageEx
430 */
431int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
432{
433 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
434
435 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
436 while (pRam)
437 {
438 RTGCPHYS off = GCPhys - pRam->GCPhys;
439 if (off < pRam->cb)
440 {
441 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
442 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
443 return VINF_SUCCESS;
444 }
445
446 if (RTGCPHYS_IS_NEGATIVE(off))
447 pRam = pRam->CTX_SUFF(pLeft);
448 else
449 pRam = pRam->CTX_SUFF(pRight);
450 }
451
452 *ppPage = NULL;
453 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
454}
455
456
457/**
458 * Slow worker for pgmPhysGetPageAndRangeEx.
459 *
460 * @copydoc pgmPhysGetPageAndRangeEx
461 */
462int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
463{
464 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
465
466 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
467 while (pRam)
468 {
469 RTGCPHYS off = GCPhys - pRam->GCPhys;
470 if (off < pRam->cb)
471 {
472 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
473 *ppRam = pRam;
474 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
475 return VINF_SUCCESS;
476 }
477
478 if (RTGCPHYS_IS_NEGATIVE(off))
479 pRam = pRam->CTX_SUFF(pLeft);
480 else
481 pRam = pRam->CTX_SUFF(pRight);
482 }
483
484 *ppRam = NULL;
485 *ppPage = NULL;
486 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
487}
488
489
490/**
491 * Checks if Address Gate 20 is enabled or not.
492 *
493 * @returns true if enabled.
494 * @returns false if disabled.
495 * @param pVCpu The cross context virtual CPU structure.
496 */
497VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
498{
499 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
500 return pVCpu->pgm.s.fA20Enabled;
501}
502
503
504/**
505 * Validates a GC physical address.
506 *
507 * @returns true if valid.
508 * @returns false if invalid.
509 * @param pVM The cross context VM structure.
510 * @param GCPhys The physical address to validate.
511 */
512VMMDECL(bool) PGMPhysIsGCPhysValid(PVMCC pVM, RTGCPHYS GCPhys)
513{
514 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
515 return pPage != NULL;
516}
517
518
519/**
520 * Checks if a GC physical address is a normal page,
521 * i.e. not ROM, MMIO or reserved.
522 *
523 * @returns true if normal.
524 * @returns false if invalid, ROM, MMIO or reserved page.
525 * @param pVM The cross context VM structure.
526 * @param GCPhys The physical address to check.
527 */
528VMMDECL(bool) PGMPhysIsGCPhysNormal(PVMCC pVM, RTGCPHYS GCPhys)
529{
530 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
531 return pPage
532 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
533}
534
535
536/**
537 * Converts a GC physical address to a HC physical address.
538 *
539 * @returns VINF_SUCCESS on success.
540 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
541 * page but has no physical backing.
542 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
543 * GC physical address.
544 *
545 * @param pVM The cross context VM structure.
546 * @param GCPhys The GC physical address to convert.
547 * @param pHCPhys Where to store the HC physical address on success.
548 */
549VMM_INT_DECL(int) PGMPhysGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
550{
551 pgmLock(pVM);
552 PPGMPAGE pPage;
553 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
554 if (RT_SUCCESS(rc))
555 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
556 pgmUnlock(pVM);
557 return rc;
558}
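/* Illustrative usage sketch (variable names are placeholders, not from this file):
 *
 *     RTHCPHYS HCPhys;
 *     int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
 *     if (RT_SUCCESS(rc))
 *         Log(("GCPhys %RGp is backed by host physical page %RHp\n", GCPhys, HCPhys));
 *     else
 *         Log(("GCPhys %RGp could not be translated: %Rrc\n", GCPhys, rc));
 */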
559
560
561/**
562 * Invalidates all page mapping TLBs.
563 *
564 * @param pVM The cross context VM structure.
565 */
566void pgmPhysInvalidatePageMapTLB(PVMCC pVM)
567{
568 pgmLock(pVM);
569 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
570
571 /* Clear the R3 & R0 TLBs completely. */
572 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
573 {
574 pVM->pgm.s.PhysTlbR0.aEntries[i].GCPhys = NIL_RTGCPHYS;
575 pVM->pgm.s.PhysTlbR0.aEntries[i].pPage = 0;
576#ifndef VBOX_WITH_RAM_IN_KERNEL
577 pVM->pgm.s.PhysTlbR0.aEntries[i].pMap = 0;
578#endif
579 pVM->pgm.s.PhysTlbR0.aEntries[i].pv = 0;
580 }
581
582 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
583 {
584 pVM->pgm.s.PhysTlbR3.aEntries[i].GCPhys = NIL_RTGCPHYS;
585 pVM->pgm.s.PhysTlbR3.aEntries[i].pPage = 0;
586 pVM->pgm.s.PhysTlbR3.aEntries[i].pMap = 0;
587 pVM->pgm.s.PhysTlbR3.aEntries[i].pv = 0;
588 }
589
590 pgmUnlock(pVM);
591}
592
593
594/**
595 * Invalidates a page mapping TLB entry.
596 *
597 * @param pVM The cross context VM structure.
598 * @param GCPhys GCPhys entry to flush
599 */
600void pgmPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
601{
602 PGM_LOCK_ASSERT_OWNER(pVM);
603
604 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
605
606 unsigned const idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
607
608 pVM->pgm.s.PhysTlbR0.aEntries[idx].GCPhys = NIL_RTGCPHYS;
609 pVM->pgm.s.PhysTlbR0.aEntries[idx].pPage = 0;
610#ifndef VBOX_WITH_RAM_IN_KERNEL
611 pVM->pgm.s.PhysTlbR0.aEntries[idx].pMap = 0;
612#endif
613 pVM->pgm.s.PhysTlbR0.aEntries[idx].pv = 0;
614
615 pVM->pgm.s.PhysTlbR3.aEntries[idx].GCPhys = NIL_RTGCPHYS;
616 pVM->pgm.s.PhysTlbR3.aEntries[idx].pPage = 0;
617 pVM->pgm.s.PhysTlbR3.aEntries[idx].pMap = 0;
618 pVM->pgm.s.PhysTlbR3.aEntries[idx].pv = 0;
619}
620
621
622/**
623 * Makes sure that there is at least one handy page ready for use.
624 *
625 * This will also take the appropriate actions when reaching water-marks.
626 *
627 * @returns VBox status code.
628 * @retval VINF_SUCCESS on success.
629 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
630 *
631 * @param pVM The cross context VM structure.
632 *
633 * @remarks Must be called from within the PGM critical section. It may
634 * nip back to ring-3/0 in some cases.
635 */
636static int pgmPhysEnsureHandyPage(PVMCC pVM)
637{
638 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
639
640 /*
641 * Do we need to do anything special?
642 */
643#ifdef IN_RING3
644 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
645#else
646 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
647#endif
648 {
649 /*
650 * Allocate pages only if we're out of them, or in ring-3, almost out.
651 */
652#ifdef IN_RING3
653 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
654#else
655 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
656#endif
657 {
658 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
659 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
660#ifdef IN_RING3
661 int rc = PGMR3PhysAllocateHandyPages(pVM);
662#else
663 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
664#endif
665 if (RT_UNLIKELY(rc != VINF_SUCCESS))
666 {
667 if (RT_FAILURE(rc))
668 return rc;
669 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
670 if (!pVM->pgm.s.cHandyPages)
671 {
672 LogRel(("PGM: no more handy pages!\n"));
673 return VERR_EM_NO_MEMORY;
674 }
675 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
676 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
677#ifndef IN_RING3
678 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
679#endif
680 }
681 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
682 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
683 ("%u\n", pVM->pgm.s.cHandyPages),
684 VERR_PGM_HANDY_PAGE_IPE);
685 }
686 else
687 {
688 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
689 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
690#ifndef IN_RING3
691 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
692 {
693 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
694 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
695 }
696#endif
697 }
698 }
699
700 return VINF_SUCCESS;
701}
702
703
704
705/**
706 * Replace a zero or shared page with a new page that we can write to.
707 *
708 * @returns The following VBox status codes.
709 * @retval VINF_SUCCESS on success, pPage is modified.
710 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
711 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
712 *
713 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
714 *
715 * @param pVM The cross context VM structure.
716 * @param pPage The physical page tracking structure. This will
717 * be modified on success.
718 * @param GCPhys The address of the page.
719 *
720 * @remarks Must be called from within the PGM critical section. It may
721 * nip back to ring-3/0 in some cases.
722 *
723 * @remarks This function shouldn't really fail; however, if it does,
724 * it probably means we've screwed up the size of handy pages and/or
725 * the low-water mark. Or, that some device I/O is causing a lot of
726 * pages to be allocated while the host is in a low-memory
727 * condition. This latter should be handled elsewhere and in a more
728 * controlled manner, it's on the @bugref{3170} todo list...
729 */
730int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
731{
732 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
733
734 /*
735 * Prereqs.
736 */
737 PGM_LOCK_ASSERT_OWNER(pVM);
738 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
739 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
740
741# ifdef PGM_WITH_LARGE_PAGES
742 /*
743 * Try allocate a large page if applicable.
744 */
745 if ( PGMIsUsingLargePages(pVM)
746 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
747 && !VM_IS_NEM_ENABLED(pVM)) /** @todo NEM: Implement large pages support. */
748 {
749 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
750 PPGMPAGE pBasePage;
751
752 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
753 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
754 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
755 {
756 rc = pgmPhysAllocLargePage(pVM, GCPhys);
757 if (rc == VINF_SUCCESS)
758 return rc;
759 }
760 /* Mark the base as type page table, so we don't check over and over again. */
761 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
762
763 /* fall back to 4KB pages. */
764 }
765# endif
766
767 /*
768 * Flush any shadow page table mappings of the page.
769 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
770 */
771 bool fFlushTLBs = false;
772 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
773 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
774
775 /*
776 * Ensure that we've got a page handy, take it and use it.
777 */
778 int rc2 = pgmPhysEnsureHandyPage(pVM);
779 if (RT_FAILURE(rc2))
780 {
781 if (fFlushTLBs)
782 PGM_INVL_ALL_VCPU_TLBS(pVM);
783 Assert(rc2 == VERR_EM_NO_MEMORY);
784 return rc2;
785 }
786 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
787 PGM_LOCK_ASSERT_OWNER(pVM);
788 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
789 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
790
791 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
792 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
793 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
794 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
795 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
796 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
797
798 /*
799 * There are one or two actions to be taken the next time we allocate handy pages:
800 * - Tell the GMM (global memory manager) what the page is being used for.
801 * (Speeds up replacement operations - sharing and defragmenting.)
802 * - If the current backing is shared, it must be freed.
803 */
804 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
805 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
806
807 void const *pvSharedPage = NULL;
808 if (PGM_PAGE_IS_SHARED(pPage))
809 {
810 /* Mark this shared page for freeing/dereferencing. */
811 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
812 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
813
814 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
815 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
816 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageReplaceShared));
817 pVM->pgm.s.cSharedPages--;
818
819 /* Grab the address of the page so we can make a copy later on. (safe) */
820 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
821 AssertRC(rc);
822 }
823 else
824 {
825 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
826 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
827 pVM->pgm.s.cZeroPages--;
828 }
829
830 /*
831 * Do the PGMPAGE modifications.
832 */
833 pVM->pgm.s.cPrivatePages++;
834 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
835 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
836 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
837 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
838 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
839
840 /* Copy the shared page contents to the replacement page. */
841 if (pvSharedPage)
842 {
843 /* Get the virtual address of the new page. */
844 PGMPAGEMAPLOCK PgMpLck;
845 void *pvNewPage;
846 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
847 if (RT_SUCCESS(rc))
848 {
849 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo todo write ASMMemCopyPage */
850 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
851 }
852 }
853
854 if ( fFlushTLBs
855 && rc != VINF_PGM_GCPHYS_ALIASED)
856 PGM_INVL_ALL_VCPU_TLBS(pVM);
857
858 /*
859 * Notify NEM about the mapping change for this page.
860 *
861 * Note! Shadow ROM pages are complicated as they can definitely be
862 * allocated while not visible, so play safe.
863 */
864 if (VM_IS_NEM_ENABLED(pVM))
865 {
866 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
867 if ( enmType != PGMPAGETYPE_ROM_SHADOW
868 || pgmPhysGetPage(pVM, GCPhys) == pPage)
869 {
870 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
871 rc2 = NEMHCNotifyPhysPageAllocated(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, HCPhys,
872 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
873 if (RT_SUCCESS(rc))
874 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
875 else
876 rc = rc2;
877 }
878 }
879
880 return rc;
881}
882
883#ifdef PGM_WITH_LARGE_PAGES
884
885/**
886 * Replace a 2 MB range of zero pages with new pages that we can write to.
887 *
888 * @returns The following VBox status codes.
889 * @retval VINF_SUCCESS on success, pPage is modified.
890 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
891 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
892 *
893 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
894 *
895 * @param pVM The cross context VM structure.
896 * @param GCPhys The address of the page.
897 *
898 * @remarks Must be called from within the PGM critical section. It may
899 * nip back to ring-3/0 in some cases.
900 */
901int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys)
902{
903 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
904 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
905 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
906
907 /*
908 * Prereqs.
909 */
910 PGM_LOCK_ASSERT_OWNER(pVM);
911 Assert(PGMIsUsingLargePages(pVM));
912
913 PPGMPAGE pFirstPage;
914 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
915 if ( RT_SUCCESS(rc)
916 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
917 {
918 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
919
920 /* Don't call this function for already allocated pages. */
921 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
922
923 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
924 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
925 {
926 /* Lazy approach: check all pages in the 2 MB range.
927 * The whole range must be ram and unallocated. */
928 GCPhys = GCPhysBase;
929 unsigned iPage;
930 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
931 {
932 PPGMPAGE pSubPage;
933 rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage);
934 if ( RT_FAILURE(rc)
935 || PGM_PAGE_GET_TYPE(pSubPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
936 || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
937 {
938 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
939 break;
940 }
941 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
942 GCPhys += PAGE_SIZE;
943 }
944 if (iPage != _2M/PAGE_SIZE)
945 {
946 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
947 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
948 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
949 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
950 }
951
952 /*
953 * Do the allocation.
954 */
955# ifdef IN_RING3
956 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
957# else
958 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
959# endif
960 if (RT_SUCCESS(rc))
961 {
962 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
963 pVM->pgm.s.cLargePages++;
964 return VINF_SUCCESS;
965 }
966
967 /* If we fail once, it most likely means the host's memory is too
968 fragmented; don't bother trying again. */
969 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
970 PGMSetLargePageUsage(pVM, false);
971 return rc;
972 }
973 }
974 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
975}
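/* Worked example of the 2 MB alignment math used above (illustrative): since
 * X86_PDE2M_PAE_PG_MASK masks off the low 21 bits of the address,
 *     GCPhys     = 0x12345678
 *     GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK = 0x12200000
 * and the page-by-page check then walks the 512 pages (_2M / PAGE_SIZE with
 * 4 KiB pages) starting at that base.
 */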
976
977
978/**
979 * Recheck the entire 2 MB range to see if we can use it again as a large page.
980 *
981 * @returns The following VBox status codes.
982 * @retval VINF_SUCCESS on success, the large page can be used again
983 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
984 *
985 * @param pVM The cross context VM structure.
986 * @param GCPhys The address of the page.
987 * @param pLargePage Page structure of the base page
988 */
989int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
990{
991 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
992
993 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
994
995 GCPhys &= X86_PDE2M_PAE_PG_MASK;
996
997 /* Check the base page. */
998 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
999 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
1000 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
1001 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1002 {
1003 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
1004 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1005 }
1006
1007 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
1008 /* Check all remaining pages in the 2 MB range. */
1009 unsigned i;
1010 GCPhys += PAGE_SIZE;
1011 for (i = 1; i < _2M/PAGE_SIZE; i++)
1012 {
1013 PPGMPAGE pPage;
1014 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1015 AssertRCBreak(rc);
1016
1017 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
1018 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
1019 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
1020 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1021 {
1022 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
1023 break;
1024 }
1025
1026 GCPhys += PAGE_SIZE;
1027 }
1028 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
1029
1030 if (i == _2M/PAGE_SIZE)
1031 {
1032 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
1033 pVM->pgm.s.cLargePagesDisabled--;
1034 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
1035 return VINF_SUCCESS;
1036 }
1037
1038 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1039}
1040
1041#endif /* PGM_WITH_LARGE_PAGES */
1042
1043
1044/**
1045 * Deal with a write monitored page.
1046 *
1047 * @returns VBox strict status code.
1048 *
1049 * @param pVM The cross context VM structure.
1050 * @param pPage The physical page tracking structure.
1051 * @param GCPhys The guest physical address of the page.
1052 * PGMPhysReleasePageMappingLock() passes NIL_RTGCPHYS in a
1053 * very unlikely situation where it is okay that we let NEM
1054 * fix the page access in a lazy fashion.
1055 *
1056 * @remarks Called from within the PGM critical section.
1057 */
1058void pgmPhysPageMakeWriteMonitoredWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1059{
1060 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
1061 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1062 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1063 Assert(pVM->pgm.s.cMonitoredPages > 0);
1064 pVM->pgm.s.cMonitoredPages--;
1065 pVM->pgm.s.cWrittenToPages++;
1066
1067 /*
1068 * Notify NEM about the protection change so we won't spin forever.
1069 *
1070 * Note! NEM needs to be able to lazily correct page protection, as we cannot
1071 * really get it 100% right here, it seems. The page pool does this too.
1072 */
1073 if (VM_IS_NEM_ENABLED(pVM) && GCPhys != NIL_RTGCPHYS)
1074 {
1075 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1076 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1077 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
1078 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1079 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1080 }
1081}
1082
1083
1084/**
1085 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
1086 *
1087 * @returns VBox strict status code.
1088 * @retval VINF_SUCCESS on success.
1089 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1090 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1091 *
1092 * @param pVM The cross context VM structure.
1093 * @param pPage The physical page tracking structure.
1094 * @param GCPhys The address of the page.
1095 *
1096 * @remarks Called from within the PGM critical section.
1097 */
1098int pgmPhysPageMakeWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1099{
1100 PGM_LOCK_ASSERT_OWNER(pVM);
1101 switch (PGM_PAGE_GET_STATE(pPage))
1102 {
1103 case PGM_PAGE_STATE_WRITE_MONITORED:
1104 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
1105 RT_FALL_THRU();
1106 default: /* to shut up GCC */
1107 case PGM_PAGE_STATE_ALLOCATED:
1108 return VINF_SUCCESS;
1109
1110 /*
1111 * Zero pages can be dummy pages for MMIO or reserved memory,
1112 * so we need to check the flags before joining cause with
1113 * shared page replacement.
1114 */
1115 case PGM_PAGE_STATE_ZERO:
1116 if (PGM_PAGE_IS_MMIO(pPage))
1117 return VERR_PGM_PHYS_PAGE_RESERVED;
1118 RT_FALL_THRU();
1119 case PGM_PAGE_STATE_SHARED:
1120 return pgmPhysAllocPage(pVM, pPage, GCPhys);
1121
1122 /* Not allowed to write to ballooned pages. */
1123 case PGM_PAGE_STATE_BALLOONED:
1124 return VERR_PGM_PHYS_PAGE_BALLOONED;
1125 }
1126}
1127
1128
1129/**
1130 * Internal usage: Map the page specified by its GMM ID.
1131 *
1132 * This is similar to pgmPhysPageMap.
1133 *
1134 * @returns VBox status code.
1135 *
1136 * @param pVM The cross context VM structure.
1137 * @param idPage The Page ID.
1138 * @param HCPhys The physical address (for SUPR0HCPhysToVirt).
1139 * @param ppv Where to store the mapping address.
1140 *
1141 * @remarks Called from within the PGM critical section. The mapping is only
1142 * valid while you are inside this section.
1143 */
1144int pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
1145{
1146 /*
1147 * Validation.
1148 */
1149 PGM_LOCK_ASSERT_OWNER(pVM);
1150 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1151 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
1152 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
1153
1154#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1155 /*
1156 * Map it by HCPhys.
1157 */
1158 return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1159
1160#elif defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
1161# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1162 return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, ppv);
1163# else
1164 return GMMR0PageIdToVirt(pVM, idPage, ppv);
1165# endif
1166
1167#else
1168 /*
1169 * Find/make Chunk TLB entry for the mapping chunk.
1170 */
1171 PPGMCHUNKR3MAP pMap;
1172 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1173 if (pTlbe->idChunk == idChunk)
1174 {
1175 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1176 pMap = pTlbe->pChunk;
1177 }
1178 else
1179 {
1180 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1181
1182 /*
1183 * Find the chunk, map it if necessary.
1184 */
1185 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1186 if (pMap)
1187 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1188 else
1189 {
1190# ifdef IN_RING0
1191 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1192 AssertRCReturn(rc, rc);
1193 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1194 Assert(pMap);
1195# else
1196 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1197 if (RT_FAILURE(rc))
1198 return rc;
1199# endif
1200 }
1201
1202 /*
1203 * Enter it into the Chunk TLB.
1204 */
1205 pTlbe->idChunk = idChunk;
1206 pTlbe->pChunk = pMap;
1207 }
1208
1209 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
1210 return VINF_SUCCESS;
1211#endif
1212}
1213
1214
1215/**
1216 * Maps a page into the current virtual address space so it can be accessed.
1217 *
1218 * @returns VBox status code.
1219 * @retval VINF_SUCCESS on success.
1220 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1221 *
1222 * @param pVM The cross context VM structure.
1223 * @param pPage The physical page tracking structure.
1224 * @param GCPhys The address of the page.
1225 * @param ppMap Where to store the address of the mapping tracking structure.
1226 * @param ppv Where to store the mapping address of the page. The page
1227 * offset is masked off!
1228 *
1229 * @remarks Called from within the PGM critical section.
1230 */
1231static int pgmPhysPageMapCommon(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1232{
1233 PGM_LOCK_ASSERT_OWNER(pVM);
1234 NOREF(GCPhys);
1235
1236#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1237 /*
1238 * Just some sketchy GC/R0-darwin code.
1239 */
1240 *ppMap = NULL;
1241 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1242 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
1243 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1244 return VINF_SUCCESS;
1245
1246#else /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1247
1248
1249 /*
1250 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1251 */
1252 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1253 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1254 {
1255 /* Decode the page id to a page in a MMIO2 ram range. */
1256 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1257 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1258 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1259 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
1260 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1261 pPage->s.idPage, pPage->s.uStateY),
1262 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1263 PPGMREGMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1264 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1265 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1266 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1267 *ppMap = NULL;
1268# if defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1269 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1270# elif defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
1271 *ppv = (uint8_t *)pMmio2Range->pvR0 + ((uintptr_t)iPage << PAGE_SHIFT);
1272 return VINF_SUCCESS;
1273# else
1274 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << PAGE_SHIFT);
1275 return VINF_SUCCESS;
1276# endif
1277 }
1278
1279 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1280 if (idChunk == NIL_GMM_CHUNKID)
1281 {
1282 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1283 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1284 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1285 {
1286 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1287 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1288 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1289 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1290 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1291 }
1292 else
1293 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1294 *ppMap = NULL;
1295 return VINF_SUCCESS;
1296 }
1297
1298# if defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1299 /*
1300 * Just use the physical address.
1301 */
1302 *ppMap = NULL;
1303 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1304
1305# elif defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
1306 /*
1307 * Go by page ID thru GMMR0.
1308 */
1309 *ppMap = NULL;
1310 return GMMR0PageIdToVirt(pVM, PGM_PAGE_GET_PAGEID(pPage), ppv);
1311
1312# else
1313 /*
1314 * Find/make Chunk TLB entry for the mapping chunk.
1315 */
1316 PPGMCHUNKR3MAP pMap;
1317 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1318 if (pTlbe->idChunk == idChunk)
1319 {
1320 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1321 pMap = pTlbe->pChunk;
1322 AssertPtr(pMap->pv);
1323 }
1324 else
1325 {
1326 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1327
1328 /*
1329 * Find the chunk, map it if necessary.
1330 */
1331 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1332 if (pMap)
1333 {
1334 AssertPtr(pMap->pv);
1335 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1336 }
1337 else
1338 {
1339# ifdef IN_RING0
1340 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1341 AssertRCReturn(rc, rc);
1342 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1343 Assert(pMap);
1344# else
1345 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1346 if (RT_FAILURE(rc))
1347 return rc;
1348# endif
1349 AssertPtr(pMap->pv);
1350 }
1351
1352 /*
1353 * Enter it into the Chunk TLB.
1354 */
1355 pTlbe->idChunk = idChunk;
1356 pTlbe->pChunk = pMap;
1357 }
1358
1359 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
1360 *ppMap = pMap;
1361 return VINF_SUCCESS;
1362# endif /* !IN_RING0 || !VBOX_WITH_RAM_IN_KERNEL */
1363#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1364}
1365
1366
1367/**
1368 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1369 *
1370 * This is typically used in paths where we cannot use the TLB methods (like ROM
1371 * pages) or where there is no point in using them since we won't get many hits.
1372 *
1373 * @returns VBox strict status code.
1374 * @retval VINF_SUCCESS on success.
1375 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1376 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1377 *
1378 * @param pVM The cross context VM structure.
1379 * @param pPage The physical page tracking structure.
1380 * @param GCPhys The address of the page.
1381 * @param ppv Where to store the mapping address of the page. The page
1382 * offset is masked off!
1383 *
1384 * @remarks Called from within the PGM critical section. The mapping is only
1385 * valid while you are inside this section.
1386 */
1387int pgmPhysPageMakeWritableAndMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1388{
1389 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1390 if (RT_SUCCESS(rc))
1391 {
1392 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1393 PPGMPAGEMAP pMapIgnore;
1394 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1395 if (RT_FAILURE(rc2)) /* preserve rc */
1396 rc = rc2;
1397 }
1398 return rc;
1399}
1400
1401
1402/**
1403 * Maps a page into the current virtual address space so it can be accessed for
1404 * both writing and reading.
1405 *
1406 * This is typically used in paths where we cannot use the TLB methods (like ROM
1407 * pages) or where there is no point in using them since we won't get many hits.
1408 *
1409 * @returns VBox status code.
1410 * @retval VINF_SUCCESS on success.
1411 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1412 *
1413 * @param pVM The cross context VM structure.
1414 * @param pPage The physical page tracking structure. Must be in the
1415 * allocated state.
1416 * @param GCPhys The address of the page.
1417 * @param ppv Where to store the mapping address of the page. The page
1418 * offset is masked off!
1419 *
1420 * @remarks Called from within the PGM critical section. The mapping is only
1421 * valid while you are inside this section.
1422 */
1423int pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1424{
1425 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1426 PPGMPAGEMAP pMapIgnore;
1427 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1428}
1429
1430
1431/**
1432 * Maps a page into the current virtual address space so it can be accessed for
1433 * reading.
1434 *
1435 * This is typically used in paths where we cannot use the TLB methods (like ROM
1436 * pages) or where there is no point in using them since we won't get many hits.
1437 *
1438 * @returns VBox status code.
1439 * @retval VINF_SUCCESS on success.
1440 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1441 *
1442 * @param pVM The cross context VM structure.
1443 * @param pPage The physical page tracking structure.
1444 * @param GCPhys The address of the page.
1445 * @param ppv Where to store the mapping address of the page. The page
1446 * offset is masked off!
1447 *
1448 * @remarks Called from within the PGM critical section. The mapping is only
1449 * valid while you are inside this section.
1450 */
1451int pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1452{
1453 PPGMPAGEMAP pMapIgnore;
1454 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1455}
1456
1457#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1458
1459/**
1460 * Load a guest page into the ring-3 physical TLB.
1461 *
1462 * @returns VBox status code.
1463 * @retval VINF_SUCCESS on success
1464 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1465 * @param pVM The cross context VM structure.
1466 * @param GCPhys The guest physical address in question.
1467 */
1468int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys)
1469{
1470 PGM_LOCK_ASSERT_OWNER(pVM);
1471
1472 /*
1473 * Find the ram range and page and hand it over to the with-page function.
1474 * 99.8% of requests are expected to be in the first range.
1475 */
1476 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1477 if (!pPage)
1478 {
1479 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1480 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1481 }
1482
1483 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1484}
1485
1486
1487/**
1488 * Load a guest page into the ring-3 physical TLB.
1489 *
1490 * @returns VBox status code.
1491 * @retval VINF_SUCCESS on success
1492 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1493 *
1494 * @param pVM The cross context VM structure.
1495 * @param pPage Pointer to the PGMPAGE structure corresponding to
1496 * GCPhys.
1497 * @param GCPhys The guest physical address in question.
1498 */
1499int pgmPhysPageLoadIntoTlbWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1500{
1501 PGM_LOCK_ASSERT_OWNER(pVM);
1502 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1503
1504 /*
1505 * Map the page.
1506 * Make a special case for the zero page as it is kind of special.
1507 */
1508 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1509 if ( !PGM_PAGE_IS_ZERO(pPage)
1510 && !PGM_PAGE_IS_BALLOONED(pPage))
1511 {
1512 void *pv;
1513 PPGMPAGEMAP pMap;
1514 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1515 if (RT_FAILURE(rc))
1516 return rc;
1517# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1518 pTlbe->pMap = pMap;
1519# endif
1520 pTlbe->pv = pv;
1521 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1522 }
1523 else
1524 {
1525 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1526# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1527 pTlbe->pMap = NULL;
1528# endif
1529 pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1530 }
1531# ifdef PGM_WITH_PHYS_TLB
1532 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1533 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1534 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1535 else
1536 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1537# else
1538 pTlbe->GCPhys = NIL_RTGCPHYS;
1539# endif
1540 pTlbe->pPage = pPage;
1541 return VINF_SUCCESS;
1542}
1543
1544#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1545
1546/**
1547 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1548 * own the PGM lock and therefore not need to lock the mapped page.
1549 *
1550 * @returns VBox status code.
1551 * @retval VINF_SUCCESS on success.
1552 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1553 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1554 *
1555 * @param pVM The cross context VM structure.
1556 * @param GCPhys The guest physical address of the page that should be mapped.
1557 * @param pPage Pointer to the PGMPAGE structure for the page.
1558 * @param ppv Where to store the address corresponding to GCPhys.
1559 *
1560 * @internal
1561 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1562 */
1563int pgmPhysGCPhys2CCPtrInternalDepr(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1564{
1565 int rc;
1566 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1567 PGM_LOCK_ASSERT_OWNER(pVM);
1568 pVM->pgm.s.cDeprecatedPageLocks++;
1569
1570 /*
1571 * Make sure the page is writable.
1572 */
1573 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1574 {
1575 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1576 if (RT_FAILURE(rc))
1577 return rc;
1578 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1579 }
1580 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1581
1582 /*
1583 * Get the mapping address.
1584 */
1585#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1586 void *pv;
1587 rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
1588 PGM_PAGE_GET_HCPHYS(pPage),
1589 &pv
1590 RTLOG_COMMA_SRC_POS);
1591 if (RT_FAILURE(rc))
1592 return rc;
1593 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1594#else
1595 PPGMPAGEMAPTLBE pTlbe;
1596 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1597 if (RT_FAILURE(rc))
1598 return rc;
1599 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1600#endif
1601 return VINF_SUCCESS;
1602}
1603
1604#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1605
1606/**
1607 * Locks a page mapping for writing.
1608 *
1609 * @param pVM The cross context VM structure.
1610 * @param pPage The page.
1611 * @param pTlbe The mapping TLB entry for the page.
1612 * @param pLock The lock structure (output).
1613 */
1614DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1615{
1616# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1617 PPGMPAGEMAP pMap = pTlbe->pMap;
1618 if (pMap)
1619 pMap->cRefs++;
1620# else
1621 RT_NOREF(pTlbe);
1622# endif
1623
1624 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1625 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1626 {
1627 if (cLocks == 0)
1628 pVM->pgm.s.cWriteLockedPages++;
1629 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1630 }
1631 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1632 {
1633 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1634 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1635# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1636 if (pMap)
1637 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1638# endif
1639 }
1640
1641 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1642# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1643 pLock->pvMap = pMap;
1644# else
1645 pLock->pvMap = NULL;
1646# endif
1647}
1648
1649/**
1650 * Locks a page mapping for reading.
1651 *
1652 * @param pVM The cross context VM structure.
1653 * @param pPage The page.
1654 * @param pTlbe The mapping TLB entry for the page.
1655 * @param pLock The lock structure (output).
1656 */
1657DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1658{
1659# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1660 PPGMPAGEMAP pMap = pTlbe->pMap;
1661 if (pMap)
1662 pMap->cRefs++;
1663# else
1664 RT_NOREF(pTlbe);
1665# endif
1666
1667 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1668 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1669 {
1670 if (cLocks == 0)
1671 pVM->pgm.s.cReadLockedPages++;
1672 PGM_PAGE_INC_READ_LOCKS(pPage);
1673 }
1674 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1675 {
1676 PGM_PAGE_INC_READ_LOCKS(pPage);
1677 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1678# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1679 if (pMap)
1680 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1681# endif
1682 }
1683
1684 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1685# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1686 pLock->pvMap = pMap;
1687# else
1688 pLock->pvMap = NULL;
1689# endif
1690}
1691
1692#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1693
1694
1695/**
1696 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1697 * own the PGM lock and have access to the page structure.
1698 *
1699 * @returns VBox status code.
1700 * @retval VINF_SUCCESS on success.
1701 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1702 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1703 *
1704 * @param pVM The cross context VM structure.
1705 * @param GCPhys The guest physical address of the page that should be mapped.
1706 * @param pPage Pointer to the PGMPAGE structure for the page.
1707 * @param ppv Where to store the address corresponding to GCPhys.
1708 * @param pLock Where to store the lock information that
1709 * pgmPhysReleaseInternalPageMappingLock needs.
1710 *
1711 * @internal
1712 */
1713int pgmPhysGCPhys2CCPtrInternal(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1714{
1715 int rc;
1716 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1717 PGM_LOCK_ASSERT_OWNER(pVM);
1718
1719 /*
1720 * Make sure the page is writable.
1721 */
1722 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1723 {
1724 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1725 if (RT_FAILURE(rc))
1726 return rc;
1727 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1728 }
1729 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1730
1731 /*
1732 * Do the job.
1733 */
1734#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1735 void *pv;
1736 PVMCPU pVCpu = VMMGetCpu(pVM);
1737 rc = pgmRZDynMapHCPageInlined(pVCpu,
1738 PGM_PAGE_GET_HCPHYS(pPage),
1739 &pv
1740 RTLOG_COMMA_SRC_POS);
1741 if (RT_FAILURE(rc))
1742 return rc;
1743 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1744 pLock->pvPage = pv;
1745 pLock->pVCpu = pVCpu;
1746
1747#else
1748 PPGMPAGEMAPTLBE pTlbe;
1749 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1750 if (RT_FAILURE(rc))
1751 return rc;
1752 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1753 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1754#endif
1755 return VINF_SUCCESS;
1756}
1757
1758
1759/**
1760 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1761 * own the PGM lock and have access to the page structure.
1762 *
1763 * @returns VBox status code.
1764 * @retval VINF_SUCCESS on success.
1765 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1766 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1767 *
1768 * @param pVM The cross context VM structure.
1769 * @param GCPhys The guest physical address of the page that should be mapped.
1770 * @param pPage Pointer to the PGMPAGE structure for the page.
1771 * @param ppv Where to store the address corresponding to GCPhys.
1772 * @param pLock Where to store the lock information that
1773 * pgmPhysReleaseInternalPageMappingLock needs.
1774 *
1775 * @internal
1776 */
1777int pgmPhysGCPhys2CCPtrInternalReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1778{
1779 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1780 PGM_LOCK_ASSERT_OWNER(pVM);
1781 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1782
1783 /*
1784 * Do the job.
1785 */
1786#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1787 void *pv;
1788 PVMCPU pVCpu = VMMGetCpu(pVM);
1789 int rc = pgmRZDynMapHCPageInlined(pVCpu,
1790 PGM_PAGE_GET_HCPHYS(pPage),
1791 &pv
1792 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1793 if (RT_FAILURE(rc))
1794 return rc;
1795 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1796 pLock->pvPage = pv;
1797 pLock->pVCpu = pVCpu;
1798
1799#else
1800 PPGMPAGEMAPTLBE pTlbe;
1801 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1802 if (RT_FAILURE(rc))
1803 return rc;
1804 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1805 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1806#endif
1807 return VINF_SUCCESS;
1808}
1809
1810
1811/**
1812 * Requests the mapping of a guest page into the current context.
1813 *
1814 * This API should only be used for short-term mappings, as it will consume scarce
1815 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1816 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1817 *
1818 * This API will assume your intention is to write to the page, and will
1819 * therefore replace shared and zero pages. If you do not intend to modify
1820 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1821 *
1822 * @returns VBox status code.
1823 * @retval VINF_SUCCESS on success.
1824 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1825 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1826 *
1827 * @param pVM The cross context VM structure.
1828 * @param GCPhys The guest physical address of the page that should be
1829 * mapped.
1830 * @param ppv Where to store the address corresponding to GCPhys.
1831 * @param pLock Where to store the lock information that
1832 * PGMPhysReleasePageMappingLock needs.
1833 *
1834 * @remarks The caller is responsible for dealing with access handlers.
1835 * @todo Add an informational return code for pages with access handlers?
1836 *
1837 * @remark Avoid calling this API from within critical sections (other than
1838 * the PGM one) because of the deadlock risk. External threads may
1839 * need to delegate jobs to the EMTs.
1840 * @remarks Only one page is mapped! Make no assumption about what's after or
1841 * before the returned page!
1842 * @thread Any thread.
1843 */
1844VMM_INT_DECL(int) PGMPhysGCPhys2CCPtr(PVMCC pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1845{
1846 int rc = pgmLock(pVM);
1847 AssertRCReturn(rc, rc);
1848
1849#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1850 /*
1851 * Find the page and make sure it's writable.
1852 */
1853 PPGMPAGE pPage;
1854 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1855 if (RT_SUCCESS(rc))
1856 {
1857 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1858 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1859 if (RT_SUCCESS(rc))
1860 {
1861 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1862
1863 PVMCPU pVCpu = VMMGetCpu(pVM);
1864 void *pv;
1865 rc = pgmRZDynMapHCPageInlined(pVCpu,
1866 PGM_PAGE_GET_HCPHYS(pPage),
1867 &pv
1868 RTLOG_COMMA_SRC_POS);
1869 if (RT_SUCCESS(rc))
1870 {
1871 AssertRCSuccess(rc);
1872
1873 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1874 *ppv = pv;
1875 pLock->pvPage = pv;
1876 pLock->pVCpu = pVCpu;
1877 }
1878 }
1879 }
1880
1881#else /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1882 /*
1883 * Query the Physical TLB entry for the page (may fail).
1884 */
1885 PPGMPAGEMAPTLBE pTlbe;
1886 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1887 if (RT_SUCCESS(rc))
1888 {
1889 /*
1890 * If the page is shared, the zero page, or being write monitored
1891 * it must be converted to a page that's writable if possible.
1892 */
1893 PPGMPAGE pPage = pTlbe->pPage;
1894 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1895 {
1896 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1897 if (RT_SUCCESS(rc))
1898 {
1899 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1900 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1901 }
1902 }
1903 if (RT_SUCCESS(rc))
1904 {
1905 /*
1906 * Now, just perform the locking and calculate the return address.
1907 */
1908 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1909 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1910 }
1911 }
1912
1913#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1914 pgmUnlock(pVM);
1915 return rc;
1916}
1917
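/*
 * Illustrative usage sketch for PGMPhysGCPhys2CCPtr (compiled out): map a
 * guest physical page with write intent, patch a single byte and release the
 * mapping lock again as soon as possible.  The helper name and its purpose
 * are made up for the example and are not part of the VMM API.
 */
#if 0
static int pgmPhysExamplePokeU8(PVMCC pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    void          *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        /* pv points at the byte GCPhys refers to; it is only valid within
           the containing page and only until the lock is released. */
        *(uint8_t *)pv = bValue;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif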
1918
1919/**
1920 * Requests the mapping of a guest page into the current context.
1921 *
1922 * This API should only be used for short-term mappings, as it will consume scarce
1923 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1924 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1925 *
1926 * @returns VBox status code.
1927 * @retval VINF_SUCCESS on success.
1928 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1929 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1930 *
1931 * @param pVM The cross context VM structure.
1932 * @param GCPhys The guest physical address of the page that should be
1933 * mapped.
1934 * @param ppv Where to store the address corresponding to GCPhys.
1935 * @param pLock Where to store the lock information that
1936 * PGMPhysReleasePageMappingLock needs.
1937 *
1938 * @remarks The caller is responsible for dealing with access handlers.
1939 * @todo Add an informational return code for pages with access handlers?
1940 *
1941 * @remarks Avoid calling this API from within critical sections (other than
1942 * the PGM one) because of the deadlock risk.
1943 * @remarks Only one page is mapped! Make no assumption about what's after or
1944 * before the returned page!
1945 * @thread Any thread.
1946 */
1947VMM_INT_DECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVMCC pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1948{
1949 int rc = pgmLock(pVM);
1950 AssertRCReturn(rc, rc);
1951
1952#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1953 /*
1954 * Find the page and make sure it's readable.
1955 */
1956 PPGMPAGE pPage;
1957 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1958 if (RT_SUCCESS(rc))
1959 {
1960 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1961 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1962 else
1963 {
1964 PVMCPU pVCpu = VMMGetCpu(pVM);
1965 void *pv;
1966 rc = pgmRZDynMapHCPageInlined(pVCpu,
1967 PGM_PAGE_GET_HCPHYS(pPage),
1968 &pv
1969 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1970 if (RT_SUCCESS(rc))
1971 {
1972 AssertRCSuccess(rc);
1973
1974 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1975 *ppv = pv;
1976 pLock->pvPage = pv;
1977 pLock->pVCpu = pVCpu;
1978 }
1979 }
1980 }
1981
1982#else /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1983 /*
1984 * Query the Physical TLB entry for the page (may fail).
1985 */
1986 PPGMPAGEMAPTLBE pTlbe;
1987 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1988 if (RT_SUCCESS(rc))
1989 {
1990        /* MMIO pages don't have any readable backing. */
1991 PPGMPAGE pPage = pTlbe->pPage;
1992 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1993 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1994 else
1995 {
1996 /*
1997 * Now, just perform the locking and calculate the return address.
1998 */
1999 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
2000 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
2001 }
2002 }
2003
2004#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2005 pgmUnlock(pVM);
2006 return rc;
2007}
2008
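/*
 * Illustrative usage sketch for PGMPhysGCPhys2CCPtrReadOnly (compiled out):
 * checksum one whole guest page without forcing shared or zero pages into
 * the allocated (writable) state.  The helper name is made up for the
 * example.
 */
#if 0
static int pgmPhysExampleSumPage(PVMCC pVM, RTGCPHYS GCPhysPage, uint32_t *puSum)
{
    void const    *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysPage & ~(RTGCPHYS)PAGE_OFFSET_MASK, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        uint32_t uSum = 0;
        for (uint32_t off = 0; off < PAGE_SIZE; off++)
            uSum += ((uint8_t const *)pv)[off];
        *puSum = uSum;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif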
2009
2010/**
2011 * Requests the mapping of a guest page given by virtual address into the current context.
2012 *
2013 * This API should only be used for short-term mappings, as it will consume
2014 * scarce resources (R0 and GC) in the mapping cache. When you're done
2015 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2016 *
2017 * This API will assume your intention is to write to the page, and will
2018 * therefore replace shared and zero pages. If you do not intend to modify
2019 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
2020 *
2021 * @returns VBox status code.
2022 * @retval VINF_SUCCESS on success.
2023 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2024 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2025 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2026 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2027 *
2028 * @param pVCpu The cross context virtual CPU structure.
2029 * @param GCPtr         The guest virtual address of the page that should be
2030 * mapped.
2031 * @param ppv           Where to store the address corresponding to GCPtr.
2032 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2033 *
2034 * @remark Avoid calling this API from within critical sections (other than
2035 * the PGM one) because of the deadlock risk.
2036 * @thread EMT
2037 */
2038VMM_INT_DECL(int) PGMPhysGCPtr2CCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
2039{
2040 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2041 RTGCPHYS GCPhys;
2042 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2043 if (RT_SUCCESS(rc))
2044 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2045 return rc;
2046}
2047
2048
2049/**
2050 * Requests the mapping of a guest page given by virtual address into the current context.
2051 *
2052 * This API should only be used for short-term mappings, as it will consume
2053 * scarce resources (R0 and GC) in the mapping cache. When you're done
2054 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2055 *
2056 * @returns VBox status code.
2057 * @retval VINF_SUCCESS on success.
2058 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2059 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2060 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2061 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2062 *
2063 * @param pVCpu The cross context virtual CPU structure.
2064 * @param GCPtr         The guest virtual address of the page that should be
2065 * mapped.
2066 * @param ppv Where to store the address corresponding to GCPtr.
2067 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2068 *
2069 * @remark Avoid calling this API from within critical sections (other than
2070 * the PGM one) because of the deadlock risk.
2071 * @thread EMT
2072 */
2073VMM_INT_DECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
2074{
2075 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2076 RTGCPHYS GCPhys;
2077 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2078 if (RT_SUCCESS(rc))
2079 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2080 return rc;
2081}
2082
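/*
 * Illustrative usage sketch for PGMPhysGCPtr2CCPtrReadOnly (compiled out):
 * peek at a dword at a guest virtual address from the owning EMT.  The
 * helper name is made up for the example; it assumes the dword does not
 * cross a page boundary.
 */
#if 0
static int pgmPhysExamplePeekGstU32(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t *pu32)
{
    void const    *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtr, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pu32 = *(uint32_t const *)pv;
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
    }
    return rc;
}
#endif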
2083
2084/**
2085 * Release the mapping of a guest page.
2086 *
2087 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
2088 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
2089 *
2090 * @param pVM The cross context VM structure.
2091 * @param pLock The lock structure initialized by the mapping function.
2092 */
2093VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2094{
2095#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2096 Assert(pLock->pvPage != NULL);
2097 Assert(pLock->pVCpu == VMMGetCpu(pVM)); RT_NOREF_PV(pVM);
2098 PGM_DYNMAP_UNUSED_HINT(pLock->pVCpu, pLock->pvPage);
2099 pLock->pVCpu = NULL;
2100 pLock->pvPage = NULL;
2101
2102#else
2103# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
2104 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
2105# endif
2106 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2107 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2108
2109 pLock->uPageAndType = 0;
2110 pLock->pvMap = NULL;
2111
2112 pgmLock(pVM);
2113 if (fWriteLock)
2114 {
2115 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2116 Assert(cLocks > 0);
2117 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2118 {
2119 if (cLocks == 1)
2120 {
2121 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2122 pVM->pgm.s.cWriteLockedPages--;
2123 }
2124 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2125 }
2126
2127 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2128 { /* probably extremely likely */ }
2129 else
2130 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2131 }
2132 else
2133 {
2134 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2135 Assert(cLocks > 0);
2136 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2137 {
2138 if (cLocks == 1)
2139 {
2140 Assert(pVM->pgm.s.cReadLockedPages > 0);
2141 pVM->pgm.s.cReadLockedPages--;
2142 }
2143 PGM_PAGE_DEC_READ_LOCKS(pPage);
2144 }
2145 }
2146
2147# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
2148 if (pMap)
2149 {
2150 Assert(pMap->cRefs >= 1);
2151 pMap->cRefs--;
2152 }
2153# endif
2154 pgmUnlock(pVM);
2155#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2156}
2157
2158
2159#ifdef IN_RING3
2160/**
2161 * Release the mapping of multiple guest pages.
2162 *
2163 * This is the counterpart to PGMR3PhysBulkGCPhys2CCPtrExternal() and
2164 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal().
2165 *
2166 * @param pVM The cross context VM structure.
2167 * @param cPages Number of pages to unlock.
2168 * @param paLocks       Array of lock structures initialized by the mapping
2169 * function.
2170 */
2171VMMDECL(void) PGMPhysBulkReleasePageMappingLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
2172{
2173 Assert(cPages > 0);
2174 bool const fWriteLock = (paLocks[0].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2175#ifdef VBOX_STRICT
2176 for (uint32_t i = 1; i < cPages; i++)
2177 {
2178 Assert(fWriteLock == ((paLocks[i].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE));
2179 AssertPtr(paLocks[i].uPageAndType);
2180 }
2181#endif
2182
2183 pgmLock(pVM);
2184 if (fWriteLock)
2185 {
2186 /*
2187 * Write locks:
2188 */
2189 for (uint32_t i = 0; i < cPages; i++)
2190 {
2191 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2192 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2193 Assert(cLocks > 0);
2194 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2195 {
2196 if (cLocks == 1)
2197 {
2198 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2199 pVM->pgm.s.cWriteLockedPages--;
2200 }
2201 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2202 }
2203
2204 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2205 { /* probably extremely likely */ }
2206 else
2207 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2208
2209 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2210 if (pMap)
2211 {
2212 Assert(pMap->cRefs >= 1);
2213 pMap->cRefs--;
2214 }
2215
2216 /* Yield the lock: */
2217 if ((i & 1023) == 1023)
2218 {
2219                pgmUnlock(pVM);
2220                pgmLock(pVM);
2221 }
2222 }
2223 }
2224 else
2225 {
2226 /*
2227 * Read locks:
2228 */
2229 for (uint32_t i = 0; i < cPages; i++)
2230 {
2231 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2232 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2233 Assert(cLocks > 0);
2234 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2235 {
2236 if (cLocks == 1)
2237 {
2238 Assert(pVM->pgm.s.cReadLockedPages > 0);
2239 pVM->pgm.s.cReadLockedPages--;
2240 }
2241 PGM_PAGE_DEC_READ_LOCKS(pPage);
2242 }
2243
2244 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2245 if (pMap)
2246 {
2247 Assert(pMap->cRefs >= 1);
2248 pMap->cRefs--;
2249 }
2250
2251 /* Yield the lock: */
2252 if ((i & 1023) == 1023)
2253 {
2254                pgmUnlock(pVM);
2255                pgmLock(pVM);
2256 }
2257 }
2258 }
2259 pgmUnlock(pVM);
2260
2261 RT_BZERO(paLocks, sizeof(paLocks[0]) * cPages);
2262}
2263#endif /* IN_RING3 */
2264
2265
2266/**
2267 * Release the internal mapping of a guest page.
2268 *
2269 * This is the counter part of pgmPhysGCPhys2CCPtrInternalEx and
2270 * pgmPhysGCPhys2CCPtrInternalReadOnly.
2271 *
2272 * @param pVM The cross context VM structure.
2273 * @param pLock The lock structure initialized by the mapping function.
2274 *
2275 * @remarks Caller must hold the PGM lock.
2276 */
2277void pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2278{
2279 PGM_LOCK_ASSERT_OWNER(pVM);
2280 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
2281}
2282
2283
2284/**
2285 * Converts a GC physical address to a HC ring-3 pointer.
2286 *
2287 * @returns VINF_SUCCESS on success.
2288 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2289 * page but has no physical backing.
2290 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2291 * GC physical address.
2292 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2293 * a dynamic ram chunk boundary
2294 *
2295 * @param pVM The cross context VM structure.
2296 * @param GCPhys The GC physical address to convert.
2297 * @param pR3Ptr Where to store the R3 pointer on success.
2298 *
2299 * @deprecated Avoid when possible!
2300 */
2301int pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2302{
2303/** @todo this is kind of hacky and needs some more work. */
2304#ifndef DEBUG_sandervl
2305 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
2306#endif
2307
2308    Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): don't use this API!\n", GCPhys)); /** @todo eliminate this API! */
2309#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2310 NOREF(pVM); NOREF(pR3Ptr); RT_NOREF_PV(GCPhys);
2311 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
2312#else
2313 pgmLock(pVM);
2314
2315 PPGMRAMRANGE pRam;
2316 PPGMPAGE pPage;
2317 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2318 if (RT_SUCCESS(rc))
2319 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2320
2321 pgmUnlock(pVM);
2322 Assert(rc <= VINF_SUCCESS);
2323 return rc;
2324#endif
2325}
2326
2327#if 0 /*def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2328
2329/**
2330 * Maps and locks a guest CR3 or PD (PAE) page.
2331 *
2332 * @returns VINF_SUCCESS on success.
2333 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2334 * page but has no physical backing.
2335 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2336 * GC physical address.
2337 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2338 * a dynamic ram chunk boundary
2339 *
2340 * @param pVM The cross context VM structure.
2341 * @param GCPhys The GC physical address to convert.
2342 * @param pR3Ptr Where to store the R3 pointer on success. This may or
2343 * may not be valid in ring-0 depending on the
2344 * VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option.
2345 *
2346 * @remarks The caller must own the PGM lock.
2347 */
2348int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2349{
2350
2351 PPGMRAMRANGE pRam;
2352 PPGMPAGE pPage;
2353 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2354 if (RT_SUCCESS(rc))
2355 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2356 Assert(rc <= VINF_SUCCESS);
2357 return rc;
2358}
2359
2360
2361int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2362{
2363
2364}
2365
2366#endif
2367
2368/**
2369 * Converts a guest pointer to a GC physical address.
2370 *
2371 * This uses the current CR3/CR0/CR4 of the guest.
2372 *
2373 * @returns VBox status code.
2374 * @param pVCpu The cross context virtual CPU structure.
2375 * @param GCPtr The guest pointer to convert.
2376 * @param pGCPhys Where to store the GC physical address.
2377 */
2378VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2379{
2380 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
2381 if (pGCPhys && RT_SUCCESS(rc))
2382 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
2383 return rc;
2384}
2385
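/*
 * Illustrative usage sketch for PGMPhysGCPtr2GCPhys (compiled out): translate
 * a guest virtual address through the current guest paging mode and log the
 * result.  The helper name is made up for the example.
 */
#if 0
static void pgmPhysExampleLogTranslation(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
    if (RT_SUCCESS(rc))
        Log(("example: %RGv -> %RGp\n", GCPtr, GCPhys));
    else
        Log(("example: %RGv is not mapped (%Rrc)\n", GCPtr, rc));
}
#endif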
2386
2387/**
2388 * Converts a guest pointer to a HC physical address.
2389 *
2390 * This uses the current CR3/CR0/CR4 of the guest.
2391 *
2392 * @returns VBox status code.
2393 * @param pVCpu The cross context virtual CPU structure.
2394 * @param GCPtr The guest pointer to convert.
2395 * @param pHCPhys Where to store the HC physical address.
2396 */
2397VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2398{
2399 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2400 RTGCPHYS GCPhys;
2401 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
2402 if (RT_SUCCESS(rc))
2403 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
2404 return rc;
2405}
2406
2407
2408
2409#undef LOG_GROUP
2410#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2411
2412
2413#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2414/**
2415 * Cache PGMPhys memory access
2416 *
2417 * @param pVM The cross context VM structure.
2418 * @param pCache Cache structure pointer
2419 * @param GCPhys GC physical address
2420 * @param pbHC HC pointer corresponding to physical page
2421 *
2422 * @thread EMT.
2423 */
2424static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2425{
2426 uint32_t iCacheIndex;
2427
2428 Assert(VM_IS_EMT(pVM));
2429
2430 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
2431 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
2432
2433 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2434
2435 ASMBitSet(&pCache->aEntries, iCacheIndex);
2436
2437 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2438 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2439}
2440#endif /* IN_RING3 */
2441
2442
2443/**
2444 * Deals with reading from a page with one or more ALL access handlers.
2445 *
2446 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2447 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2448 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2449 *
2450 * @param pVM The cross context VM structure.
2451 * @param pPage The page descriptor.
2452 * @param GCPhys The physical address to start reading at.
2453 * @param pvBuf Where to put the bits we read.
2454 * @param cb How much to read - less or equal to a page.
2455 * @param enmOrigin The origin of this call.
2456 */
2457static VBOXSTRICTRC pgmPhysReadHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb,
2458 PGMACCESSORIGIN enmOrigin)
2459{
2460 /*
2461 * The most frequent access here is MMIO and shadowed ROM.
2462 * The current code ASSUMES all these access handlers cover full pages!
2463 */
2464
2465 /*
2466 * Whatever we do we need the source page, map it first.
2467 */
2468 PGMPAGEMAPLOCK PgMpLck;
2469 const void *pvSrc = NULL;
2470 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2471/** @todo Check how this can work for MMIO pages? */
2472 if (RT_FAILURE(rc))
2473 {
2474 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2475 GCPhys, pPage, rc));
2476 memset(pvBuf, 0xff, cb);
2477 return VINF_SUCCESS;
2478 }
2479
2480 VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
2481
2482 /*
2483 * Deal with any physical handlers.
2484 */
2485 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2486 PPGMPHYSHANDLER pPhys = NULL;
2487 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2488 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2489 {
2490 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2491 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2492 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
2493 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
2494 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2495#ifndef IN_RING3
2496 if (enmOrigin != PGMACCESSORIGIN_IEM)
2497 {
2498 /* Cannot reliably handle informational status codes in this context */
2499 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2500 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2501 }
2502#endif
2503 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler); Assert(pfnHandler);
2504 void *pvUser = pPhys->CTX_SUFF(pvUser);
2505
2506 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
2507 STAM_PROFILE_START(&pPhys->Stat, h);
2508 PGM_LOCK_ASSERT_OWNER(pVM);
2509
2510 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2511 pgmUnlock(pVM);
2512 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, pvUser);
2513 pgmLock(pVM);
2514
2515#ifdef VBOX_WITH_STATISTICS
2516 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2517 if (pPhys)
2518 STAM_PROFILE_STOP(&pPhys->Stat, h);
2519#else
2520 pPhys = NULL; /* might not be valid anymore. */
2521#endif
2522 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, false),
2523 ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
2524 if ( rcStrict != VINF_PGM_HANDLER_DO_DEFAULT
2525 && !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2526 {
2527 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2528 return rcStrict;
2529 }
2530 }
2531
2532 /*
2533 * Take the default action.
2534 */
2535 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2536 {
2537 memcpy(pvBuf, pvSrc, cb);
2538 rcStrict = VINF_SUCCESS;
2539 }
2540 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2541 return rcStrict;
2542}
2543
2544
2545/**
2546 * Read physical memory.
2547 *
2548 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2549 * want to ignore those.
2550 *
2551 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2552 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2553 * @retval VINF_SUCCESS in all context - read completed.
2554 *
2555 * @retval VINF_EM_OFF in RC and R0 - read completed.
2556 * @retval VINF_EM_SUSPEND in RC and R0 - read completed.
2557 * @retval VINF_EM_RESET in RC and R0 - read completed.
2558 * @retval VINF_EM_HALT in RC and R0 - read completed.
2559 * @retval VINF_SELM_SYNC_GDT in RC only - read completed.
2560 *
2561 * @retval VINF_EM_DBG_STOP in RC and R0 - read completed.
2562 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - read completed.
2563 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2564 *
2565 * @retval VINF_IOM_R3_MMIO_READ in RC and R0.
2566 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2567 *
2568 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2569 *
2570 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2571 * haven't been cleared for strict status codes yet.
2572 *
2573 * @param pVM The cross context VM structure.
2574 * @param GCPhys Physical address start reading from.
2575 * @param pvBuf Where to put the read bits.
2576 * @param cbRead How many bytes to read.
2577 * @param enmOrigin The origin of this call.
2578 */
2579VMMDECL(VBOXSTRICTRC) PGMPhysRead(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2580{
2581 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2582 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2583
2584 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysRead));
2585 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2586
2587 pgmLock(pVM);
2588
2589 /*
2590 * Copy loop on ram ranges.
2591 */
2592 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2593 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2594 for (;;)
2595 {
2596 /* Inside range or not? */
2597 if (pRam && GCPhys >= pRam->GCPhys)
2598 {
2599 /*
2600 * Must work our way thru this page by page.
2601 */
2602 RTGCPHYS off = GCPhys - pRam->GCPhys;
2603 while (off < pRam->cb)
2604 {
2605 unsigned iPage = off >> PAGE_SHIFT;
2606 PPGMPAGE pPage = &pRam->aPages[iPage];
2607 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2608 if (cb > cbRead)
2609 cb = cbRead;
2610
2611 /*
2612 * Normal page? Get the pointer to it.
2613 */
2614 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2615 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2616 {
2617 /*
2618 * Get the pointer to the page.
2619 */
2620 PGMPAGEMAPLOCK PgMpLck;
2621 const void *pvSrc;
2622 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2623 if (RT_SUCCESS(rc))
2624 {
2625 memcpy(pvBuf, pvSrc, cb);
2626 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2627 }
2628 else
2629 {
2630 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2631 pRam->GCPhys + off, pPage, rc));
2632 memset(pvBuf, 0xff, cb);
2633 }
2634 }
2635 /*
2636 * Have ALL/MMIO access handlers.
2637 */
2638 else
2639 {
2640 VBOXSTRICTRC rcStrict2 = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2641 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2642 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2643 else
2644 {
2645 memset(pvBuf, 0xff, cb);
2646 pgmUnlock(pVM);
2647 return rcStrict2;
2648 }
2649 }
2650
2651 /* next page */
2652 if (cb >= cbRead)
2653 {
2654 pgmUnlock(pVM);
2655 return rcStrict;
2656 }
2657 cbRead -= cb;
2658 off += cb;
2659 pvBuf = (char *)pvBuf + cb;
2660 } /* walk pages in ram range. */
2661
2662 GCPhys = pRam->GCPhysLast + 1;
2663 }
2664 else
2665 {
2666 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2667
2668 /*
2669 * Unassigned address space.
2670 */
2671 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2672 if (cb >= cbRead)
2673 {
2674 memset(pvBuf, 0xff, cbRead);
2675 break;
2676 }
2677 memset(pvBuf, 0xff, cb);
2678
2679 cbRead -= cb;
2680 pvBuf = (char *)pvBuf + cb;
2681 GCPhys += cb;
2682 }
2683
2684 /* Advance range if necessary. */
2685 while (pRam && GCPhys > pRam->GCPhysLast)
2686 pRam = pRam->CTX_SUFF(pNext);
2687 } /* Ram range walk */
2688
2689 pgmUnlock(pVM);
2690 return rcStrict;
2691}
2692
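/*
 * Illustrative usage sketch for PGMPhysRead (compiled out): read a qword
 * while respecting access handlers.  Informational statuses (ring-0 and
 * raw-mode only) must still be propagated to the caller.  The helper name
 * and the caller-supplied origin are placeholders.
 */
#if 0
static VBOXSTRICTRC pgmPhysExampleReadU64(PVMCC pVM, RTGCPHYS GCPhys, uint64_t *pu64, PGMACCESSORIGIN enmOrigin)
{
    VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pu64, sizeof(*pu64), enmOrigin);
    if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict))
        *pu64 = 0; /* Hard failure; treat the buffer contents as undefined. */
    /* On success rcStrict may still carry an informational status that the
       caller has to act upon (ring-0 / raw-mode only). */
    return rcStrict;
}
#endif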
2693
2694/**
2695 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2696 *
2697 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2698 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2699 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2700 *
2701 * @param pVM The cross context VM structure.
2702 * @param pPage The page descriptor.
2703 * @param GCPhys The physical address to start writing at.
2704 * @param pvBuf What to write.
2705 * @param cbWrite How much to write - less or equal to a page.
2706 * @param enmOrigin The origin of this call.
2707 */
2708static VBOXSTRICTRC pgmPhysWriteHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2709 PGMACCESSORIGIN enmOrigin)
2710{
2711 PGMPAGEMAPLOCK PgMpLck;
2712 void *pvDst = NULL;
2713 VBOXSTRICTRC rcStrict;
2714
2715 /*
2716 * Give priority to physical handlers (like #PF does).
2717 *
2718 * Hope for a lonely physical handler first that covers the whole
2719 * write area. This should be a pretty frequent case with MMIO and
2720 * the heavy usage of full page handlers in the page pool.
2721 */
2722 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2723 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2724 if (pCur)
2725 {
2726 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2727#ifndef IN_RING3
2728 if (enmOrigin != PGMACCESSORIGIN_IEM)
2729 /* Cannot reliably handle informational status codes in this context */
2730 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2731#endif
2732 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2733 if (cbRange > cbWrite)
2734 cbRange = cbWrite;
2735
2736 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler));
2737 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2738 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2739 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2740 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2741 else
2742 rcStrict = VINF_SUCCESS;
2743 if (RT_SUCCESS(rcStrict))
2744 {
2745 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler);
2746 void *pvUser = pCur->CTX_SUFF(pvUser);
2747 STAM_PROFILE_START(&pCur->Stat, h);
2748
2749 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2750 PGM_LOCK_ASSERT_OWNER(pVM);
2751 pgmUnlock(pVM);
2752 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2753 pgmLock(pVM);
2754
2755#ifdef VBOX_WITH_STATISTICS
2756 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2757 if (pCur)
2758 STAM_PROFILE_STOP(&pCur->Stat, h);
2759#else
2760 pCur = NULL; /* might not be valid anymore. */
2761#endif
2762 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2763 {
2764 if (pvDst)
2765 memcpy(pvDst, pvBuf, cbRange);
2766 rcStrict = VINF_SUCCESS;
2767 }
2768 else
2769 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
2770 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2771 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pCur ? R3STRING(pCur->pszDesc) : ""));
2772 }
2773 else
2774 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2775 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
2776 if (RT_LIKELY(cbRange == cbWrite) || !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2777 {
2778 if (pvDst)
2779 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2780 return rcStrict;
2781 }
2782
2783 /* more fun to be had below */
2784 cbWrite -= cbRange;
2785 GCPhys += cbRange;
2786 pvBuf = (uint8_t *)pvBuf + cbRange;
2787 pvDst = (uint8_t *)pvDst + cbRange;
2788 }
2789 else /* The handler is somewhere else in the page, deal with it below. */
2790 rcStrict = VINF_SUCCESS;
2791 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2792
2793 /*
2794 * Deal with all the odd ends (used to be deal with virt+phys).
2795 */
2796 Assert(rcStrict != VINF_PGM_HANDLER_DO_DEFAULT);
2797
2798 /* We need a writable destination page. */
2799 if (!pvDst)
2800 {
2801 int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2802 AssertLogRelMsgReturn(RT_SUCCESS(rc2),
2803 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n", GCPhys, pPage, rc2),
2804 rc2);
2805 }
2806
2807 /* The loop state (big + ugly). */
2808 PPGMPHYSHANDLER pPhys = NULL;
2809 uint32_t offPhys = PAGE_SIZE;
2810 uint32_t offPhysLast = PAGE_SIZE;
2811 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2812
2813 /* The loop. */
2814 for (;;)
2815 {
2816 if (fMorePhys && !pPhys)
2817 {
2818 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2819 if (pPhys)
2820 {
2821 offPhys = 0;
2822 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2823 }
2824 else
2825 {
2826 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2827 GCPhys, true /* fAbove */);
2828 if ( pPhys
2829 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2830 {
2831 offPhys = pPhys->Core.Key - GCPhys;
2832 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2833 }
2834 else
2835 {
2836 pPhys = NULL;
2837 fMorePhys = false;
2838 offPhys = offPhysLast = PAGE_SIZE;
2839 }
2840 }
2841 }
2842
2843 /*
2844 * Handle access to space without handlers (that's easy).
2845 */
2846 VBOXSTRICTRC rcStrict2 = VINF_PGM_HANDLER_DO_DEFAULT;
2847 uint32_t cbRange = (uint32_t)cbWrite;
2848
2849 /*
2850 * Physical handler.
2851 */
2852 if (!offPhys)
2853 {
2854#ifndef IN_RING3
2855 if (enmOrigin != PGMACCESSORIGIN_IEM)
2856 /* Cannot reliably handle informational status codes in this context */
2857 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2858#endif
2859 if (cbRange > offPhysLast + 1)
2860 cbRange = offPhysLast + 1;
2861
2862 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
2863 void *pvUser = pPhys->CTX_SUFF(pvUser);
2864
2865 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2866 STAM_PROFILE_START(&pPhys->Stat, h);
2867
2868 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2869 PGM_LOCK_ASSERT_OWNER(pVM);
2870 pgmUnlock(pVM);
2871 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2872 pgmLock(pVM);
2873
2874#ifdef VBOX_WITH_STATISTICS
2875 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2876 if (pPhys)
2877 STAM_PROFILE_STOP(&pPhys->Stat, h);
2878#else
2879 pPhys = NULL; /* might not be valid anymore. */
2880#endif
2881 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true),
2882 ("rcStrict2=%Rrc (rcStrict=%Rrc) GCPhys=%RGp pPage=%R[pgmpage] %s\n", VBOXSTRICTRC_VAL(rcStrict2),
2883 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pPhys ? R3STRING(pPhys->pszDesc) : ""));
2884 }
2885
2886 /*
2887 * Execute the default action and merge the status codes.
2888 */
2889 if (rcStrict2 == VINF_PGM_HANDLER_DO_DEFAULT)
2890 {
2891 memcpy(pvDst, pvBuf, cbRange);
2892 rcStrict2 = VINF_SUCCESS;
2893 }
2894 else if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2895 {
2896 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2897 return rcStrict2;
2898 }
2899 else
2900 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2901
2902 /*
2903 * Advance if we've got more stuff to do.
2904 */
2905 if (cbRange >= cbWrite)
2906 {
2907 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2908 return rcStrict;
2909 }
2910
2911
2912 cbWrite -= cbRange;
2913 GCPhys += cbRange;
2914 pvBuf = (uint8_t *)pvBuf + cbRange;
2915 pvDst = (uint8_t *)pvDst + cbRange;
2916
2917 offPhys -= cbRange;
2918 offPhysLast -= cbRange;
2919 }
2920}
2921
2922
2923/**
2924 * Write to physical memory.
2925 *
2926 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2927 * want to ignore those.
2928 *
2929 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2930 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2931 * @retval VINF_SUCCESS in all context - write completed.
2932 *
2933 * @retval VINF_EM_OFF in RC and R0 - write completed.
2934 * @retval VINF_EM_SUSPEND in RC and R0 - write completed.
2935 * @retval VINF_EM_RESET in RC and R0 - write completed.
2936 * @retval VINF_EM_HALT in RC and R0 - write completed.
2937 * @retval VINF_SELM_SYNC_GDT in RC only - write completed.
2938 *
2939 * @retval VINF_EM_DBG_STOP in RC and R0 - write completed.
2940 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - write completed.
2941 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2942 *
2943 * @retval VINF_IOM_R3_MMIO_WRITE in RC and R0.
2944 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2945 * @retval VINF_IOM_R3_MMIO_COMMIT_WRITE in RC and R0.
2946 *
2947 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT in RC only - write completed.
2948 * @retval VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT in RC only.
2949 * @retval VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT in RC only.
2950 * @retval VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT in RC only.
2951 * @retval VINF_CSAM_PENDING_ACTION in RC only.
2952 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2953 *
2954 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2955 * haven't been cleared for strict status codes yet.
2956 *
2957 *
2958 * @param pVM The cross context VM structure.
2959 * @param GCPhys Physical address to write to.
2960 * @param pvBuf What to write.
2961 * @param cbWrite How many bytes to write.
2962 * @param enmOrigin Who is calling.
2963 */
2964VMMDECL(VBOXSTRICTRC) PGMPhysWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2965{
2966 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2967 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2968 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2969
2970 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWrite));
2971 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2972
2973 pgmLock(pVM);
2974
2975 /*
2976 * Copy loop on ram ranges.
2977 */
2978 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2979 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2980 for (;;)
2981 {
2982 /* Inside range or not? */
2983 if (pRam && GCPhys >= pRam->GCPhys)
2984 {
2985 /*
2986 * Must work our way thru this page by page.
2987 */
2988 RTGCPTR off = GCPhys - pRam->GCPhys;
2989 while (off < pRam->cb)
2990 {
2991 RTGCPTR iPage = off >> PAGE_SHIFT;
2992 PPGMPAGE pPage = &pRam->aPages[iPage];
2993 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2994 if (cb > cbWrite)
2995 cb = cbWrite;
2996
2997 /*
2998 * Normal page? Get the pointer to it.
2999 */
3000 if ( !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
3001 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3002 {
3003 PGMPAGEMAPLOCK PgMpLck;
3004 void *pvDst;
3005 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
3006 if (RT_SUCCESS(rc))
3007 {
3008 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
3009 memcpy(pvDst, pvBuf, cb);
3010 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
3011 }
3012 /* Ignore writes to ballooned pages. */
3013 else if (!PGM_PAGE_IS_BALLOONED(pPage))
3014 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
3015 pRam->GCPhys + off, pPage, rc));
3016 }
3017 /*
3018 * Active WRITE or ALL access handlers.
3019 */
3020 else
3021 {
3022 VBOXSTRICTRC rcStrict2 = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
3023 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
3024 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
3025 else
3026 {
3027 pgmUnlock(pVM);
3028 return rcStrict2;
3029 }
3030 }
3031
3032 /* next page */
3033 if (cb >= cbWrite)
3034 {
3035 pgmUnlock(pVM);
3036 return rcStrict;
3037 }
3038
3039 cbWrite -= cb;
3040 off += cb;
3041 pvBuf = (const char *)pvBuf + cb;
3042 } /* walk pages in ram range */
3043
3044 GCPhys = pRam->GCPhysLast + 1;
3045 }
3046 else
3047 {
3048 /*
3049 * Unassigned address space, skip it.
3050 */
3051 if (!pRam)
3052 break;
3053 size_t cb = pRam->GCPhys - GCPhys;
3054 if (cb >= cbWrite)
3055 break;
3056 cbWrite -= cb;
3057 pvBuf = (const char *)pvBuf + cb;
3058 GCPhys += cb;
3059 }
3060
3061 /* Advance range if necessary. */
3062 while (pRam && GCPhys > pRam->GCPhysLast)
3063 pRam = pRam->CTX_SUFF(pNext);
3064 } /* Ram range walk */
3065
3066 pgmUnlock(pVM);
3067 return rcStrict;
3068}
3069
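/*
 * Illustrative usage sketch for PGMPhysWrite (compiled out): write a dword
 * with access handlers respected.  The API itself splits the access on page
 * and RAM range boundaries, so the caller passes the whole buffer in one go.
 * The helper name and the origin parameter are placeholders.
 */
#if 0
static VBOXSTRICTRC pgmPhysExampleWriteU32(PVMCC pVM, RTGCPHYS GCPhys, uint32_t u32, PGMACCESSORIGIN enmOrigin)
{
    VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, &u32, sizeof(u32), enmOrigin);
    /* PGM_PHYS_RW_IS_SUCCESS covers VINF_SUCCESS as well as the informational
       statuses that ring-0 and raw-mode callers must hand on to ring-3. */
    if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict))
        LogRel(("example: write at %RGp failed: %Rrc\n", GCPhys, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif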
3070
3071/**
3072 * Read from guest physical memory by GC physical address, bypassing
3073 * MMIO and access handlers.
3074 *
3075 * @returns VBox status code.
3076 * @param pVM The cross context VM structure.
3077 * @param pvDst The destination address.
3078 * @param GCPhysSrc The source address (GC physical address).
3079 * @param cb The number of bytes to read.
3080 */
3081VMMDECL(int) PGMPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
3082{
3083 /*
3084 * Treat the first page as a special case.
3085 */
3086 if (!cb)
3087 return VINF_SUCCESS;
3088
3089 /* map the 1st page */
3090 void const *pvSrc;
3091 PGMPAGEMAPLOCK Lock;
3092 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3093 if (RT_FAILURE(rc))
3094 return rc;
3095
3096 /* optimize for the case where access is completely within the first page. */
3097 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
3098 if (RT_LIKELY(cb <= cbPage))
3099 {
3100 memcpy(pvDst, pvSrc, cb);
3101 PGMPhysReleasePageMappingLock(pVM, &Lock);
3102 return VINF_SUCCESS;
3103 }
3104
3105 /* copy to the end of the page. */
3106 memcpy(pvDst, pvSrc, cbPage);
3107 PGMPhysReleasePageMappingLock(pVM, &Lock);
3108 GCPhysSrc += cbPage;
3109 pvDst = (uint8_t *)pvDst + cbPage;
3110 cb -= cbPage;
3111
3112 /*
3113 * Page by page.
3114 */
3115 for (;;)
3116 {
3117 /* map the page */
3118 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3119 if (RT_FAILURE(rc))
3120 return rc;
3121
3122 /* last page? */
3123 if (cb <= PAGE_SIZE)
3124 {
3125 memcpy(pvDst, pvSrc, cb);
3126 PGMPhysReleasePageMappingLock(pVM, &Lock);
3127 return VINF_SUCCESS;
3128 }
3129
3130 /* copy the entire page and advance */
3131 memcpy(pvDst, pvSrc, PAGE_SIZE);
3132 PGMPhysReleasePageMappingLock(pVM, &Lock);
3133 GCPhysSrc += PAGE_SIZE;
3134 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3135 cb -= PAGE_SIZE;
3136 }
3137 /* won't ever get here. */
3138}
3139
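/*
 * Illustrative usage sketch for PGMPhysSimpleReadGCPhys (compiled out): read
 * a magic dword from guest physical memory, bypassing MMIO and access
 * handlers; page-crossing reads are handled inside the API.  The helper name
 * is made up for the example.
 */
#if 0
static int pgmPhysExampleCheckMagic(PVMCC pVM, RTGCPHYS GCPhys, uint32_t u32Expected)
{
    uint32_t u32 = 0;
    int rc = PGMPhysSimpleReadGCPhys(pVM, &u32, GCPhys, sizeof(u32));
    if (RT_SUCCESS(rc) && u32 != u32Expected)
        rc = VERR_INVALID_MAGIC;
    return rc;
}
#endif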
3140
3141/**
3142 * Write to guest physical memory by GC physical address.
3144 *
3145 * This will bypass MMIO and access handlers.
3146 *
3147 * @returns VBox status code.
3148 * @param pVM The cross context VM structure.
3149 * @param GCPhysDst The GC physical address of the destination.
3150 * @param pvSrc The source buffer.
3151 * @param cb The number of bytes to write.
3152 */
3153VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
3154{
3155 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
3156
3157 /*
3158 * Treat the first page as a special case.
3159 */
3160 if (!cb)
3161 return VINF_SUCCESS;
3162
3163 /* map the 1st page */
3164 void *pvDst;
3165 PGMPAGEMAPLOCK Lock;
3166 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3167 if (RT_FAILURE(rc))
3168 return rc;
3169
3170 /* optimize for the case where access is completely within the first page. */
3171 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
3172 if (RT_LIKELY(cb <= cbPage))
3173 {
3174 memcpy(pvDst, pvSrc, cb);
3175 PGMPhysReleasePageMappingLock(pVM, &Lock);
3176 return VINF_SUCCESS;
3177 }
3178
3179 /* copy to the end of the page. */
3180 memcpy(pvDst, pvSrc, cbPage);
3181 PGMPhysReleasePageMappingLock(pVM, &Lock);
3182 GCPhysDst += cbPage;
3183 pvSrc = (const uint8_t *)pvSrc + cbPage;
3184 cb -= cbPage;
3185
3186 /*
3187 * Page by page.
3188 */
3189 for (;;)
3190 {
3191 /* map the page */
3192 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3193 if (RT_FAILURE(rc))
3194 return rc;
3195
3196 /* last page? */
3197 if (cb <= PAGE_SIZE)
3198 {
3199 memcpy(pvDst, pvSrc, cb);
3200 PGMPhysReleasePageMappingLock(pVM, &Lock);
3201 return VINF_SUCCESS;
3202 }
3203
3204 /* copy the entire page and advance */
3205 memcpy(pvDst, pvSrc, PAGE_SIZE);
3206 PGMPhysReleasePageMappingLock(pVM, &Lock);
3207 GCPhysDst += PAGE_SIZE;
3208 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3209 cb -= PAGE_SIZE;
3210 }
3211 /* won't ever get here. */
3212}
3213
3214
3215/**
3216 * Read from guest physical memory referenced by GC pointer.
3217 *
3218 * This function uses the current CR3/CR0/CR4 of the guest and will
3219 * bypass access handlers and not set any accessed bits.
3220 *
3221 * @returns VBox status code.
3222 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3223 * @param pvDst The destination address.
3224 * @param GCPtrSrc The source address (GC pointer).
3225 * @param cb The number of bytes to read.
3226 */
3227VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3228{
3229 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3230/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3231
3232 /*
3233 * Treat the first page as a special case.
3234 */
3235 if (!cb)
3236 return VINF_SUCCESS;
3237
3238 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleRead));
3239 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3240
3241    /* Take the PGM lock here once, because otherwise the called functions each take it for a very short period, which is
3242     * counter-productive when many VCPUs are fighting for the lock.
3243 */
3244 pgmLock(pVM);
3245
3246 /* map the 1st page */
3247 void const *pvSrc;
3248 PGMPAGEMAPLOCK Lock;
3249 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3250 if (RT_FAILURE(rc))
3251 {
3252 pgmUnlock(pVM);
3253 return rc;
3254 }
3255
3256 /* optimize for the case where access is completely within the first page. */
3257 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3258 if (RT_LIKELY(cb <= cbPage))
3259 {
3260 memcpy(pvDst, pvSrc, cb);
3261 PGMPhysReleasePageMappingLock(pVM, &Lock);
3262 pgmUnlock(pVM);
3263 return VINF_SUCCESS;
3264 }
3265
3266 /* copy to the end of the page. */
3267 memcpy(pvDst, pvSrc, cbPage);
3268 PGMPhysReleasePageMappingLock(pVM, &Lock);
3269 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3270 pvDst = (uint8_t *)pvDst + cbPage;
3271 cb -= cbPage;
3272
3273 /*
3274 * Page by page.
3275 */
3276 for (;;)
3277 {
3278 /* map the page */
3279 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3280 if (RT_FAILURE(rc))
3281 {
3282 pgmUnlock(pVM);
3283 return rc;
3284 }
3285
3286 /* last page? */
3287 if (cb <= PAGE_SIZE)
3288 {
3289 memcpy(pvDst, pvSrc, cb);
3290 PGMPhysReleasePageMappingLock(pVM, &Lock);
3291 pgmUnlock(pVM);
3292 return VINF_SUCCESS;
3293 }
3294
3295 /* copy the entire page and advance */
3296 memcpy(pvDst, pvSrc, PAGE_SIZE);
3297 PGMPhysReleasePageMappingLock(pVM, &Lock);
3298 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
3299 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3300 cb -= PAGE_SIZE;
3301 }
3302 /* won't ever get here. */
3303}
3304
3305
3306/**
3307 * Write to guest physical memory referenced by GC pointer.
3308 *
3309 * This function uses the current CR3/CR0/CR4 of the guest and will
3310 * bypass access handlers and not set dirty or accessed bits.
3311 *
3312 * @returns VBox status code.
3313 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3314 * @param GCPtrDst The destination address (GC pointer).
3315 * @param pvSrc The source address.
3316 * @param cb The number of bytes to write.
3317 */
3318VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3319{
3320 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3321 VMCPU_ASSERT_EMT(pVCpu);
3322
3323 /*
3324 * Treat the first page as a special case.
3325 */
3326 if (!cb)
3327 return VINF_SUCCESS;
3328
3329 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWrite));
3330 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3331
3332 /* map the 1st page */
3333 void *pvDst;
3334 PGMPAGEMAPLOCK Lock;
3335 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3336 if (RT_FAILURE(rc))
3337 return rc;
3338
3339 /* optimize for the case where access is completely within the first page. */
3340 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3341 if (RT_LIKELY(cb <= cbPage))
3342 {
3343 memcpy(pvDst, pvSrc, cb);
3344 PGMPhysReleasePageMappingLock(pVM, &Lock);
3345 return VINF_SUCCESS;
3346 }
3347
3348 /* copy to the end of the page. */
3349 memcpy(pvDst, pvSrc, cbPage);
3350 PGMPhysReleasePageMappingLock(pVM, &Lock);
3351 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3352 pvSrc = (const uint8_t *)pvSrc + cbPage;
3353 cb -= cbPage;
3354
3355 /*
3356 * Page by page.
3357 */
3358 for (;;)
3359 {
3360 /* map the page */
3361 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3362 if (RT_FAILURE(rc))
3363 return rc;
3364
3365 /* last page? */
3366 if (cb <= PAGE_SIZE)
3367 {
3368 memcpy(pvDst, pvSrc, cb);
3369 PGMPhysReleasePageMappingLock(pVM, &Lock);
3370 return VINF_SUCCESS;
3371 }
3372
3373 /* copy the entire page and advance */
3374 memcpy(pvDst, pvSrc, PAGE_SIZE);
3375 PGMPhysReleasePageMappingLock(pVM, &Lock);
3376 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3377 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3378 cb -= PAGE_SIZE;
3379 }
3380 /* won't ever get here. */
3381}
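
/* Illustrative sketch, not part of the original file: a minimal use of
 * PGMPhysSimpleWriteGCPtr from EMT code. GCPtrScratch is a hypothetical,
 * currently mapped guest address; no handlers fire and no A/D bits are
 * touched by this call. */
#if 0 /* example only */
static int pgmExampleSimpleWrite(PVMCPUCC pVCpu, RTGCPTR GCPtrScratch)
{
    uint32_t const u32Magic = UINT32_C(0x19841995);
    /* Copies the four bytes into guest memory via the current CR3 mapping. */
    return PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrScratch, &u32Magic, sizeof(u32Magic));
}
#endif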
3382
3383
3384/**
3385 * Write to guest physical memory referenced by GC pointer and update the PTE.
3386 *
3387 * This function uses the current CR3/CR0/CR4 of the guest and will
3388 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3389 *
3390 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3391 *
3392 * @returns VBox status code.
3393 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3394 * @param GCPtrDst The destination address (GC pointer).
3395 * @param pvSrc The source address.
3396 * @param cb The number of bytes to write.
3397 */
3398VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3399{
3400 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3401 VMCPU_ASSERT_EMT(pVCpu);
3402
3403 /*
3404 * Treat the first page as a special case.
3405 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3406 */
3407 if (!cb)
3408 return VINF_SUCCESS;
3409
3410 /* map the 1st page */
3411 void *pvDst;
3412 PGMPAGEMAPLOCK Lock;
3413 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3414 if (RT_FAILURE(rc))
3415 return rc;
3416
3417 /* optimize for the case where access is completely within the first page. */
3418 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3419 if (RT_LIKELY(cb <= cbPage))
3420 {
3421 memcpy(pvDst, pvSrc, cb);
3422 PGMPhysReleasePageMappingLock(pVM, &Lock);
3423 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3424 return VINF_SUCCESS;
3425 }
3426
3427 /* copy to the end of the page. */
3428 memcpy(pvDst, pvSrc, cbPage);
3429 PGMPhysReleasePageMappingLock(pVM, &Lock);
3430 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3431 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3432 pvSrc = (const uint8_t *)pvSrc + cbPage;
3433 cb -= cbPage;
3434
3435 /*
3436 * Page by page.
3437 */
3438 for (;;)
3439 {
3440 /* map the page */
3441 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3442 if (RT_FAILURE(rc))
3443 return rc;
3444
3445 /* last page? */
3446 if (cb <= PAGE_SIZE)
3447 {
3448 memcpy(pvDst, pvSrc, cb);
3449 PGMPhysReleasePageMappingLock(pVM, &Lock);
3450 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3451 return VINF_SUCCESS;
3452 }
3453
3454 /* copy the entire page and advance */
3455 memcpy(pvDst, pvSrc, PAGE_SIZE);
3456 PGMPhysReleasePageMappingLock(pVM, &Lock);
3457 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3458 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3459 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3460 cb -= PAGE_SIZE;
3461 }
3462 /* won't ever get here. */
3463}
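
/* Illustrative sketch, not part of the original file: the same kind of store,
 * but through PGMPhysSimpleDirtyWriteGCPtr so the guest PTE gets its accessed
 * and dirty bits set, as an instruction emulator would want. */
#if 0 /* example only */
static int pgmExampleDirtyWrite(PVMCPUCC pVCpu, RTGCPTR GCPtrDst)
{
    uint8_t const abPattern[4] = { 0xde, 0xad, 0xbe, 0xef };
    return PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, abPattern, sizeof(abPattern));
}
#endif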
3464
3465
3466/**
3467 * Read from guest physical memory referenced by GC pointer.
3468 *
3469 * This function uses the current CR3/CR0/CR4 of the guest and will
3470 * respect access handlers and set accessed bits.
3471 *
3472 * @returns Strict VBox status, see PGMPhysRead for details.
3473 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3474 * specified virtual address.
3475 *
3476 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3477 * @param pvDst The destination address.
3478 * @param GCPtrSrc The source address (GC pointer).
3479 * @param cb The number of bytes to read.
3480 * @param enmOrigin Who is calling.
3481 * @thread EMT(pVCpu)
3482 */
3483VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3484{
3485 RTGCPHYS GCPhys;
3486 uint64_t fFlags;
3487 int rc;
3488 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3489 VMCPU_ASSERT_EMT(pVCpu);
3490
3491 /*
3492 * Anything to do?
3493 */
3494 if (!cb)
3495 return VINF_SUCCESS;
3496
3497 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3498
3499 /*
3500 * Optimize reads within a single page.
3501 */
3502 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3503 {
3504 /* Convert virtual to physical address + flags */
3505 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3506 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3507 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3508
3509 /* mark the guest page as accessed. */
3510 if (!(fFlags & X86_PTE_A))
3511 {
3512 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3513 AssertRC(rc);
3514 }
3515
3516 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3517 }
3518
3519 /*
3520 * Page by page.
3521 */
3522 for (;;)
3523 {
3524 /* Convert virtual to physical address + flags */
3525 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3526 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3527 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3528
3529 /* mark the guest page as accessed. */
3530 if (!(fFlags & X86_PTE_A))
3531 {
3532 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3533 AssertRC(rc);
3534 }
3535
3536 /* copy */
3537 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3538 if (cbRead < cb)
3539 {
3540 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3541 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3542 { /* likely */ }
3543 else
3544 return rcStrict;
3545 }
3546 else /* Last page (cbRead is PAGE_SIZE, we only need cb!) */
3547 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3548
3549 /* next */
3550 Assert(cb > cbRead);
3551 cb -= cbRead;
3552 pvDst = (uint8_t *)pvDst + cbRead;
3553 GCPtrSrc += cbRead;
3554 }
3555}
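
/* Illustrative sketch, not part of the original file: reading guest memory
 * through the current paging mode with access handlers respected and the
 * accessed bit set. PGMACCESSORIGIN_DEBUGGER is assumed to be a valid origin
 * value here; real callers pass the origin of their own subsystem. */
#if 0 /* example only */
static VBOXSTRICTRC pgmExampleReadGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrSrc)
{
    uint8_t abBuf[64];
    VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, abBuf, GCPtrSrc, sizeof(abBuf), PGMACCESSORIGIN_DEBUGGER);
    if (rcStrict == VINF_SUCCESS)
    { /* abBuf now holds the guest bytes; other statuses come from handlers. */ }
    return rcStrict;
}
#endif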
3556
3557
3558/**
3559 * Write to guest physical memory referenced by GC pointer.
3560 *
3561 * This function uses the current CR3/CR0/CR4 of the guest and will
3562 * respect access handlers and set dirty and accessed bits.
3563 *
3564 * @returns Strict VBox status, see PGMPhysWrite for details.
3565 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3566 * specified virtual address.
3567 *
3568 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3569 * @param GCPtrDst The destination address (GC pointer).
3570 * @param pvSrc The source address.
3571 * @param cb The number of bytes to write.
3572 * @param enmOrigin Who is calling.
3573 */
3574VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3575{
3576 RTGCPHYS GCPhys;
3577 uint64_t fFlags;
3578 int rc;
3579 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3580 VMCPU_ASSERT_EMT(pVCpu);
3581
3582 /*
3583 * Anything to do?
3584 */
3585 if (!cb)
3586 return VINF_SUCCESS;
3587
3588 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3589
3590 /*
3591 * Optimize writes within a single page.
3592 */
3593 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3594 {
3595 /* Convert virtual to physical address + flags */
3596 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3597 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3598 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3599
3600 /* Mention when we ignore X86_PTE_RW... */
3601 if (!(fFlags & X86_PTE_RW))
3602 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3603
3604 /* Mark the guest page as accessed and dirty if necessary. */
3605 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3606 {
3607 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3608 AssertRC(rc);
3609 }
3610
3611 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3612 }
3613
3614 /*
3615 * Page by page.
3616 */
3617 for (;;)
3618 {
3619 /* Convert virtual to physical address + flags */
3620 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3621 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3622 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3623
3624 /* Mention when we ignore X86_PTE_RW... */
3625 if (!(fFlags & X86_PTE_RW))
3626 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3627
3628 /* Mark the guest page as accessed and dirty if necessary. */
3629 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3630 {
3631 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3632 AssertRC(rc);
3633 }
3634
3635 /* copy */
3636 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3637 if (cbWrite < cb)
3638 {
3639 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3640 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3641 { /* likely */ }
3642 else
3643 return rcStrict;
3644 }
3645 else /* Last page (cbWrite is PAGE_SIZE, we only need cb!) */
3646 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3647
3648 /* next */
3649 Assert(cb > cbWrite);
3650 cb -= cbWrite;
3651 pvSrc = (uint8_t *)pvSrc + cbWrite;
3652 GCPtrDst += cbWrite;
3653 }
3654}
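
/* Illustrative sketch, not part of the original file: the write-side
 * counterpart of the read example above, again assuming
 * PGMACCESSORIGIN_DEBUGGER as the origin. Handlers fire and A/D bits are set. */
#if 0 /* example only */
static VBOXSTRICTRC pgmExampleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, uint32_t u32Value)
{
    return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, &u32Value, sizeof(u32Value), PGMACCESSORIGIN_DEBUGGER);
}
#endif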
3655
3656
3657/**
3658 * Performs a read of guest virtual memory for instruction emulation.
3659 *
3660 * This will check permissions, raise exceptions and update the access bits.
3661 *
3662 * The current implementation will bypass all access handlers. It may later be
3663 * changed to at least respect MMIO.
3664 *
3665 *
3666 * @returns VBox status code suitable to scheduling.
3667 * @retval VINF_SUCCESS if the read was performed successfully.
3668 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3669 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3670 *
3671 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3672 * @param pCtxCore The context core.
3673 * @param pvDst Where to put the bytes we've read.
3674 * @param GCPtrSrc The source address.
3675 * @param cb The number of bytes to read. Not more than a page.
3676 *
3677 * @remark This function will dynamically map physical pages in GC. This may unmap
3678 * mappings done by the caller. Be careful!
3679 */
3680VMMDECL(int) PGMPhysInterpretedRead(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3681{
3682 NOREF(pCtxCore);
3683 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3684 Assert(cb <= PAGE_SIZE);
3685 VMCPU_ASSERT_EMT(pVCpu);
3686
3687/** @todo r=bird: This isn't perfect!
3688 * -# It's not checking for reserved bits being 1.
3689 * -# It's not correctly dealing with the access bit.
3690 * -# It's not respecting MMIO memory or any other access handlers.
3691 */
3692 /*
3693 * 1. Translate virtual to physical. This may fault.
3694 * 2. Map the physical address.
3695 * 3. Do the read operation.
3696 * 4. Set access bits if required.
3697 */
3698 int rc;
3699 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3700 if (cb <= cb1)
3701 {
3702 /*
3703 * Not crossing pages.
3704 */
3705 RTGCPHYS GCPhys;
3706 uint64_t fFlags;
3707 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3708 if (RT_SUCCESS(rc))
3709 {
3710 /** @todo we should check reserved bits ... */
3711 PGMPAGEMAPLOCK PgMpLck;
3712 void const *pvSrc;
3713 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
3714 switch (rc)
3715 {
3716 case VINF_SUCCESS:
3717 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3718 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3719 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3720 break;
3721 case VERR_PGM_PHYS_PAGE_RESERVED:
3722 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3723 memset(pvDst, 0xff, cb);
3724 break;
3725 default:
3726 Assert(RT_FAILURE_NP(rc));
3727 return rc;
3728 }
3729
3730 /** @todo access bit emulation isn't 100% correct. */
3731 if (!(fFlags & X86_PTE_A))
3732 {
3733 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3734 AssertRC(rc);
3735 }
3736 return VINF_SUCCESS;
3737 }
3738 }
3739 else
3740 {
3741 /*
3742 * Crosses pages.
3743 */
3744 size_t cb2 = cb - cb1;
3745 uint64_t fFlags1;
3746 RTGCPHYS GCPhys1;
3747 uint64_t fFlags2;
3748 RTGCPHYS GCPhys2;
3749 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3750 if (RT_SUCCESS(rc))
3751 {
3752 rc = PGMGstGetPage(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3753 if (RT_SUCCESS(rc))
3754 {
3755 /** @todo we should check reserved bits ... */
3756 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3757 PGMPAGEMAPLOCK PgMpLck;
3758 void const *pvSrc1;
3759 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
3760 switch (rc)
3761 {
3762 case VINF_SUCCESS:
3763 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3764 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3765 break;
3766 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3767 memset(pvDst, 0xff, cb1);
3768 break;
3769 default:
3770 Assert(RT_FAILURE_NP(rc));
3771 return rc;
3772 }
3773
3774 void const *pvSrc2;
3775 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
3776 switch (rc)
3777 {
3778 case VINF_SUCCESS:
3779 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3780 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3781 break;
3782 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3783 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3784 break;
3785 default:
3786 Assert(RT_FAILURE_NP(rc));
3787 return rc;
3788 }
3789
3790 if (!(fFlags1 & X86_PTE_A))
3791 {
3792 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3793 AssertRC(rc);
3794 }
3795 if (!(fFlags2 & X86_PTE_A))
3796 {
3797 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3798 AssertRC(rc);
3799 }
3800 return VINF_SUCCESS;
3801 }
3802 }
3803 }
3804
3805 /*
3806 * Raise a #PF.
3807 */
3808 uint32_t uErr;
3809
3810 /* Get the current privilege level. */
3811 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3812 switch (rc)
3813 {
3814 case VINF_SUCCESS:
3815 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3816 break;
3817
3818 case VERR_PAGE_NOT_PRESENT:
3819 case VERR_PAGE_TABLE_NOT_PRESENT:
3820 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3821 break;
3822
3823 default:
3824 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3825 return rc;
3826 }
3827 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3828 rc = TRPMAssertXcptPF(pVCpu, GCPtrSrc, uErr);
3829 if (RT_SUCCESS(rc))
3830 return VINF_EM_RAW_GUEST_TRAP;
3831 return rc;
3832}
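
/* Illustrative sketch, not part of the original file: how emulation code
 * might fetch a few bytes with PGMPhysInterpretedRead and let it assert the
 * #PF itself when the translation fails. pCtxCore is unused by the function,
 * so NULL is passed here. */
#if 0 /* example only */
static int pgmExampleInterpretedRead(PVMCPUCC pVCpu, RTGCUINTPTR GCPtrPC)
{
    uint8_t abBytes[16];
    int rc = PGMPhysInterpretedRead(pVCpu, NULL /*pCtxCore*/, abBytes, GCPtrPC, sizeof(abBytes));
    if (rc == VINF_EM_RAW_GUEST_TRAP)
    { /* a #PF has been asserted via TRPM and should be dispatched by the caller. */ }
    return rc;
}
#endif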
3833
3834
3835/**
3836 * Performs a read of guest virtual memory for instruction emulation.
3837 *
3838 * This will check permissions, raise exceptions and update the access bits.
3839 *
3840 * The current implementation will bypass all access handlers. It may later be
3841 * changed to at least respect MMIO.
3842 *
3843 *
3844 * @returns VBox status code suitable to scheduling.
3845 * @retval VINF_SUCCESS if the read was performed successfully.
3846 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3847 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3848 *
3849 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3850 * @param pCtxCore The context core.
3851 * @param pvDst Where to put the bytes we've read.
3852 * @param GCPtrSrc The source address.
3853 * @param cb The number of bytes to read. Not more than a page.
3854 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3855 * an appropriate error status will be returned (no
3856 * informational statuses at all).
3857 *
3858 *
3859 * @remarks Takes the PGM lock.
3860 * @remarks A page fault on the 2nd page of the access will be raised without
3861 * writing the bits on the first page since we're ASSUMING that the
3862 * caller is emulating an instruction access.
3863 * @remarks This function will dynamically map physical pages in GC. This may
3864 * unmap mappings done by the caller. Be careful!
3865 */
3866VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,
3867 bool fRaiseTrap)
3868{
3869 NOREF(pCtxCore);
3870 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3871 Assert(cb <= PAGE_SIZE);
3872 VMCPU_ASSERT_EMT(pVCpu);
3873
3874 /*
3875 * 1. Translate virtual to physical. This may fault.
3876 * 2. Map the physical address.
3877 * 3. Do the read operation.
3878 * 4. Set access bits if required.
3879 */
3880 int rc;
3881 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3882 if (cb <= cb1)
3883 {
3884 /*
3885 * Not crossing pages.
3886 */
3887 RTGCPHYS GCPhys;
3888 uint64_t fFlags;
3889 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3890 if (RT_SUCCESS(rc))
3891 {
3892 if (1) /** @todo we should check reserved bits ... */
3893 {
3894 const void *pvSrc;
3895 PGMPAGEMAPLOCK Lock;
3896 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3897 switch (rc)
3898 {
3899 case VINF_SUCCESS:
3900 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3901 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3902 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3903 PGMPhysReleasePageMappingLock(pVM, &Lock);
3904 break;
3905 case VERR_PGM_PHYS_PAGE_RESERVED:
3906 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3907 memset(pvDst, 0xff, cb);
3908 break;
3909 default:
3910 AssertMsgFailed(("%Rrc\n", rc));
3911 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3912 return rc;
3913 }
3914
3915 if (!(fFlags & X86_PTE_A))
3916 {
3917 /** @todo access bit emulation isn't 100% correct. */
3918 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3919 AssertRC(rc);
3920 }
3921 return VINF_SUCCESS;
3922 }
3923 }
3924 }
3925 else
3926 {
3927 /*
3928 * Crosses pages.
3929 */
3930 size_t cb2 = cb - cb1;
3931 uint64_t fFlags1;
3932 RTGCPHYS GCPhys1;
3933 uint64_t fFlags2;
3934 RTGCPHYS GCPhys2;
3935 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3936 if (RT_SUCCESS(rc))
3937 {
3938 rc = PGMGstGetPage(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3939 if (RT_SUCCESS(rc))
3940 {
3941 if (1) /** @todo we should check reserved bits ... */
3942 {
3943 const void *pvSrc;
3944 PGMPAGEMAPLOCK Lock;
3945 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3946 switch (rc)
3947 {
3948 case VINF_SUCCESS:
3949 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3950 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3951 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3952 PGMPhysReleasePageMappingLock(pVM, &Lock);
3953 break;
3954 case VERR_PGM_PHYS_PAGE_RESERVED:
3955 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3956 memset(pvDst, 0xff, cb1);
3957 break;
3958 default:
3959 AssertMsgFailed(("%Rrc\n", rc));
3960 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3961 return rc;
3962 }
3963
3964 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3965 switch (rc)
3966 {
3967 case VINF_SUCCESS:
3968 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3969 PGMPhysReleasePageMappingLock(pVM, &Lock);
3970 break;
3971 case VERR_PGM_PHYS_PAGE_RESERVED:
3972 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3973 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3974 break;
3975 default:
3976 AssertMsgFailed(("%Rrc\n", rc));
3977 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3978 return rc;
3979 }
3980
3981 if (!(fFlags1 & X86_PTE_A))
3982 {
3983 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3984 AssertRC(rc);
3985 }
3986 if (!(fFlags2 & X86_PTE_A))
3987 {
3988 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3989 AssertRC(rc);
3990 }
3991 return VINF_SUCCESS;
3992 }
3993 /* sort out which page */
3994 }
3995 else
3996 GCPtrSrc += cb1; /* fault on 2nd page */
3997 }
3998 }
3999
4000 /*
4001 * Raise a #PF if we're allowed to do that.
4002 */
4003 /* Calc the error bits. */
4004 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
4005 uint32_t uErr;
4006 switch (rc)
4007 {
4008 case VINF_SUCCESS:
4009 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
4010 rc = VERR_ACCESS_DENIED;
4011 break;
4012
4013 case VERR_PAGE_NOT_PRESENT:
4014 case VERR_PAGE_TABLE_NOT_PRESENT:
4015 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
4016 break;
4017
4018 default:
4019 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
4020 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4021 return rc;
4022 }
4023 if (fRaiseTrap)
4024 {
4025 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
4026 rc = TRPMAssertXcptPF(pVCpu, GCPtrSrc, uErr);
4027 if (RT_SUCCESS(rc))
4028 return VINF_EM_RAW_GUEST_TRAP;
4029 return rc;
4030 }
4031 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
4032 return rc;
4033}
4034
4035
4036/**
4037 * Performs a write to guest virtual memory for instruction emulation.
4038 *
4039 * This will check permissions, raise exceptions and update the dirty and access
4040 * bits.
4041 *
4042 * @returns VBox status code suitable to scheduling.
4043 * @retval VINF_SUCCESS if the write was performed successfully.
4044 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
4045 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
4046 *
4047 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4048 * @param pCtxCore The context core.
4049 * @param GCPtrDst The destination address.
4050 * @param pvSrc What to write.
4051 * @param cb The number of bytes to write. Not more than a page.
4052 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
4053 * an appropriate error status will be returned (no
4054 * informational statuses at all).
4055 *
4056 * @remarks Takes the PGM lock.
4057 * @remarks A page fault on the 2nd page of the access will be raised without
4058 * writing the bits on the first page since we're ASSUMING that the
4059 * caller is emulating an instruction access.
4060 * @remarks This function will dynamically map physical pages in GC. This may
4061 * unmap mappings done by the caller. Be careful!
4062 */
4063VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,
4064 size_t cb, bool fRaiseTrap)
4065{
4066 NOREF(pCtxCore);
4067 Assert(cb <= PAGE_SIZE);
4068 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4069 VMCPU_ASSERT_EMT(pVCpu);
4070
4071 /*
4072 * 1. Translate virtual to physical. This may fault.
4073 * 2. Map the physical address.
4074 * 3. Do the write operation.
4075 * 4. Set access bits if required.
4076 */
4077 /** @todo Since this method is frequently used by EMInterpret or IOM
4078 * upon a write fault to a write-access monitored page, we can
4079 * reuse the guest page table walking from the \#PF code. */
4080 int rc;
4081 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
4082 if (cb <= cb1)
4083 {
4084 /*
4085 * Not crossing pages.
4086 */
4087 RTGCPHYS GCPhys;
4088 uint64_t fFlags;
4089 rc = PGMGstGetPage(pVCpu, GCPtrDst, &fFlags, &GCPhys);
4090 if (RT_SUCCESS(rc))
4091 {
4092 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
4093 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
4094 && CPUMGetGuestCPL(pVCpu) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
4095 {
4096 void *pvDst;
4097 PGMPAGEMAPLOCK Lock;
4098 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
4099 switch (rc)
4100 {
4101 case VINF_SUCCESS:
4102 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
4103 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
4104 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
4105 PGMPhysReleasePageMappingLock(pVM, &Lock);
4106 break;
4107 case VERR_PGM_PHYS_PAGE_RESERVED:
4108 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
4109 /* bit bucket */
4110 break;
4111 default:
4112 AssertMsgFailed(("%Rrc\n", rc));
4113 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4114 return rc;
4115 }
4116
4117 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
4118 {
4119 /** @todo dirty & access bit emulation isn't 100% correct. */
4120 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
4121 AssertRC(rc);
4122 }
4123 return VINF_SUCCESS;
4124 }
4125 rc = VERR_ACCESS_DENIED;
4126 }
4127 }
4128 else
4129 {
4130 /*
4131 * Crosses pages.
4132 */
4133 size_t cb2 = cb - cb1;
4134 uint64_t fFlags1;
4135 RTGCPHYS GCPhys1;
4136 uint64_t fFlags2;
4137 RTGCPHYS GCPhys2;
4138 rc = PGMGstGetPage(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
4139 if (RT_SUCCESS(rc))
4140 {
4141 rc = PGMGstGetPage(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
4142 if (RT_SUCCESS(rc))
4143 {
4144 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
4145 && (fFlags2 & X86_PTE_RW))
4146 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
4147 && CPUMGetGuestCPL(pVCpu) <= 2) )
4148 {
4149 void *pvDst;
4150 PGMPAGEMAPLOCK Lock;
4151 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
4152 switch (rc)
4153 {
4154 case VINF_SUCCESS:
4155 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
4156 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
4157 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
4158 PGMPhysReleasePageMappingLock(pVM, &Lock);
4159 break;
4160 case VERR_PGM_PHYS_PAGE_RESERVED:
4161 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
4162 /* bit bucket */
4163 break;
4164 default:
4165 AssertMsgFailed(("%Rrc\n", rc));
4166 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4167 return rc;
4168 }
4169
4170 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
4171 switch (rc)
4172 {
4173 case VINF_SUCCESS:
4174 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
4175 PGMPhysReleasePageMappingLock(pVM, &Lock);
4176 break;
4177 case VERR_PGM_PHYS_PAGE_RESERVED:
4178 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
4179 /* bit bucket */
4180 break;
4181 default:
4182 AssertMsgFailed(("%Rrc\n", rc));
4183 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4184 return rc;
4185 }
4186
4187 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
4188 {
4189 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
4190 AssertRC(rc);
4191 }
4192 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
4193 {
4194 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
4195 AssertRC(rc);
4196 }
4197 return VINF_SUCCESS;
4198 }
4199 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
4200 GCPtrDst += cb1; /* fault on the 2nd page. */
4201 rc = VERR_ACCESS_DENIED;
4202 }
4203 else
4204 GCPtrDst += cb1; /* fault on the 2nd page. */
4205 }
4206 }
4207
4208 /*
4209 * Raise a #PF if we're allowed to do that.
4210 */
4211 /* Calc the error bits. */
4212 uint32_t uErr;
4213 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
4214 switch (rc)
4215 {
4216 case VINF_SUCCESS:
4217 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
4218 rc = VERR_ACCESS_DENIED;
4219 break;
4220
4221 case VERR_ACCESS_DENIED:
4222 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
4223 break;
4224
4225 case VERR_PAGE_NOT_PRESENT:
4226 case VERR_PAGE_TABLE_NOT_PRESENT:
4227 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
4228 break;
4229
4230 default:
4231 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
4232 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4233 return rc;
4234 }
4235 if (fRaiseTrap)
4236 {
4237 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
4238 rc = TRPMAssertXcptPF(pVCpu, GCPtrDst, uErr);
4239 if (RT_SUCCESS(rc))
4240 return VINF_EM_RAW_GUEST_TRAP;
4241 return rc;
4242 }
4243 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
4244 return rc;
4245}
4246
4247
4248/**
4249 * Return the page type of the specified physical address.
4250 *
4251 * @returns The page type.
4252 * @param pVM The cross context VM structure.
4253 * @param GCPhys Guest physical address
4254 */
4255VMM_INT_DECL(PGMPAGETYPE) PGMPhysGetPageType(PVMCC pVM, RTGCPHYS GCPhys)
4256{
4257 pgmLock(pVM);
4258 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
4259 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
4260 pgmUnlock(pVM);
4261
4262 return enmPgType;
4263}
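
/* Illustrative sketch, not part of the original file: using the page type to
 * tell plain RAM from everything else before deciding how to treat an access. */
#if 0 /* example only */
static bool pgmExampleIsRamPage(PVMCC pVM, RTGCPHYS GCPhys)
{
    return PGMPhysGetPageType(pVM, GCPhys) == PGMPAGETYPE_RAM;
}
#endif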
4264
4265
4266/**
4267 * Converts a GC physical address to a HC ring-3 pointer, with some
4268 * additional checks.
4269 *
4270 * @returns VBox status code (no informational statuses).
4271 *
4272 * @param pVM The cross context VM structure.
4273 * @param pVCpu The cross context virtual CPU structure of the
4274 * calling EMT.
4275 * @param GCPhys The GC physical address to convert. This API masks
4276 * the A20 line when necessary.
4277 * @param puTlbPhysRev Where to read the physical TLB revision. Needs to
4278 * be done while holding the PGM lock.
4279 * @param ppb Where to store the pointer corresponding to GCPhys
4280 * on success.
4281 * @param pfTlb The TLB flags and revision. We only add stuff.
4282 *
4283 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr and
4284 * PGMPhysIemGCPhys2Ptr.
4285 *
4286 * @thread EMT(pVCpu).
4287 */
4288VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
4289#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
4290 R3PTRTYPE(uint8_t *) *ppb,
4291#else
4292 R3R0PTRTYPE(uint8_t *) *ppb,
4293#endif
4294 uint64_t *pfTlb)
4295{
4296 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
4297 Assert(!(GCPhys & X86_PAGE_OFFSET_MASK));
4298
4299 pgmLock(pVM);
4300
4301 PPGMRAMRANGE pRam;
4302 PPGMPAGE pPage;
4303 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4304 if (RT_SUCCESS(rc))
4305 {
4306 if (!PGM_PAGE_IS_BALLOONED(pPage))
4307 {
4308 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4309 {
4310 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
4311 {
4312 /*
4313 * No access handler.
4314 */
4315 switch (PGM_PAGE_GET_STATE(pPage))
4316 {
4317 case PGM_PAGE_STATE_ALLOCATED:
4318 *pfTlb |= *puTlbPhysRev;
4319 break;
4320 case PGM_PAGE_STATE_BALLOONED:
4321 AssertFailed();
4322 RT_FALL_THRU();
4323 case PGM_PAGE_STATE_ZERO:
4324 case PGM_PAGE_STATE_SHARED:
4325 case PGM_PAGE_STATE_WRITE_MONITORED:
4326 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
4327 break;
4328 }
4329#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
4330 *pfTlb |= PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4331 *ppb = NULL;
4332#else
4333 PPGMPAGEMAPTLBE pTlbe;
4334 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4335 AssertLogRelRCReturn(rc, rc);
4336 *ppb = (uint8_t *)pTlbe->pv;
4337#endif
4338 }
4339 else if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
4340 {
4341 /*
4342 * MMIO or similar all access handler: Catch all access.
4343 */
4344 *pfTlb |= *puTlbPhysRev
4345 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4346 *ppb = NULL;
4347 }
4348 else
4349 {
4350 /*
4351 * Write access handler: Catch write accesses if active.
4352 */
4353 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
4354 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
4355 else
4356 switch (PGM_PAGE_GET_STATE(pPage))
4357 {
4358 case PGM_PAGE_STATE_ALLOCATED:
4359 *pfTlb |= *puTlbPhysRev;
4360 break;
4361 case PGM_PAGE_STATE_BALLOONED:
4362 AssertFailed();
4363 RT_FALL_THRU();
4364 case PGM_PAGE_STATE_ZERO:
4365 case PGM_PAGE_STATE_SHARED:
4366 case PGM_PAGE_STATE_WRITE_MONITORED:
4367 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
4368 break;
4369 }
4370#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
4371 *pfTlb |= PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4372 *ppb = NULL;
4373#else
4374 PPGMPAGEMAPTLBE pTlbe;
4375 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4376 AssertLogRelRCReturn(rc, rc);
4377 *ppb = (uint8_t *)pTlbe->pv;
4378#endif
4379 }
4380 }
4381 else
4382 {
4383 /* Alias MMIO: For now, we catch all access. */
4384 *pfTlb |= *puTlbPhysRev
4385 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4386 *ppb = NULL;
4387 }
4388 }
4389 else
4390 {
4391        /* Ballooned: Shouldn't get here, but we read the zero page via PGMPhysRead and writes go to /dev/null. */
4392 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4393 *ppb = NULL;
4394 }
4395 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 pPage=%R[pgmpage]\n", GCPhys, *ppb, *pfTlb, pPage));
4396 }
4397 else
4398 {
4399 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4400 *ppb = NULL;
4401 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 (rc=%Rrc)\n", GCPhys, *ppb, *pfTlb, rc));
4402 }
4403
4404 pgmUnlock(pVM);
4405 return VINF_SUCCESS;
4406}
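
/* Illustrative sketch, not part of the original file: how an IEM-style TLB
 * fill might consume the output of PGMPhysIemGCPhys2PtrNoLock. The local
 * uTlbPhysRev stands in for the caller's physical TLB revision counter, and
 * the ring-3/ring-0 pointer type is assumed to be plain uint8_t * here. */
#if 0 /* example only */
static void pgmExampleFillTlbEntry(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysPage)
{
    uint64_t const volatile uTlbPhysRev = UINT64_C(0x100); /* hypothetical revision */
    uint8_t                *pbMapping   = NULL;
    uint64_t                fFlags      = 0;
    int rc = PGMPhysIemGCPhys2PtrNoLock(pVM, pVCpu, GCPhysPage, &uTlbPhysRev, &pbMapping, &fFlags);
    if (RT_SUCCESS(rc) && !(fFlags & PGMIEMGCPHYS2PTR_F_NO_READ))
    { /* pbMapping may be read directly (it can be NULL in the 2x4GB ring-0 case). */ }
}
#endif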
4407
4408
4409/**
4410 * Converts a GC physical address to a HC ring-3 pointer, with some
4411 * additional checks.
4412 *
4413 * @returns VBox status code (no informational statuses).
4414 * @retval VINF_SUCCESS on success.
4415 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4416 * access handler of some kind.
4417 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4418 * accesses or is odd in any way.
4419 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4420 *
4421 * @param pVM The cross context VM structure.
4422 * @param pVCpu The cross context virtual CPU structure of the
4423 * calling EMT.
4424 * @param GCPhys The GC physical address to convert. This API masks
4425 * the A20 line when necessary.
4426 * @param fWritable Whether write access is required.
4427 * @param fByPassHandlers Whether to bypass access handlers.
4428 * @param ppv Where to store the pointer corresponding to GCPhys
4429 * on success.
4430 * @param pLock Where to return the page mapping lock for use with PGMPhysReleasePageMappingLock.
4431 *
4432 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
4433 * @thread EMT(pVCpu).
4434 */
4435VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
4436 void **ppv, PPGMPAGEMAPLOCK pLock)
4437{
4438 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
4439
4440 pgmLock(pVM);
4441
4442 PPGMRAMRANGE pRam;
4443 PPGMPAGE pPage;
4444 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4445 if (RT_SUCCESS(rc))
4446 {
4447 if (PGM_PAGE_IS_BALLOONED(pPage))
4448 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4449 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4450 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4451 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4452 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4453 rc = VINF_SUCCESS;
4454 else
4455 {
4456 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4457 {
4458 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4459 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4460 }
4461 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4462 {
4463 Assert(!fByPassHandlers);
4464 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4465 }
4466 }
4467 if (RT_SUCCESS(rc))
4468 {
4469 int rc2;
4470
4471 /* Make sure what we return is writable. */
4472 if (fWritable)
4473 switch (PGM_PAGE_GET_STATE(pPage))
4474 {
4475 case PGM_PAGE_STATE_ALLOCATED:
4476 break;
4477 case PGM_PAGE_STATE_BALLOONED:
4478 AssertFailed();
4479 break;
4480 case PGM_PAGE_STATE_ZERO:
4481 case PGM_PAGE_STATE_SHARED:
4482 case PGM_PAGE_STATE_WRITE_MONITORED:
4483 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
4484 AssertLogRelRCReturn(rc2, rc2);
4485 break;
4486 }
4487
4488#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
4489 void *pv;
4490 rc = pgmRZDynMapHCPageInlined(pVCpu,
4491 PGM_PAGE_GET_HCPHYS(pPage),
4492 &pv
4493 RTLOG_COMMA_SRC_POS);
4494 if (RT_FAILURE(rc))
4495 return rc;
4496 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4497 pLock->pvPage = pv;
4498 pLock->pVCpu = pVCpu;
4499
4500#else
4501 /* Get a ring-3 mapping of the address. */
4502 PPGMPAGEMAPTLBE pTlbe;
4503 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4504 AssertLogRelRCReturn(rc2, rc2);
4505
4506 /* Lock it and calculate the address. */
4507 if (fWritable)
4508 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
4509 else
4510 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
4511 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4512#endif
4513
4514 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
4515 }
4516 else
4517 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
4518
4519 /* else: handler catching all access, no pointer returned. */
4520 }
4521 else
4522 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
4523
4524 pgmUnlock(pVM);
4525 return rc;
4526}
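
/* Illustrative sketch, not part of the original file: typical use of
 * PGMPhysIemGCPhys2Ptr - map a page writable, poke a byte, release the lock.
 * The VERR_PGM_PHYS_TLB_CATCH_* statuses tell the caller to fall back to
 * PGMPhysWrite instead of direct access. */
#if 0 /* example only */
static int pgmExampleDirectByteWrite(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint8_t bValue)
{
    void          *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, true /*fWritable*/, false /*fByPassHandlers*/, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif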
4527
4528
4529/**
4530 * Checks if the given GCPhys page requires special handling for the given access
4531 * because it's MMIO or otherwise monitored.
4532 *
4533 * @returns VBox status code (no informational statuses).
4534 * @retval VINF_SUCCESS on success.
4535 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4536 * access handler of some kind.
4537 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4538 * accesses or is odd in any way.
4539 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4540 *
4541 * @param pVM The cross context VM structure.
4542 * @param GCPhys The GC physical address to convert. Since this is
4543 * only used for filling the REM TLB, the A20 mask must
4544 * be applied before calling this API.
4545 * @param fWritable Whether write access is required.
4546 * @param fByPassHandlers Whether to bypass access handlers.
4547 *
4548 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
4549 * a stop gap thing that should be removed once there is a better TLB
4550 * for virtual address accesses.
4551 */
4552VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
4553{
4554 pgmLock(pVM);
4555 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
4556
4557 PPGMRAMRANGE pRam;
4558 PPGMPAGE pPage;
4559 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4560 if (RT_SUCCESS(rc))
4561 {
4562 if (PGM_PAGE_IS_BALLOONED(pPage))
4563 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4564 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4565 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4566 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4567 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4568 rc = VINF_SUCCESS;
4569 else
4570 {
4571 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4572 {
4573 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4574 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4575 }
4576 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4577 {
4578 Assert(!fByPassHandlers);
4579 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4580 }
4581 }
4582 }
4583
4584 pgmUnlock(pVM);
4585 return rc;
4586}
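
/* Illustrative sketch, not part of the original file: probing whether a
 * physical page can be accessed directly or needs the handler-aware path.
 * The VERR_PGM_PHYS_TLB_* statuses returned above are informational here. */
#if 0 /* example only */
static bool pgmExampleCanAccessDirectly(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable)
{
    return RT_SUCCESS(PGMPhysIemQueryAccess(pVM, GCPhys, fWritable, false /*fByPassHandlers*/));
}
#endif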
4587
4588
4589/**
4590 * Interface used by NEM to check what to do on a memory access exit.
4591 *
4592 * @returns VBox status code.
4593 * @param pVM The cross context VM structure.
4594 * @param pVCpu The cross context per virtual CPU structure.
4595 * Optional.
4596 * @param GCPhys The guest physical address.
4597 * @param fMakeWritable Whether to try to make the page writable or not. If it
4598 * cannot be made writable, NEM_PAGE_PROT_WRITE won't
4599 * be returned and the return code will be unaffected.
4600 * @param pInfo Where to return the page information. This is
4601 * initialized even on failure.
4602 * @param pfnChecker Page in-sync checker callback. Optional.
4603 * @param pvUser User argument to pass to pfnChecker.
4604 */
4605VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fMakeWritable, PPGMPHYSNEMPAGEINFO pInfo,
4606 PFNPGMPHYSNEMCHECKPAGE pfnChecker, void *pvUser)
4607{
4608 pgmLock(pVM);
4609
4610 PPGMPAGE pPage;
4611 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
4612 if (RT_SUCCESS(rc))
4613 {
4614 /* Try make it writable if requested. */
4615 pInfo->u2OldNemState = PGM_PAGE_GET_NEM_STATE(pPage);
4616 if (fMakeWritable)
4617 switch (PGM_PAGE_GET_STATE(pPage))
4618 {
4619 case PGM_PAGE_STATE_SHARED:
4620 case PGM_PAGE_STATE_WRITE_MONITORED:
4621 case PGM_PAGE_STATE_ZERO:
4622 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
4623 if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
4624 rc = VINF_SUCCESS;
4625 break;
4626 }
4627
4628 /* Fill in the info. */
4629 pInfo->HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
4630 pInfo->u2NemState = PGM_PAGE_GET_NEM_STATE(pPage);
4631 pInfo->fHasHandlers = PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) ? 1 : 0;
4632 PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
4633 pInfo->enmType = enmType;
4634 pInfo->fNemProt = pgmPhysPageCalcNemProtection(pPage, enmType);
4635 switch (PGM_PAGE_GET_STATE(pPage))
4636 {
4637 case PGM_PAGE_STATE_ALLOCATED:
4638 pInfo->fZeroPage = 0;
4639 break;
4640
4641 case PGM_PAGE_STATE_ZERO:
4642 pInfo->fZeroPage = 1;
4643 break;
4644
4645 case PGM_PAGE_STATE_WRITE_MONITORED:
4646 pInfo->fZeroPage = 0;
4647 break;
4648
4649 case PGM_PAGE_STATE_SHARED:
4650 pInfo->fZeroPage = 0;
4651 break;
4652
4653 case PGM_PAGE_STATE_BALLOONED:
4654 pInfo->fZeroPage = 1;
4655 break;
4656
4657 default:
4658 pInfo->fZeroPage = 1;
4659 AssertFailedStmt(rc = VERR_PGM_PHYS_PAGE_GET_IPE);
4660 }
4661
4662 /* Call the checker and update NEM state. */
4663 if (pfnChecker)
4664 {
4665 rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
4666 PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
4667 }
4668
4669 /* Done. */
4670 pgmUnlock(pVM);
4671 }
4672 else
4673 {
4674 pgmUnlock(pVM);
4675
4676 pInfo->HCPhys = NIL_RTHCPHYS;
4677 pInfo->fNemProt = NEM_PAGE_PROT_NONE;
4678 pInfo->u2NemState = 0;
4679 pInfo->fHasHandlers = 0;
4680 pInfo->fZeroPage = 0;
4681 pInfo->enmType = PGMPAGETYPE_INVALID;
4682 }
4683
4684 return rc;
4685}
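
/* Illustrative sketch, not part of the original file: querying page info for
 * a NEM exit without a checker callback and testing the protection the VMM
 * would grant. The PGMPHYSNEMPAGEINFO fields used are the ones filled in above. */
#if 0 /* example only */
static int pgmExampleNemQueryPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys)
{
    PGMPHYSNEMPAGEINFO PageInfo;
    int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, true /*fMakeWritable*/, &PageInfo,
                                       NULL /*pfnChecker*/, NULL /*pvUser*/);
    if (RT_SUCCESS(rc) && (PageInfo.fNemProt & NEM_PAGE_PROT_WRITE))
    { /* the page may be mapped writable into the NEM partition. */ }
    return rc;
}
#endif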
4686
4687
4688/**
4689 * NEM helper that performs @a pfnCallback on pages with NEM state @a uMinState
4690 * or higher.
4691 *
4692 * @returns VBox status code from callback.
4693 * @param pVM The cross context VM structure.
4694 * @param pVCpu The cross context per CPU structure. This is
4695 * optional as it's only for passing to the callback.
4696 * @param uMinState The minimum NEM state value to call on.
4697 * @param pfnCallback The callback function.
4698 * @param pvUser User argument for the callback.
4699 */
4700VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uMinState,
4701 PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
4702{
4703 /*
4704 * Just brute force this problem.
4705 */
4706 pgmLock(pVM);
4707 int rc = VINF_SUCCESS;
4708 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
4709 {
4710 uint32_t const cPages = pRam->cb >> X86_PAGE_SHIFT;
4711 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4712 {
4713 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(&pRam->aPages[iPage]);
4714 if (u2State < uMinState)
4715 { /* likely */ }
4716 else
4717 {
4718 rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
4719 if (RT_SUCCESS(rc))
4720 PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
4721 else
4722 break;
4723 }
4724 }
4725 }
4726 pgmUnlock(pVM);
4727
4728 return rc;
4729}
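
/* Illustrative sketch, not part of the original file: a hypothetical callback
 * for PGMPhysNemEnumPagesByState. Its signature mirrors the pfnCallback
 * invocation in the loop above (the authoritative typedef is
 * PFNPGMPHYSNEMENUMCALLBACK in the VMM headers). */
#if 0 /* example only */
static DECLCALLBACK(int) pgmExampleNemEnumCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
                                                   uint8_t *pu2State, void *pvUser)
{
    RT_NOREF(pVM, pVCpu, pvUser);
    Log6(("pgmExampleNemEnumCallback: %RGp state %u\n", GCPhys, *pu2State));
    return VINF_SUCCESS;
}
#endif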
4730