VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp @ 87626

Last change on this file since 87626 was 87141, checked in by vboxsync, 4 years ago

VMM: Remove VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 and the code it encloses as it is unused since the removal of x86 darwin support

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 160.3 KB
1/* $Id: PGMAllPhys.cpp 87141 2020-12-29 19:12:45Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM_PHYS
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/iom.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/nem.h>
30#include "PGMInternal.h"
31#include <VBox/vmm/vmcc.h>
32#include "PGMInline.h"
33#include <VBox/param.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/asm-amd64-x86.h>
38#include <VBox/log.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*********************************************************************************************************************************
45* Defined Constants And Macros *
46*********************************************************************************************************************************/
47/** Enable the physical TLB. */
48#define PGM_WITH_PHYS_TLB
49
50/** @def PGM_HANDLER_PHYS_IS_VALID_STATUS
51 * Checks for a valid physical access handler return code (normal handler, not PF).
52 *
53 * Checks if the given strict status code is one of the expected ones for a
54 * physical access handler in the current context.
55 *
56 * @returns true or false.
57 * @param a_rcStrict The status code.
58 * @param a_fWrite Whether it is a write or read being serviced.
59 *
60 * @remarks We wish to keep the list of statuses here as short as possible.
61 * When changing, please make sure to update the PGMPhysRead,
62 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
63 */
64#ifdef IN_RING3
65# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
66 ( (a_rcStrict) == VINF_SUCCESS \
67 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
68#elif defined(IN_RING0)
69#define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
70 ( (a_rcStrict) == VINF_SUCCESS \
71 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT \
72 \
73 || (a_rcStrict) == ((a_fWrite) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ) \
74 || (a_rcStrict) == VINF_IOM_R3_MMIO_READ_WRITE \
75 || ((a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE && (a_fWrite)) \
76 \
77 || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR \
78 || (a_rcStrict) == VINF_EM_DBG_STOP \
79 || (a_rcStrict) == VINF_EM_DBG_EVENT \
80 || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
81 || (a_rcStrict) == VINF_EM_OFF \
82 || (a_rcStrict) == VINF_EM_SUSPEND \
83 || (a_rcStrict) == VINF_EM_RESET \
84 )
85#else
86# error "Context?"
87#endif
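
/*
 * Editor's note (illustrative sketch, not part of the original source): how a
 * dispatcher might check the status returned by an FNPGMPHYSHANDLER callback
 * against the macro above.  The handler pointer and its arguments are
 * hypothetical placeholders.
 */
#if 0
VBOXSTRICTRC rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf,
                                   PGMACCESSTYPE_WRITE, PGMACCESSORIGIN_DEVICE, pvUser);
AssertMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true /*fWrite*/),
          ("Unexpected handler status %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
#endif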
88
89/** @def PGM_HANDLER_VIRT_IS_VALID_STATUS
90 * Checks for a valid virtual access handler return code (normal handler, not PF).
91 *
92 * Checks if the given strict status code is one of the expected ones for a
93 * virtual access handler in the current context.
94 *
95 * @returns true or false.
96 * @param a_rcStrict The status code.
97 * @param a_fWrite Whether it is a write or read being serviced.
98 *
99 * @remarks We wish to keep the list of statuses here as short as possible.
100 * When changing, please make sure to update the PGMPhysRead,
101 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
102 */
103#ifdef IN_RING3
104# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
105 ( (a_rcStrict) == VINF_SUCCESS \
106 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
107#elif defined(IN_RING0)
108# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
109 (false /* no virtual handlers in ring-0! */ )
110#else
111# error "Context?"
112#endif
113
114
115
116#ifndef IN_RING3
117
118/**
119 * @callback_method_impl{FNPGMPHYSHANDLER,
120 * Dummy for forcing ring-3 handling of the access.}
121 */
122DECLEXPORT(VBOXSTRICTRC)
123pgmPhysHandlerRedirectToHC(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
124 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
125{
126 NOREF(pVM); NOREF(pVCpu); NOREF(GCPhys); NOREF(pvPhys); NOREF(pvBuf); NOREF(cbBuf);
127 NOREF(enmAccessType); NOREF(enmOrigin); NOREF(pvUser);
128 return VINF_EM_RAW_EMULATE_INSTR;
129}
130
131
132/**
133 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
134 * Dummy for forcing ring-3 handling of the access.}
135 */
136VMMDECL(VBOXSTRICTRC) pgmPhysPfHandlerRedirectToHC(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
137 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
138{
139 NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(GCPhysFault); NOREF(pvUser);
140 return VINF_EM_RAW_EMULATE_INSTR;
141}
142
143
144/**
145 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
146 * \#PF access handler callback for guest ROM range write access.}
147 *
148 * @remarks The @a pvUser argument points to the PGMROMRANGE.
149 */
150DECLEXPORT(VBOXSTRICTRC) pgmPhysRomWritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
151 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
152{
153 int rc;
154 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
155 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
156 NOREF(uErrorCode); NOREF(pvFault);
157
158 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
159
160 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
161 switch (pRom->aPages[iPage].enmProt)
162 {
163 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
164 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
165 {
166 /*
167 * If it's a simple instruction which doesn't change the cpu state
168 * we will simply skip it. Otherwise we'll have to defer it to REM.
169 */
170 uint32_t cbOp;
171 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
172 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
173 if ( RT_SUCCESS(rc)
174 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
175 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
176 {
177 switch (pDis->bOpCode)
178 {
179 /** @todo Find other instructions we can safely skip, possibly
180 * adding this kind of detection to DIS or EM. */
181 case OP_MOV:
182 pRegFrame->rip += cbOp;
183 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteHandled);
184 return VINF_SUCCESS;
185 }
186 }
187 break;
188 }
189
190 case PGMROMPROT_READ_RAM_WRITE_RAM:
191 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
192 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
193 AssertRC(rc);
194 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
195
196 case PGMROMPROT_READ_ROM_WRITE_RAM:
197 /* Handle it in ring-3 because it's *way* easier there. */
198 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
199 break;
200
201 default:
202 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
203 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
204 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
205 }
206
207 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
208 return VINF_EM_RAW_EMULATE_INSTR;
209}
210
211#endif /* !IN_RING3 */
212
213
214/**
215 * @callback_method_impl{FNPGMPHYSHANDLER,
216 * Access handler callback for ROM write accesses.}
217 *
218 * @remarks The @a pvUser argument points to the PGMROMRANGE.
219 */
220PGM_ALL_CB2_DECL(VBOXSTRICTRC)
221pgmPhysRomWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
222 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
223{
224 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
225 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
226 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
227 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
228 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
229 NOREF(pVCpu); NOREF(pvPhys); NOREF(enmOrigin);
230
231 if (enmAccessType == PGMACCESSTYPE_READ)
232 {
233 switch (pRomPage->enmProt)
234 {
235 /*
236 * Take the default action.
237 */
238 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
239 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
240 case PGMROMPROT_READ_ROM_WRITE_RAM:
241 case PGMROMPROT_READ_RAM_WRITE_RAM:
242 return VINF_PGM_HANDLER_DO_DEFAULT;
243
244 default:
245 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
246 pRom->aPages[iPage].enmProt, iPage, GCPhys),
247 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
248 }
249 }
250 else
251 {
252 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
253 switch (pRomPage->enmProt)
254 {
255 /*
256 * Ignore writes.
257 */
258 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
259 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
260 return VINF_SUCCESS;
261
262 /*
263 * Write to the RAM page.
264 */
265 case PGMROMPROT_READ_ROM_WRITE_RAM:
266 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
267 {
268                 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
269 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
270
271 /*
272 * Take the lock, do lazy allocation, map the page and copy the data.
273 *
274 * Note that we have to bypass the mapping TLB since it works on
275 * guest physical addresses and entering the shadow page would
276 * kind of screw things up...
277 */
278 int rc = pgmLock(pVM);
279 AssertRC(rc);
280
281 PPGMPAGE pShadowPage = &pRomPage->Shadow;
282 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
283 {
284 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
285 AssertLogRelReturn(pShadowPage, VERR_PGM_PHYS_PAGE_GET_IPE);
286 }
287
288 void *pvDstPage;
289 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
290 if (RT_SUCCESS(rc))
291 {
292 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
293 pRomPage->LiveSave.fWrittenTo = true;
294
295 AssertMsg( rc == VINF_SUCCESS
296 || ( rc == VINF_PGM_SYNC_CR3
297 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
298 , ("%Rrc\n", rc));
299 rc = VINF_SUCCESS;
300 }
301
302 pgmUnlock(pVM);
303 return rc;
304 }
305
306 default:
307 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
308 pRom->aPages[iPage].enmProt, iPage, GCPhys),
309 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
310 }
311 }
312}
313
314
315/**
316 * Invalidates the RAM range TLBs.
317 *
318 * @param pVM The cross context VM structure.
319 */
320void pgmPhysInvalidRamRangeTlbs(PVMCC pVM)
321{
322 pgmLock(pVM);
323 RT_ZERO(pVM->pgm.s.apRamRangesTlbR3);
324 RT_ZERO(pVM->pgm.s.apRamRangesTlbR0);
325 pgmUnlock(pVM);
326}
327
328
329/**
330 * Tests if a value of type RTGCPHYS would be negative if the type had been
331 * signed instead of unsigned.
332 *
333 * @returns @c true if negative, @c false if positive or zero.
334 * @param a_GCPhys The value to test.
335 * @todo Move me to iprt/types.h.
336 */
337#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
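
/* Editor's note (illustrative, not part of the original source): with a 64-bit
   RTGCPHYS, looking up GCPhys=0x1000 in a range starting at pRam->GCPhys=0x2000
   gives off = 0x1000 - 0x2000 = 0xFFFFFFFFFFFFF000; bit 63 is set, so
   RTGCPHYS_IS_NEGATIVE(off) is true and the tree walks below descend into the
   left subtree. */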
338
339
340/**
341 * Slow worker for pgmPhysGetRange.
342 *
343 * @copydoc pgmPhysGetRange
344 */
345PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
346{
347 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
348
349 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
350 while (pRam)
351 {
352 RTGCPHYS off = GCPhys - pRam->GCPhys;
353 if (off < pRam->cb)
354 {
355 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
356 return pRam;
357 }
358 if (RTGCPHYS_IS_NEGATIVE(off))
359 pRam = pRam->CTX_SUFF(pLeft);
360 else
361 pRam = pRam->CTX_SUFF(pRight);
362 }
363 return NULL;
364}
365
366
367/**
368 * Slow worker for pgmPhysGetRangeAtOrAbove.
369 *
370 * @copydoc pgmPhysGetRangeAtOrAbove
371 */
372PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
373{
374 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
375
376 PPGMRAMRANGE pLastLeft = NULL;
377 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
378 while (pRam)
379 {
380 RTGCPHYS off = GCPhys - pRam->GCPhys;
381 if (off < pRam->cb)
382 {
383 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
384 return pRam;
385 }
386 if (RTGCPHYS_IS_NEGATIVE(off))
387 {
388 pLastLeft = pRam;
389 pRam = pRam->CTX_SUFF(pLeft);
390 }
391 else
392 pRam = pRam->CTX_SUFF(pRight);
393 }
394 return pLastLeft;
395}
396
397
398/**
399 * Slow worker for pgmPhysGetPage.
400 *
401 * @copydoc pgmPhysGetPage
402 */
403PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
404{
405 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
406
407 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
408 while (pRam)
409 {
410 RTGCPHYS off = GCPhys - pRam->GCPhys;
411 if (off < pRam->cb)
412 {
413 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
414 return &pRam->aPages[off >> PAGE_SHIFT];
415 }
416
417 if (RTGCPHYS_IS_NEGATIVE(off))
418 pRam = pRam->CTX_SUFF(pLeft);
419 else
420 pRam = pRam->CTX_SUFF(pRight);
421 }
422 return NULL;
423}
424
425
426/**
427 * Slow worker for pgmPhysGetPageEx.
428 *
429 * @copydoc pgmPhysGetPageEx
430 */
431int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
432{
433 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
434
435 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
436 while (pRam)
437 {
438 RTGCPHYS off = GCPhys - pRam->GCPhys;
439 if (off < pRam->cb)
440 {
441 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
442 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
443 return VINF_SUCCESS;
444 }
445
446 if (RTGCPHYS_IS_NEGATIVE(off))
447 pRam = pRam->CTX_SUFF(pLeft);
448 else
449 pRam = pRam->CTX_SUFF(pRight);
450 }
451
452 *ppPage = NULL;
453 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
454}
455
456
457/**
458 * Slow worker for pgmPhysGetPageAndRangeEx.
459 *
460 * @copydoc pgmPhysGetPageAndRangeEx
461 */
462int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
463{
464 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
465
466 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
467 while (pRam)
468 {
469 RTGCPHYS off = GCPhys - pRam->GCPhys;
470 if (off < pRam->cb)
471 {
472 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
473 *ppRam = pRam;
474 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
475 return VINF_SUCCESS;
476 }
477
478 if (RTGCPHYS_IS_NEGATIVE(off))
479 pRam = pRam->CTX_SUFF(pLeft);
480 else
481 pRam = pRam->CTX_SUFF(pRight);
482 }
483
484 *ppRam = NULL;
485 *ppPage = NULL;
486 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
487}
488
489
490/**
491 * Checks if Address Gate 20 is enabled or not.
492 *
493 * @returns true if enabled.
494 * @returns false if disabled.
495 * @param pVCpu The cross context virtual CPU structure.
496 */
497VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
498{
499 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
500 return pVCpu->pgm.s.fA20Enabled;
501}
502
503
504/**
505 * Validates a GC physical address.
506 *
507 * @returns true if valid.
508 * @returns false if invalid.
509 * @param pVM The cross context VM structure.
510 * @param GCPhys The physical address to validate.
511 */
512VMMDECL(bool) PGMPhysIsGCPhysValid(PVMCC pVM, RTGCPHYS GCPhys)
513{
514 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
515 return pPage != NULL;
516}
517
518
519/**
520 * Checks if a GC physical address is a normal page,
521 * i.e. not ROM, MMIO or reserved.
522 *
523 * @returns true if normal.
524 * @returns false if invalid, ROM, MMIO or reserved page.
525 * @param pVM The cross context VM structure.
526 * @param GCPhys The physical address to check.
527 */
528VMMDECL(bool) PGMPhysIsGCPhysNormal(PVMCC pVM, RTGCPHYS GCPhys)
529{
530 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
531 return pPage
532 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
533}
534
535
536/**
537 * Converts a GC physical address to a HC physical address.
538 *
539 * @returns VINF_SUCCESS on success.
540 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
541 * page but has no physical backing.
542 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
543 * GC physical address.
544 *
545 * @param pVM The cross context VM structure.
546 * @param GCPhys The GC physical address to convert.
547 * @param pHCPhys Where to store the HC physical address on success.
548 */
549VMM_INT_DECL(int) PGMPhysGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
550{
551 pgmLock(pVM);
552 PPGMPAGE pPage;
553 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
554 if (RT_SUCCESS(rc))
555 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
556 pgmUnlock(pVM);
557 return rc;
558}
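
/*
 * Editor's note (illustrative, not part of the original source): a minimal,
 * hypothetical caller of the conversion API above.
 */
#if 0
RTHCPHYS HCPhys;
int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
if (RT_SUCCESS(rc))
    Log(("%RGp is backed by host physical address %RHp\n", GCPhys, HCPhys));
else
    Log(("Translating %RGp failed: %Rrc\n", GCPhys, rc));
#endif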
559
560
561/**
562 * Invalidates all page mapping TLBs.
563 *
564 * @param pVM The cross context VM structure.
565 */
566void pgmPhysInvalidatePageMapTLB(PVMCC pVM)
567{
568 pgmLock(pVM);
569 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
570
571 /* Clear the R3 & R0 TLBs completely. */
572 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
573 {
574 pVM->pgm.s.PhysTlbR0.aEntries[i].GCPhys = NIL_RTGCPHYS;
575 pVM->pgm.s.PhysTlbR0.aEntries[i].pPage = 0;
576#ifndef VBOX_WITH_RAM_IN_KERNEL
577 pVM->pgm.s.PhysTlbR0.aEntries[i].pMap = 0;
578#endif
579 pVM->pgm.s.PhysTlbR0.aEntries[i].pv = 0;
580 }
581
582 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
583 {
584 pVM->pgm.s.PhysTlbR3.aEntries[i].GCPhys = NIL_RTGCPHYS;
585 pVM->pgm.s.PhysTlbR3.aEntries[i].pPage = 0;
586 pVM->pgm.s.PhysTlbR3.aEntries[i].pMap = 0;
587 pVM->pgm.s.PhysTlbR3.aEntries[i].pv = 0;
588 }
589
590 pgmUnlock(pVM);
591}
592
593
594/**
595 * Invalidates a page mapping TLB entry.
596 *
597 * @param pVM The cross context VM structure.
598 * @param GCPhys GCPhys entry to flush
599 */
600void pgmPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
601{
602 PGM_LOCK_ASSERT_OWNER(pVM);
603
604 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
605
606 unsigned const idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
607
608 pVM->pgm.s.PhysTlbR0.aEntries[idx].GCPhys = NIL_RTGCPHYS;
609 pVM->pgm.s.PhysTlbR0.aEntries[idx].pPage = 0;
610#ifndef VBOX_WITH_RAM_IN_KERNEL
611 pVM->pgm.s.PhysTlbR0.aEntries[idx].pMap = 0;
612#endif
613 pVM->pgm.s.PhysTlbR0.aEntries[idx].pv = 0;
614
615 pVM->pgm.s.PhysTlbR3.aEntries[idx].GCPhys = NIL_RTGCPHYS;
616 pVM->pgm.s.PhysTlbR3.aEntries[idx].pPage = 0;
617 pVM->pgm.s.PhysTlbR3.aEntries[idx].pMap = 0;
618 pVM->pgm.s.PhysTlbR3.aEntries[idx].pv = 0;
619}
620
621
622/**
623 * Makes sure that there is at least one handy page ready for use.
624 *
625 * This will also take the appropriate actions when reaching water-marks.
626 *
627 * @returns VBox status code.
628 * @retval VINF_SUCCESS on success.
629 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
630 *
631 * @param pVM The cross context VM structure.
632 *
633 * @remarks Must be called from within the PGM critical section. It may
634 * nip back to ring-3/0 in some cases.
635 */
636static int pgmPhysEnsureHandyPage(PVMCC pVM)
637{
638 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
639
640 /*
641 * Do we need to do anything special?
642 */
643#ifdef IN_RING3
644 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
645#else
646 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
647#endif
648 {
649 /*
650 * Allocate pages only if we're out of them, or in ring-3, almost out.
651 */
652#ifdef IN_RING3
653 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
654#else
655 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
656#endif
657 {
658 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
659 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
660#ifdef IN_RING3
661 int rc = PGMR3PhysAllocateHandyPages(pVM);
662#else
663 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
664#endif
665 if (RT_UNLIKELY(rc != VINF_SUCCESS))
666 {
667 if (RT_FAILURE(rc))
668 return rc;
669 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
670 if (!pVM->pgm.s.cHandyPages)
671 {
672 LogRel(("PGM: no more handy pages!\n"));
673 return VERR_EM_NO_MEMORY;
674 }
675 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
676 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
677#ifndef IN_RING3
678 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
679#endif
680 }
681 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
682 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
683 ("%u\n", pVM->pgm.s.cHandyPages),
684 VERR_PGM_HANDY_PAGE_IPE);
685 }
686 else
687 {
688 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
689 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
690#ifndef IN_RING3
691 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
692 {
693 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
694 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
695 }
696#endif
697 }
698 }
699
700 return VINF_SUCCESS;
701}
702
703
704
705/**
706 * Replace a zero or shared page with a new page that we can write to.
707 *
708 * @returns The following VBox status codes.
709 * @retval VINF_SUCCESS on success, pPage is modified.
710 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
711 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
712 *
713 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
714 *
715 * @param pVM The cross context VM structure.
716 * @param pPage The physical page tracking structure. This will
717 * be modified on success.
718 * @param GCPhys The address of the page.
719 *
720 * @remarks Must be called from within the PGM critical section. It may
721 * nip back to ring-3/0 in some cases.
722 *
723 * @remarks This function shouldn't really fail; however, if it does,
724 * it probably means we've screwed up the size of handy pages and/or
725 * the low-water mark. Or, that some device I/O is causing a lot of
726 * pages to be allocated while the host is in a low-memory
727 * condition. This latter should be handled elsewhere and in a more
728 * controlled manner, it's on the @bugref{3170} todo list...
729 */
730int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
731{
732 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
733
734 /*
735 * Prereqs.
736 */
737 PGM_LOCK_ASSERT_OWNER(pVM);
738 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
739 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
740
741# ifdef PGM_WITH_LARGE_PAGES
742 /*
743 * Try allocate a large page if applicable.
744 */
745 if ( PGMIsUsingLargePages(pVM)
746 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
747 && !VM_IS_NEM_ENABLED(pVM)) /** @todo NEM: Implement large pages support. */
748 {
749 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
750 PPGMPAGE pBasePage;
751
752 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
753 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
754 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
755 {
756 rc = pgmPhysAllocLargePage(pVM, GCPhys);
757 if (rc == VINF_SUCCESS)
758 return rc;
759 }
760 /* Mark the base as type page table, so we don't check over and over again. */
761 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
762
763 /* fall back to 4KB pages. */
764 }
765# endif
766
767 /*
768 * Flush any shadow page table mappings of the page.
769 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
770 */
771 bool fFlushTLBs = false;
772 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
773 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
774
775 /*
776 * Ensure that we've got a page handy, take it and use it.
777 */
778 int rc2 = pgmPhysEnsureHandyPage(pVM);
779 if (RT_FAILURE(rc2))
780 {
781 if (fFlushTLBs)
782 PGM_INVL_ALL_VCPU_TLBS(pVM);
783 Assert(rc2 == VERR_EM_NO_MEMORY);
784 return rc2;
785 }
786 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
787 PGM_LOCK_ASSERT_OWNER(pVM);
788 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
789 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
790
791 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
792 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
793 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
794 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
795 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
796 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
797
798 /*
799 * There are one or two actions to be taken the next time we allocate handy pages:
800 * - Tell the GMM (global memory manager) what the page is being used for.
801 * (Speeds up replacement operations - sharing and defragmenting.)
802 * - If the current backing is shared, it must be freed.
803 */
804 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
805 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
806
807 void const *pvSharedPage = NULL;
808 if (PGM_PAGE_IS_SHARED(pPage))
809 {
810 /* Mark this shared page for freeing/dereferencing. */
811 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
812 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
813
814 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
815 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
816 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageReplaceShared));
817 pVM->pgm.s.cSharedPages--;
818
819 /* Grab the address of the page so we can make a copy later on. (safe) */
820 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
821 AssertRC(rc);
822 }
823 else
824 {
825 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
826 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
827 pVM->pgm.s.cZeroPages--;
828 }
829
830 /*
831 * Do the PGMPAGE modifications.
832 */
833 pVM->pgm.s.cPrivatePages++;
834 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
835 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
836 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
837 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
838 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
839
840 /* Copy the shared page contents to the replacement page. */
841 if (pvSharedPage)
842 {
843 /* Get the virtual address of the new page. */
844 PGMPAGEMAPLOCK PgMpLck;
845 void *pvNewPage;
846 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
847 if (RT_SUCCESS(rc))
848 {
849             memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo write ASMMemCopyPage */
850 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
851 }
852 }
853
854 if ( fFlushTLBs
855 && rc != VINF_PGM_GCPHYS_ALIASED)
856 PGM_INVL_ALL_VCPU_TLBS(pVM);
857
858 /*
859 * Notify NEM about the mapping change for this page.
860 *
861 * Note! Shadow ROM pages are complicated as they can definitely be
862 * allocated while not visible, so play safe.
863 */
864 if (VM_IS_NEM_ENABLED(pVM))
865 {
866 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
867 if ( enmType != PGMPAGETYPE_ROM_SHADOW
868 || pgmPhysGetPage(pVM, GCPhys) == pPage)
869 {
870 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
871 rc2 = NEMHCNotifyPhysPageAllocated(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, HCPhys,
872 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
873 if (RT_SUCCESS(rc))
874 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
875 else
876 rc = rc2;
877 }
878 }
879
880 return rc;
881}
882
883#ifdef PGM_WITH_LARGE_PAGES
884
885/**
886 * Replace a 2 MB range of zero pages with new pages that we can write to.
887 *
888 * @returns The following VBox status codes.
889 * @retval VINF_SUCCESS on success, pPage is modified.
890 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
891 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
892 *
893 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
894 *
895 * @param pVM The cross context VM structure.
896 * @param GCPhys The address of the page.
897 *
898 * @remarks Must be called from within the PGM critical section. It may
899 * nip back to ring-3/0 in some cases.
900 */
901int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys)
902{
903 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
904 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
905 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
906
907 /*
908 * Prereqs.
909 */
910 PGM_LOCK_ASSERT_OWNER(pVM);
911 Assert(PGMIsUsingLargePages(pVM));
912
913 PPGMPAGE pFirstPage;
914 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
915 if ( RT_SUCCESS(rc)
916 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
917 {
918 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
919
920 /* Don't call this function for already allocated pages. */
921 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
922
923 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
924 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
925 {
926 /* Lazy approach: check all pages in the 2 MB range.
927 * The whole range must be ram and unallocated. */
928 GCPhys = GCPhysBase;
929 unsigned iPage;
930 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
931 {
932 PPGMPAGE pSubPage;
933 rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage);
934 if ( RT_FAILURE(rc)
935 || PGM_PAGE_GET_TYPE(pSubPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
936 || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
937 {
938 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
939 break;
940 }
941 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
942 GCPhys += PAGE_SIZE;
943 }
944 if (iPage != _2M/PAGE_SIZE)
945 {
946 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
947 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
948 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
949 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
950 }
951
952 /*
953 * Do the allocation.
954 */
955# ifdef IN_RING3
956 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
957# else
958 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
959# endif
960 if (RT_SUCCESS(rc))
961 {
962 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
963 pVM->pgm.s.cLargePages++;
964 return VINF_SUCCESS;
965 }
966
967 /* If we fail once, it most likely means the host's memory is too
968 fragmented; don't bother trying again. */
969 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
970 PGMSetLargePageUsage(pVM, false);
971 return rc;
972 }
973 }
974 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
975}
976
977
978/**
979 * Recheck the entire 2 MB range to see if we can use it again as a large page.
980 *
981 * @returns The following VBox status codes.
982 * @retval VINF_SUCCESS on success, the large page can be used again
983 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
984 *
985 * @param pVM The cross context VM structure.
986 * @param GCPhys The address of the page.
987 * @param pLargePage Page structure of the base page
988 */
989int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
990{
991 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
992
993 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
994
995 GCPhys &= X86_PDE2M_PAE_PG_MASK;
996
997 /* Check the base page. */
998 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
999 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
1000 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
1001 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1002 {
1003 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
1004 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1005 }
1006
1007 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
1008 /* Check all remaining pages in the 2 MB range. */
1009 unsigned i;
1010 GCPhys += PAGE_SIZE;
1011 for (i = 1; i < _2M/PAGE_SIZE; i++)
1012 {
1013 PPGMPAGE pPage;
1014 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1015 AssertRCBreak(rc);
1016
1017 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
1018 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
1019 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
1020 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1021 {
1022 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
1023 break;
1024 }
1025
1026 GCPhys += PAGE_SIZE;
1027 }
1028 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
1029
1030 if (i == _2M/PAGE_SIZE)
1031 {
1032 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
1033 pVM->pgm.s.cLargePagesDisabled--;
1034 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
1035 return VINF_SUCCESS;
1036 }
1037
1038 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1039}
1040
1041#endif /* PGM_WITH_LARGE_PAGES */
1042
1043
1044/**
1045 * Deal with a write monitored page.
1046 *
1047 * @returns VBox strict status code.
1048 *
1049 * @param pVM The cross context VM structure.
1050 * @param pPage The physical page tracking structure.
1051 * @param GCPhys The guest physical address of the page.
1052 * PGMPhysReleasePageMappingLock() passes NIL_RTGCPHYS in a
1053 * very unlikely situation where it is okay that we let NEM
1054 * fix the page access in a lazy fashion.
1055 *
1056 * @remarks Called from within the PGM critical section.
1057 */
1058void pgmPhysPageMakeWriteMonitoredWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1059{
1060 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
1061 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1062 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1063 Assert(pVM->pgm.s.cMonitoredPages > 0);
1064 pVM->pgm.s.cMonitoredPages--;
1065 pVM->pgm.s.cWrittenToPages++;
1066
1067 /*
1068 * Notify NEM about the protection change so we won't spin forever.
1069 *
1070 * Note! NEM needs to be able to lazily correct page protection as we cannot
1071 * really get it 100% right here, it seems. The page pool does this too.
1072 */
1073 if (VM_IS_NEM_ENABLED(pVM) && GCPhys != NIL_RTGCPHYS)
1074 {
1075 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1076 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1077 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
1078 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1079 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1080 }
1081}
1082
1083
1084/**
1085 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
1086 *
1087 * @returns VBox strict status code.
1088 * @retval VINF_SUCCESS on success.
1089 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1090 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1091 *
1092 * @param pVM The cross context VM structure.
1093 * @param pPage The physical page tracking structure.
1094 * @param GCPhys The address of the page.
1095 *
1096 * @remarks Called from within the PGM critical section.
1097 */
1098int pgmPhysPageMakeWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1099{
1100 PGM_LOCK_ASSERT_OWNER(pVM);
1101 switch (PGM_PAGE_GET_STATE(pPage))
1102 {
1103 case PGM_PAGE_STATE_WRITE_MONITORED:
1104 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
1105 RT_FALL_THRU();
1106 default: /* to shut up GCC */
1107 case PGM_PAGE_STATE_ALLOCATED:
1108 return VINF_SUCCESS;
1109
1110 /*
1111 * Zero pages can be dummy pages for MMIO or reserved memory,
1112 * so we need to check the flags before joining cause with
1113 * shared page replacement.
1114 */
1115 case PGM_PAGE_STATE_ZERO:
1116 if (PGM_PAGE_IS_MMIO(pPage))
1117 return VERR_PGM_PHYS_PAGE_RESERVED;
1118 RT_FALL_THRU();
1119 case PGM_PAGE_STATE_SHARED:
1120 return pgmPhysAllocPage(pVM, pPage, GCPhys);
1121
1122 /* Not allowed to write to ballooned pages. */
1123 case PGM_PAGE_STATE_BALLOONED:
1124 return VERR_PGM_PHYS_PAGE_BALLOONED;
1125 }
1126}
1127
1128
1129/**
1130 * Internal usage: Map the page specified by its GMM ID.
1131 *
1132 * This is similar to pgmPhysPageMap.
1133 *
1134 * @returns VBox status code.
1135 *
1136 * @param pVM The cross context VM structure.
1137 * @param idPage The Page ID.
1138 * @param HCPhys The physical address (for SUPR0HCPhysToVirt).
1139 * @param ppv Where to store the mapping address.
1140 *
1141 * @remarks Called from within the PGM critical section. The mapping is only
1142 * valid while you are inside this section.
1143 */
1144int pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
1145{
1146 /*
1147 * Validation.
1148 */
1149 PGM_LOCK_ASSERT_OWNER(pVM);
1150 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1151 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
1152 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
1153
1154#if defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
1155# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1156 return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, ppv);
1157# else
1158 return GMMR0PageIdToVirt(pVM, idPage, ppv);
1159# endif
1160
1161#else
1162 /*
1163 * Find/make Chunk TLB entry for the mapping chunk.
1164 */
1165 PPGMCHUNKR3MAP pMap;
1166 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1167 if (pTlbe->idChunk == idChunk)
1168 {
1169 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1170 pMap = pTlbe->pChunk;
1171 }
1172 else
1173 {
1174 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1175
1176 /*
1177 * Find the chunk, map it if necessary.
1178 */
1179 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1180 if (pMap)
1181 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1182 else
1183 {
1184# ifdef IN_RING0
1185 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1186 AssertRCReturn(rc, rc);
1187 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1188 Assert(pMap);
1189# else
1190 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1191 if (RT_FAILURE(rc))
1192 return rc;
1193# endif
1194 }
1195
1196 /*
1197 * Enter it into the Chunk TLB.
1198 */
1199 pTlbe->idChunk = idChunk;
1200 pTlbe->pChunk = pMap;
1201 }
1202
1203    *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
1204 return VINF_SUCCESS;
1205#endif
1206}
1207
1208
1209/**
1210 * Maps a page into the current virtual address space so it can be accessed.
1211 *
1212 * @returns VBox status code.
1213 * @retval VINF_SUCCESS on success.
1214 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1215 *
1216 * @param pVM The cross context VM structure.
1217 * @param pPage The physical page tracking structure.
1218 * @param GCPhys The address of the page.
1219 * @param ppMap Where to store the address of the mapping tracking structure.
1220 * @param ppv Where to store the mapping address of the page. The page
1221 * offset is masked off!
1222 *
1223 * @remarks Called from within the PGM critical section.
1224 */
1225static int pgmPhysPageMapCommon(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1226{
1227 PGM_LOCK_ASSERT_OWNER(pVM);
1228 NOREF(GCPhys);
1229
1230 /*
1231 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1232 */
1233 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1234 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1235 {
1236 /* Decode the page id to a page in a MMIO2 ram range. */
1237 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1238 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1239 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1240 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
1241 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1242 pPage->s.idPage, pPage->s.uStateY),
1243 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1244 PPGMREGMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1245 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1246 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1247 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1248 *ppMap = NULL;
1249# if defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1250 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1251# elif defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
1252 *ppv = (uint8_t *)pMmio2Range->pvR0 + ((uintptr_t)iPage << PAGE_SHIFT);
1253 return VINF_SUCCESS;
1254# else
1255 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << PAGE_SHIFT);
1256 return VINF_SUCCESS;
1257# endif
1258 }
1259
1260 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1261 if (idChunk == NIL_GMM_CHUNKID)
1262 {
1263 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1264 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1265 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1266 {
1267 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1268 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1269            AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1270 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1271 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1272 }
1273 else
1274 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1275 *ppMap = NULL;
1276 return VINF_SUCCESS;
1277 }
1278
1279# if defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1280 /*
1281 * Just use the physical address.
1282 */
1283 *ppMap = NULL;
1284 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1285
1286# elif defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
1287 /*
1288 * Go by page ID thru GMMR0.
1289 */
1290 *ppMap = NULL;
1291 return GMMR0PageIdToVirt(pVM, PGM_PAGE_GET_PAGEID(pPage), ppv);
1292
1293# else
1294 /*
1295 * Find/make Chunk TLB entry for the mapping chunk.
1296 */
1297 PPGMCHUNKR3MAP pMap;
1298 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1299 if (pTlbe->idChunk == idChunk)
1300 {
1301 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1302 pMap = pTlbe->pChunk;
1303 AssertPtr(pMap->pv);
1304 }
1305 else
1306 {
1307 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1308
1309 /*
1310 * Find the chunk, map it if necessary.
1311 */
1312 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1313 if (pMap)
1314 {
1315 AssertPtr(pMap->pv);
1316 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1317 }
1318 else
1319 {
1320# ifdef IN_RING0
1321 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1322 AssertRCReturn(rc, rc);
1323 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1324 Assert(pMap);
1325# else
1326 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1327 if (RT_FAILURE(rc))
1328 return rc;
1329# endif
1330 AssertPtr(pMap->pv);
1331 }
1332
1333 /*
1334 * Enter it into the Chunk TLB.
1335 */
1336 pTlbe->idChunk = idChunk;
1337 pTlbe->pChunk = pMap;
1338 }
1339
1340 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
1341 *ppMap = pMap;
1342 return VINF_SUCCESS;
1343# endif /* !IN_RING0 || !VBOX_WITH_RAM_IN_KERNEL */
1344}
1345
1346
1347/**
1348 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1349 *
1350 * This is typically used in paths where we cannot use the TLB methods (like ROM
1351 * pages) or where there is no point in using them since we won't get many hits.
1352 *
1353 * @returns VBox strict status code.
1354 * @retval VINF_SUCCESS on success.
1355 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1356 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1357 *
1358 * @param pVM The cross context VM structure.
1359 * @param pPage The physical page tracking structure.
1360 * @param GCPhys The address of the page.
1361 * @param ppv Where to store the mapping address of the page. The page
1362 * offset is masked off!
1363 *
1364 * @remarks Called from within the PGM critical section. The mapping is only
1365 * valid while you are inside this section.
1366 */
1367int pgmPhysPageMakeWritableAndMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1368{
1369 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1370 if (RT_SUCCESS(rc))
1371 {
1372 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1373 PPGMPAGEMAP pMapIgnore;
1374 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1375 if (RT_FAILURE(rc2)) /* preserve rc */
1376 rc = rc2;
1377 }
1378 return rc;
1379}
1380
1381
1382/**
1383 * Maps a page into the current virtual address space so it can be accessed for
1384 * both writing and reading.
1385 *
1386 * This is typically used in paths where we cannot use the TLB methods (like ROM
1387 * pages) or where there is no point in using them since we won't get many hits.
1388 *
1389 * @returns VBox status code.
1390 * @retval VINF_SUCCESS on success.
1391 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1392 *
1393 * @param pVM The cross context VM structure.
1394 * @param pPage The physical page tracking structure. Must be in the
1395 * allocated state.
1396 * @param GCPhys The address of the page.
1397 * @param ppv Where to store the mapping address of the page. The page
1398 * offset is masked off!
1399 *
1400 * @remarks Called from within the PGM critical section. The mapping is only
1401 * valid while you are inside this section.
1402 */
1403int pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1404{
1405 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1406 PPGMPAGEMAP pMapIgnore;
1407 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1408}
1409
1410
1411/**
1412 * Maps a page into the current virtual address space so it can be accessed for
1413 * reading.
1414 *
1415 * This is typically used in paths where we cannot use the TLB methods (like ROM
1416 * pages) or where there is no point in using them since we won't get many hits.
1417 *
1418 * @returns VBox status code.
1419 * @retval VINF_SUCCESS on success.
1420 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1421 *
1422 * @param pVM The cross context VM structure.
1423 * @param pPage The physical page tracking structure.
1424 * @param GCPhys The address of the page.
1425 * @param ppv Where to store the mapping address of the page. The page
1426 * offset is masked off!
1427 *
1428 * @remarks Called from within the PGM critical section. The mapping is only
1429 * valid while you are inside this section.
1430 */
1431int pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1432{
1433 PPGMPAGEMAP pMapIgnore;
1434 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1435}
1436
1437
1438/**
1439 * Load a guest page into the ring-3 physical TLB.
1440 *
1441 * @returns VBox status code.
1442 * @retval VINF_SUCCESS on success
1443 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1444 * @param pVM The cross context VM structure.
1445 * @param GCPhys The guest physical address in question.
1446 */
1447int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys)
1448{
1449 PGM_LOCK_ASSERT_OWNER(pVM);
1450
1451 /*
1452 * Find the ram range and page and hand it over to the with-page function.
1453 * 99.8% of requests are expected to be in the first range.
1454 */
1455 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1456 if (!pPage)
1457 {
1458 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1459 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1460 }
1461
1462 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1463}
1464
1465
1466/**
1467 * Load a guest page into the ring-3 physical TLB.
1468 *
1469 * @returns VBox status code.
1470 * @retval VINF_SUCCESS on success
1471 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1472 *
1473 * @param pVM The cross context VM structure.
1474 * @param pPage Pointer to the PGMPAGE structure corresponding to
1475 * GCPhys.
1476 * @param GCPhys The guest physical address in question.
1477 */
1478int pgmPhysPageLoadIntoTlbWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1479{
1480 PGM_LOCK_ASSERT_OWNER(pVM);
1481 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1482
1483 /*
1484 * Map the page.
1485 * Make a special case for the zero page as it is kind of special.
1486 */
1487 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1488 if ( !PGM_PAGE_IS_ZERO(pPage)
1489 && !PGM_PAGE_IS_BALLOONED(pPage))
1490 {
1491 void *pv;
1492 PPGMPAGEMAP pMap;
1493 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1494 if (RT_FAILURE(rc))
1495 return rc;
1496# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1497 pTlbe->pMap = pMap;
1498# endif
1499 pTlbe->pv = pv;
1500 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1501 }
1502 else
1503 {
1504 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1505# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1506 pTlbe->pMap = NULL;
1507# endif
1508 pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1509 }
1510# ifdef PGM_WITH_PHYS_TLB
1511 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1512 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1513 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1514 else
1515 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1516# else
1517 pTlbe->GCPhys = NIL_RTGCPHYS;
1518# endif
1519 pTlbe->pPage = pPage;
1520 return VINF_SUCCESS;
1521}
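
/*
 * Editor's note (illustrative sketch, not part of the original source): the
 * fast-path lookup backed by the two loaders above lives in PGMInline.h and
 * conceptually does something like the following; the locals are hypothetical.
 */
#if 0
PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
if (pTlbe->GCPhys != (GCPhys & X86_PTE_PAE_PG_MASK))
{
    int rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys);   /* TLB miss: (re)load the entry. */
    if (RT_FAILURE(rc))
        return rc;
}
/* On success, pTlbe->pPage and pTlbe->pv describe the page backing GCPhys. */
#endif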
1522
1523
1524/**
1525 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1526 * own the PGM lock and therefore not need to lock the mapped page.
1527 *
1528 * @returns VBox status code.
1529 * @retval VINF_SUCCESS on success.
1530 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1531 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1532 *
1533 * @param pVM The cross context VM structure.
1534 * @param GCPhys The guest physical address of the page that should be mapped.
1535 * @param pPage Pointer to the PGMPAGE structure for the page.
1536 * @param ppv Where to store the address corresponding to GCPhys.
1537 *
1538 * @internal
1539 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1540 */
1541int pgmPhysGCPhys2CCPtrInternalDepr(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1542{
1543 int rc;
1544 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1545 PGM_LOCK_ASSERT_OWNER(pVM);
1546 pVM->pgm.s.cDeprecatedPageLocks++;
1547
1548 /*
1549 * Make sure the page is writable.
1550 */
1551 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1552 {
1553 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1554 if (RT_FAILURE(rc))
1555 return rc;
1556 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1557 }
1558 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1559
1560 /*
1561 * Get the mapping address.
1562 */
1563 PPGMPAGEMAPTLBE pTlbe;
1564 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1565 if (RT_FAILURE(rc))
1566 return rc;
1567 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1568 return VINF_SUCCESS;
1569}
1570
1571
1572/**
1573 * Locks a page mapping for writing.
1574 *
1575 * @param pVM The cross context VM structure.
1576 * @param pPage The page.
1577 * @param pTlbe The mapping TLB entry for the page.
1578 * @param pLock The lock structure (output).
1579 */
1580DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1581{
1582# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1583 PPGMPAGEMAP pMap = pTlbe->pMap;
1584 if (pMap)
1585 pMap->cRefs++;
1586# else
1587 RT_NOREF(pTlbe);
1588# endif
1589
1590 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1591 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1592 {
1593 if (cLocks == 0)
1594 pVM->pgm.s.cWriteLockedPages++;
1595 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1596 }
1597 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1598 {
1599 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1600 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1601# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1602 if (pMap)
1603 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1604# endif
1605 }
1606
1607 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1608# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1609 pLock->pvMap = pMap;
1610# else
1611 pLock->pvMap = NULL;
1612# endif
1613}
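
/*
 * Editor's note (illustrative, not part of the original source): the write-lock
 * bookkeeping above backs the public mapping API; a hypothetical caller-side
 * pattern looks like this.
 */
#if 0
PGMPAGEMAPLOCK Lock;
void          *pv;
int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
if (RT_SUCCESS(rc))
{
    memcpy(pv, pvSrc, cbToWrite);                /* write within the mapped page */
    PGMPhysReleasePageMappingLock(pVM, &Lock);   /* drop the write lock again */
}
#endif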
1614
1615/**
1616 * Locks a page mapping for reading.
1617 *
1618 * @param pVM The cross context VM structure.
1619 * @param pPage The page.
1620 * @param pTlbe The mapping TLB entry for the page.
1621 * @param pLock The lock structure (output).
1622 */
1623DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1624{
1625# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1626 PPGMPAGEMAP pMap = pTlbe->pMap;
1627 if (pMap)
1628 pMap->cRefs++;
1629# else
1630 RT_NOREF(pTlbe);
1631# endif
1632
1633 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1634 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1635 {
1636 if (cLocks == 0)
1637 pVM->pgm.s.cReadLockedPages++;
1638 PGM_PAGE_INC_READ_LOCKS(pPage);
1639 }
1640 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1641 {
1642 PGM_PAGE_INC_READ_LOCKS(pPage);
1643 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1644# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1645 if (pMap)
1646 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1647# endif
1648 }
1649
1650 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1651# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1652 pLock->pvMap = pMap;
1653# else
1654 pLock->pvMap = NULL;
1655# endif
1656}
1657
1658
1659/**
1660 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1661 * own the PGM lock and have access to the page structure.
1662 *
1663 * @returns VBox status code.
1664 * @retval VINF_SUCCESS on success.
1665 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1666 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1667 *
1668 * @param pVM The cross context VM structure.
1669 * @param GCPhys The guest physical address of the page that should be mapped.
1670 * @param pPage Pointer to the PGMPAGE structure for the page.
1671 * @param ppv Where to store the address corresponding to GCPhys.
1672 * @param pLock Where to store the lock information that
1673 * pgmPhysReleaseInternalPageMappingLock needs.
1674 *
1675 * @internal
1676 */
1677int pgmPhysGCPhys2CCPtrInternal(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1678{
1679 int rc;
1680 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1681 PGM_LOCK_ASSERT_OWNER(pVM);
1682
1683 /*
1684 * Make sure the page is writable.
1685 */
1686 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1687 {
1688 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1689 if (RT_FAILURE(rc))
1690 return rc;
1691 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1692 }
1693 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1694
1695 /*
1696 * Do the job.
1697 */
1698 PPGMPAGEMAPTLBE pTlbe;
1699 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1700 if (RT_FAILURE(rc))
1701 return rc;
1702 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1703 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1704 return VINF_SUCCESS;
1705}
1706
1707
1708/**
1709 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1710 * own the PGM lock and have access to the page structure.
1711 *
1712 * @returns VBox status code.
1713 * @retval VINF_SUCCESS on success.
1714 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1715 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1716 *
1717 * @param pVM The cross context VM structure.
1718 * @param GCPhys The guest physical address of the page that should be mapped.
1719 * @param pPage Pointer to the PGMPAGE structure for the page.
1720 * @param ppv Where to store the address corresponding to GCPhys.
1721 * @param pLock Where to store the lock information that
1722 * pgmPhysReleaseInternalPageMappingLock needs.
1723 *
1724 * @internal
1725 */
1726int pgmPhysGCPhys2CCPtrInternalReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1727{
1728 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1729 PGM_LOCK_ASSERT_OWNER(pVM);
1730 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1731
1732 /*
1733 * Do the job.
1734 */
1735 PPGMPAGEMAPTLBE pTlbe;
1736 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1737 if (RT_FAILURE(rc))
1738 return rc;
1739 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1740 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1741 return VINF_SUCCESS;
1742}
1743
1744
1745/**
1746 * Requests the mapping of a guest page into the current context.
1747 *
1748 * This API should only be used for very short periods, as it will consume scarce
1749 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1750 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1751 *
1752 * This API will assume your intention is to write to the page, and will
1753 * therefore replace shared and zero pages. If you do not intend to modify
1754 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1755 *
1756 * @returns VBox status code.
1757 * @retval VINF_SUCCESS on success.
1758 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1759 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1760 *
1761 * @param pVM The cross context VM structure.
1762 * @param GCPhys The guest physical address of the page that should be
1763 * mapped.
1764 * @param ppv Where to store the address corresponding to GCPhys.
1765 * @param pLock Where to store the lock information that
1766 * PGMPhysReleasePageMappingLock needs.
1767 *
1768 * @remarks The caller is responsible for dealing with access handlers.
1769 * @todo Add an informational return code for pages with access handlers?
1770 *
1771 * @remark Avoid calling this API from within critical sections (other than
1772 * the PGM one) because of the deadlock risk. External threads may
1773 * need to delegate jobs to the EMTs.
1774 * @remarks Only one page is mapped! Make no assumption about what's after or
1775 * before the returned page!
1776 * @thread Any thread.
1777 */
1778VMM_INT_DECL(int) PGMPhysGCPhys2CCPtr(PVMCC pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1779{
1780 int rc = pgmLock(pVM);
1781 AssertRCReturn(rc, rc);
1782
1783 /*
1784 * Query the Physical TLB entry for the page (may fail).
1785 */
1786 PPGMPAGEMAPTLBE pTlbe;
1787 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1788 if (RT_SUCCESS(rc))
1789 {
1790 /*
1791 * If the page is shared, the zero page, or being write monitored
1792 * it must be converted to a page that's writable if possible.
1793 */
1794 PPGMPAGE pPage = pTlbe->pPage;
1795 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1796 {
1797 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1798 if (RT_SUCCESS(rc))
1799 {
1800 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1801 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1802 }
1803 }
1804 if (RT_SUCCESS(rc))
1805 {
1806 /*
1807 * Now, just perform the locking and calculate the return address.
1808 */
1809 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1810 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1811 }
1812 }
1813
1814 pgmUnlock(pVM);
1815 return rc;
1816}
1817
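/*
 * Usage sketch (illustrative only): a hypothetical caller maps a guest page
 * for writing, copies at most one page worth of data into it and releases the
 * lock again.  pVM, GCPhys, pvData and cbData are assumed to be supplied by
 * the caller; RT_MIN comes from IPRT.
 *
 *     void          *pv;
 *     PGMPAGEMAPLOCK Lock;
 *     int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         size_t cb = RT_MIN(cbData, PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK)); // stay within the mapped page
 *         memcpy(pv, pvData, cb);
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);                           // release ASAP
 *     }
 */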
1818
1819/**
1820 * Requests the mapping of a guest page into the current context.
1821 *
1822 * This API should only be used for very short periods, as it will consume scarce
1823 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1824 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1825 *
1826 * @returns VBox status code.
1827 * @retval VINF_SUCCESS on success.
1828 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1829 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1830 *
1831 * @param pVM The cross context VM structure.
1832 * @param GCPhys The guest physical address of the page that should be
1833 * mapped.
1834 * @param ppv Where to store the address corresponding to GCPhys.
1835 * @param pLock Where to store the lock information that
1836 * PGMPhysReleasePageMappingLock needs.
1837 *
1838 * @remarks The caller is responsible for dealing with access handlers.
1839 * @todo Add an informational return code for pages with access handlers?
1840 *
1841 * @remarks Avoid calling this API from within critical sections (other than
1842 * the PGM one) because of the deadlock risk.
1843 * @remarks Only one page is mapped! Make no assumption about what's after or
1844 * before the returned page!
1845 * @thread Any thread.
1846 */
1847VMM_INT_DECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVMCC pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1848{
1849 int rc = pgmLock(pVM);
1850 AssertRCReturn(rc, rc);
1851
1852 /*
1853 * Query the Physical TLB entry for the page (may fail).
1854 */
1855 PPGMPAGEMAPTLBE pTlbe;
1856 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1857 if (RT_SUCCESS(rc))
1858 {
1859 /* MMIO pages don't have any readable backing. */
1860 PPGMPAGE pPage = pTlbe->pPage;
1861 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1862 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1863 else
1864 {
1865 /*
1866 * Now, just perform the locking and calculate the return address.
1867 */
1868 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1869 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1870 }
1871 }
1872
1873 pgmUnlock(pVM);
1874 return rc;
1875}
1876
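/*
 * Usage sketch (illustrative only): read-only mapping of a guest page.  pVM,
 * GCPhys and abBuf are assumed to be provided by a hypothetical caller, with
 * abBuf no larger than the remainder of the page.
 *
 *     void const    *pv;
 *     PGMPAGEMAPLOCK Lock;
 *     int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         memcpy(abBuf, pv, sizeof(abBuf));            // no writes through pv!
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);
 *     }
 */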
1877
1878/**
1879 * Requests the mapping of a guest page given by virtual address into the current context.
1880 *
1881 * This API should only be used for very short periods, as it will consume
1882 * scarce resources (R0 and GC) in the mapping cache. When you're done
1883 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1884 *
1885 * This API will assume your intention is to write to the page, and will
1886 * therefore replace shared and zero pages. If you do not intend to modify
1887 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1888 *
1889 * @returns VBox status code.
1890 * @retval VINF_SUCCESS on success.
1891 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1892 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1893 * @retval  VERR_PGM_PHYS_PAGE_RESERVED it it's a valid page but has no physical backing.
1894 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1895 *
1896 * @param pVCpu The cross context virtual CPU structure.
1897 * @param   GCPtr       The guest virtual address of the page that should be
1898 *                      mapped.
1899 * @param   ppv         Where to store the address corresponding to GCPtr.
1900 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1901 *
1902 * @remark Avoid calling this API from within critical sections (other than
1903 * the PGM one) because of the deadlock risk.
1904 * @thread EMT
1905 */
1906VMM_INT_DECL(int) PGMPhysGCPtr2CCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1907{
1908 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1909 RTGCPHYS GCPhys;
1910 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1911 if (RT_SUCCESS(rc))
1912 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1913 return rc;
1914}
1915
1916
1917/**
1918 * Requests the mapping of a guest page given by virtual address into the current context.
1919 *
1920 * This API should only be used for very short periods, as it will consume
1921 * scarce resources (R0 and GC) in the mapping cache. When you're done
1922 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1923 *
1924 * @returns VBox status code.
1925 * @retval VINF_SUCCESS on success.
1926 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1927 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1928 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1929 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1930 *
1931 * @param pVCpu The cross context virtual CPU structure.
1932 * @param   GCPtr       The guest virtual address of the page that should be
1933 * mapped.
1934 * @param ppv Where to store the address corresponding to GCPtr.
1935 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1936 *
1937 * @remark Avoid calling this API from within critical sections (other than
1938 * the PGM one) because of the deadlock risk.
1939 * @thread EMT
1940 */
1941VMM_INT_DECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1942{
1943 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1944 RTGCPHYS GCPhys;
1945 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1946 if (RT_SUCCESS(rc))
1947 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1948 return rc;
1949}
1950
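/*
 * Usage sketch (illustrative only, EMT only): the virtual address variants
 * translate GCPtr with PGMPhysGCPtr2GCPhys and then defer to the physical
 * address APIs.  pVCpu and GCPtr are assumed to come from a hypothetical
 * caller running on the EMT.
 *
 *     void const    *pv;
 *     PGMPAGEMAPLOCK Lock;
 *     int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtr, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         uint8_t bFirst = *(uint8_t const *)pv;       // peek at the mapped page
 *         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
 *         RT_NOREF(bFirst);
 *     }
 */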
1951
1952/**
1953 * Release the mapping of a guest page.
1954 *
1955 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1956 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1957 *
1958 * @param pVM The cross context VM structure.
1959 * @param pLock The lock structure initialized by the mapping function.
1960 */
1961VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
1962{
1963# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1964 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1965# endif
1966 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1967 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1968
1969 pLock->uPageAndType = 0;
1970 pLock->pvMap = NULL;
1971
1972 pgmLock(pVM);
1973 if (fWriteLock)
1974 {
1975 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1976 Assert(cLocks > 0);
1977 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1978 {
1979 if (cLocks == 1)
1980 {
1981 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1982 pVM->pgm.s.cWriteLockedPages--;
1983 }
1984 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1985 }
1986
1987 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
1988 { /* probably extremely likely */ }
1989 else
1990 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
1991 }
1992 else
1993 {
1994 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1995 Assert(cLocks > 0);
1996 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1997 {
1998 if (cLocks == 1)
1999 {
2000 Assert(pVM->pgm.s.cReadLockedPages > 0);
2001 pVM->pgm.s.cReadLockedPages--;
2002 }
2003 PGM_PAGE_DEC_READ_LOCKS(pPage);
2004 }
2005 }
2006
2007# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
2008 if (pMap)
2009 {
2010 Assert(pMap->cRefs >= 1);
2011 pMap->cRefs--;
2012 }
2013# endif
2014 pgmUnlock(pVM);
2015}
2016
2017
2018#ifdef IN_RING3
2019/**
2020 * Release the mapping of multiple guest pages.
2021 *
2022 * This is the counterpart to PGMR3PhysBulkGCPhys2CCPtrExternal() and
2023 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal().
2024 *
2025 * @param pVM The cross context VM structure.
2026 * @param cPages Number of pages to unlock.
2027 * @param   paLocks     Array of lock structures initialized by the mapping
2028 * function.
2029 */
2030VMMDECL(void) PGMPhysBulkReleasePageMappingLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
2031{
2032 Assert(cPages > 0);
2033 bool const fWriteLock = (paLocks[0].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2034#ifdef VBOX_STRICT
2035 for (uint32_t i = 1; i < cPages; i++)
2036 {
2037 Assert(fWriteLock == ((paLocks[i].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE));
2038 AssertPtr(paLocks[i].uPageAndType);
2039 }
2040#endif
2041
2042 pgmLock(pVM);
2043 if (fWriteLock)
2044 {
2045 /*
2046 * Write locks:
2047 */
2048 for (uint32_t i = 0; i < cPages; i++)
2049 {
2050 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2051 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2052 Assert(cLocks > 0);
2053 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2054 {
2055 if (cLocks == 1)
2056 {
2057 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2058 pVM->pgm.s.cWriteLockedPages--;
2059 }
2060 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2061 }
2062
2063 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2064 { /* probably extremely likely */ }
2065 else
2066 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2067
2068 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2069 if (pMap)
2070 {
2071 Assert(pMap->cRefs >= 1);
2072 pMap->cRefs--;
2073 }
2074
2075 /* Yield the lock: */
2076 if ((i & 1023) == 1023)
2077 {
2078 pgmUnlock(pVM);
2079 pgmLock(pVM);
2080 }
2081 }
2082 }
2083 else
2084 {
2085 /*
2086 * Read locks:
2087 */
2088 for (uint32_t i = 0; i < cPages; i++)
2089 {
2090 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2091 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2092 Assert(cLocks > 0);
2093 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2094 {
2095 if (cLocks == 1)
2096 {
2097 Assert(pVM->pgm.s.cReadLockedPages > 0);
2098 pVM->pgm.s.cReadLockedPages--;
2099 }
2100 PGM_PAGE_DEC_READ_LOCKS(pPage);
2101 }
2102
2103 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2104 if (pMap)
2105 {
2106 Assert(pMap->cRefs >= 1);
2107 pMap->cRefs--;
2108 }
2109
2110 /* Yield the lock: */
2111 if ((i & 1023) == 1023)
2112 {
2113 pgmUnlock(pVM);
2114 pgmLock(pVM);
2115 }
2116 }
2117 }
2118 pgmUnlock(pVM);
2119
2120 RT_BZERO(paLocks, sizeof(paLocks[0]) * cPages);
2121}
2122#endif /* IN_RING3 */
2123
2124
2125/**
2126 * Release the internal mapping of a guest page.
2127 *
2128 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
2129 * pgmPhysGCPhys2CCPtrInternalReadOnly.
2130 *
2131 * @param pVM The cross context VM structure.
2132 * @param pLock The lock structure initialized by the mapping function.
2133 *
2134 * @remarks Caller must hold the PGM lock.
2135 */
2136void pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2137{
2138 PGM_LOCK_ASSERT_OWNER(pVM);
2139 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
2140}
2141
2142
2143/**
2144 * Converts a GC physical address to a HC ring-3 pointer.
2145 *
2146 * @returns VINF_SUCCESS on success.
2147 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2148 * page but has no physical backing.
2149 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2150 * GC physical address.
2151 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2152 *          a dynamic ram chunk boundary.
2153 *
2154 * @param pVM The cross context VM structure.
2155 * @param GCPhys The GC physical address to convert.
2156 * @param pR3Ptr Where to store the R3 pointer on success.
2157 *
2158 * @deprecated Avoid when possible!
2159 */
2160int pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2161{
2162/** @todo this is kind of hacky and needs some more work. */
2163#ifndef DEBUG_sandervl
2164 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
2165#endif
2166
2167 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
2168 pgmLock(pVM);
2169
2170 PPGMRAMRANGE pRam;
2171 PPGMPAGE pPage;
2172 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2173 if (RT_SUCCESS(rc))
2174 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2175
2176 pgmUnlock(pVM);
2177 Assert(rc <= VINF_SUCCESS);
2178 return rc;
2179}
2180
2181
2182/**
2183 * Converts a guest pointer to a GC physical address.
2184 *
2185 * This uses the current CR3/CR0/CR4 of the guest.
2186 *
2187 * @returns VBox status code.
2188 * @param pVCpu The cross context virtual CPU structure.
2189 * @param GCPtr The guest pointer to convert.
2190 * @param pGCPhys Where to store the GC physical address.
2191 */
2192VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2193{
2194 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
2195 if (pGCPhys && RT_SUCCESS(rc))
2196 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
2197 return rc;
2198}
2199
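/*
 * Usage sketch (illustrative only): translating a guest virtual address into a
 * guest physical one.  pVCpu and GCPtr are assumptions of a hypothetical
 * caller running on the EMT.
 *
 *     RTGCPHYS GCPhys;
 *     int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
 *     if (RT_SUCCESS(rc))
 *         Log(("GCPtr %RGv -> GCPhys %RGp\n", GCPtr, GCPhys));
 */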
2200
2201/**
2202 * Converts a guest pointer to a HC physical address.
2203 *
2204 * This uses the current CR3/CR0/CR4 of the guest.
2205 *
2206 * @returns VBox status code.
2207 * @param pVCpu The cross context virtual CPU structure.
2208 * @param GCPtr The guest pointer to convert.
2209 * @param pHCPhys Where to store the HC physical address.
2210 */
2211VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2212{
2213 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2214 RTGCPHYS GCPhys;
2215 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
2216 if (RT_SUCCESS(rc))
2217 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
2218 return rc;
2219}
2220
2221
2222
2223#undef LOG_GROUP
2224#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2225
2226
2227#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2228/**
2229 * Cache PGMPhys memory access
2230 *
2231 * @param pVM The cross context VM structure.
2232 * @param pCache Cache structure pointer
2233 * @param GCPhys GC physical address
2234 * @param   pbR3    R3 pointer corresponding to the physical page
2235 *
2236 * @thread EMT.
2237 */
2238static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2239{
2240 uint32_t iCacheIndex;
2241
2242 Assert(VM_IS_EMT(pVM));
2243
2244 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
2245 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
2246
2247 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2248
2249 ASMBitSet(&pCache->aEntries, iCacheIndex);
2250
2251 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2252 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2253}
2254#endif /* IN_RING3 */
2255
2256
2257/**
2258 * Deals with reading from a page with one or more ALL access handlers.
2259 *
2260 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2261 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2262 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2263 *
2264 * @param pVM The cross context VM structure.
2265 * @param pPage The page descriptor.
2266 * @param GCPhys The physical address to start reading at.
2267 * @param pvBuf Where to put the bits we read.
2268 * @param   cb          How much to read - less than or equal to a page.
2269 * @param enmOrigin The origin of this call.
2270 */
2271static VBOXSTRICTRC pgmPhysReadHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb,
2272 PGMACCESSORIGIN enmOrigin)
2273{
2274 /*
2275 * The most frequent access here is MMIO and shadowed ROM.
2276 * The most frequent access here is MMIO and shadowed ROM.
2277 */
2278
2279 /*
2280 * Whatever we do we need the source page, map it first.
2281 */
2282 PGMPAGEMAPLOCK PgMpLck;
2283 const void *pvSrc = NULL;
2284 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2285/** @todo Check how this can work for MMIO pages? */
2286 if (RT_FAILURE(rc))
2287 {
2288 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2289 GCPhys, pPage, rc));
2290 memset(pvBuf, 0xff, cb);
2291 return VINF_SUCCESS;
2292 }
2293
2294 VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
2295
2296 /*
2297 * Deal with any physical handlers.
2298 */
2299 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2300 PPGMPHYSHANDLER pPhys = NULL;
2301 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2302 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2303 {
2304 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2305 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2306 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
2307 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
2308 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2309#ifndef IN_RING3
2310 if (enmOrigin != PGMACCESSORIGIN_IEM)
2311 {
2312 /* Cannot reliably handle informational status codes in this context */
2313 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2314 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2315 }
2316#endif
2317 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler); Assert(pfnHandler);
2318 void *pvUser = pPhys->CTX_SUFF(pvUser);
2319
2320 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
2321 STAM_PROFILE_START(&pPhys->Stat, h);
2322 PGM_LOCK_ASSERT_OWNER(pVM);
2323
2324 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2325 pgmUnlock(pVM);
2326 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, pvUser);
2327 pgmLock(pVM);
2328
2329#ifdef VBOX_WITH_STATISTICS
2330 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2331 if (pPhys)
2332 STAM_PROFILE_STOP(&pPhys->Stat, h);
2333#else
2334 pPhys = NULL; /* might not be valid anymore. */
2335#endif
2336 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, false),
2337 ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
2338 if ( rcStrict != VINF_PGM_HANDLER_DO_DEFAULT
2339 && !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2340 {
2341 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2342 return rcStrict;
2343 }
2344 }
2345
2346 /*
2347 * Take the default action.
2348 */
2349 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2350 {
2351 memcpy(pvBuf, pvSrc, cb);
2352 rcStrict = VINF_SUCCESS;
2353 }
2354 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2355 return rcStrict;
2356}
2357
2358
2359/**
2360 * Read physical memory.
2361 *
2362 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2363 * want to ignore those.
2364 *
2365 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2366 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2367 * @retval VINF_SUCCESS in all context - read completed.
2368 *
2369 * @retval VINF_EM_OFF in RC and R0 - read completed.
2370 * @retval VINF_EM_SUSPEND in RC and R0 - read completed.
2371 * @retval VINF_EM_RESET in RC and R0 - read completed.
2372 * @retval VINF_EM_HALT in RC and R0 - read completed.
2373 * @retval VINF_SELM_SYNC_GDT in RC only - read completed.
2374 *
2375 * @retval VINF_EM_DBG_STOP in RC and R0 - read completed.
2376 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - read completed.
2377 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2378 *
2379 * @retval VINF_IOM_R3_MMIO_READ in RC and R0.
2380 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2381 *
2382 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2383 *
2384 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2385 * haven't been cleared for strict status codes yet.
2386 *
2387 * @param pVM The cross context VM structure.
2388 * @param GCPhys Physical address start reading from.
2389 * @param pvBuf Where to put the read bits.
2390 * @param cbRead How many bytes to read.
2391 * @param enmOrigin The origin of this call.
2392 */
2393VMMDECL(VBOXSTRICTRC) PGMPhysRead(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2394{
2395 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2396 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2397
2398 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysRead));
2399 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2400
2401 pgmLock(pVM);
2402
2403 /*
2404 * Copy loop on ram ranges.
2405 */
2406 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2407 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2408 for (;;)
2409 {
2410 /* Inside range or not? */
2411 if (pRam && GCPhys >= pRam->GCPhys)
2412 {
2413 /*
2414 * Must work our way thru this page by page.
2415 */
2416 RTGCPHYS off = GCPhys - pRam->GCPhys;
2417 while (off < pRam->cb)
2418 {
2419 unsigned iPage = off >> PAGE_SHIFT;
2420 PPGMPAGE pPage = &pRam->aPages[iPage];
2421 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2422 if (cb > cbRead)
2423 cb = cbRead;
2424
2425 /*
2426 * Normal page? Get the pointer to it.
2427 */
2428 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2429 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2430 {
2431 /*
2432 * Get the pointer to the page.
2433 */
2434 PGMPAGEMAPLOCK PgMpLck;
2435 const void *pvSrc;
2436 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2437 if (RT_SUCCESS(rc))
2438 {
2439 memcpy(pvBuf, pvSrc, cb);
2440 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2441 }
2442 else
2443 {
2444 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2445 pRam->GCPhys + off, pPage, rc));
2446 memset(pvBuf, 0xff, cb);
2447 }
2448 }
2449 /*
2450 * Have ALL/MMIO access handlers.
2451 */
2452 else
2453 {
2454 VBOXSTRICTRC rcStrict2 = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2455 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2456 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2457 else
2458 {
2459 memset(pvBuf, 0xff, cb);
2460 pgmUnlock(pVM);
2461 return rcStrict2;
2462 }
2463 }
2464
2465 /* next page */
2466 if (cb >= cbRead)
2467 {
2468 pgmUnlock(pVM);
2469 return rcStrict;
2470 }
2471 cbRead -= cb;
2472 off += cb;
2473 pvBuf = (char *)pvBuf + cb;
2474 } /* walk pages in ram range. */
2475
2476 GCPhys = pRam->GCPhysLast + 1;
2477 }
2478 else
2479 {
2480 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2481
2482 /*
2483 * Unassigned address space.
2484 */
2485 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2486 if (cb >= cbRead)
2487 {
2488 memset(pvBuf, 0xff, cbRead);
2489 break;
2490 }
2491 memset(pvBuf, 0xff, cb);
2492
2493 cbRead -= cb;
2494 pvBuf = (char *)pvBuf + cb;
2495 GCPhys += cb;
2496 }
2497
2498 /* Advance range if necessary. */
2499 while (pRam && GCPhys > pRam->GCPhysLast)
2500 pRam = pRam->CTX_SUFF(pNext);
2501 } /* Ram range walk */
2502
2503 pgmUnlock(pVM);
2504 return rcStrict;
2505}
2506
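/*
 * Usage sketch (illustrative only): reading a small chunk of guest physical
 * memory with handlers respected.  pVM and GCPhys are assumed to come from a
 * hypothetical caller; PGMACCESSORIGIN_IEM is used as origin since it is the
 * only one cleared for informational status codes outside ring-3.
 *
 *     uint8_t      abBuf[64];
 *     VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, abBuf, sizeof(abBuf), PGMACCESSORIGIN_IEM);
 *     if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *     {
 *         // abBuf is now filled; unassigned space reads back as 0xff.
 *     }
 */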
2507
2508/**
2509 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2510 *
2511 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2512 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2513 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2514 *
2515 * @param pVM The cross context VM structure.
2516 * @param pPage The page descriptor.
2517 * @param GCPhys The physical address to start writing at.
2518 * @param pvBuf What to write.
2519 * @param   cbWrite     How much to write - less than or equal to a page.
2520 * @param enmOrigin The origin of this call.
2521 */
2522static VBOXSTRICTRC pgmPhysWriteHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2523 PGMACCESSORIGIN enmOrigin)
2524{
2525 PGMPAGEMAPLOCK PgMpLck;
2526 void *pvDst = NULL;
2527 VBOXSTRICTRC rcStrict;
2528
2529 /*
2530 * Give priority to physical handlers (like #PF does).
2531 *
2532 * Hope for a lonely physical handler first that covers the whole
2533 * write area. This should be a pretty frequent case with MMIO and
2534 * the heavy usage of full page handlers in the page pool.
2535 */
2536 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2537 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2538 if (pCur)
2539 {
2540 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2541#ifndef IN_RING3
2542 if (enmOrigin != PGMACCESSORIGIN_IEM)
2543 /* Cannot reliably handle informational status codes in this context */
2544 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2545#endif
2546 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2547 if (cbRange > cbWrite)
2548 cbRange = cbWrite;
2549
2550 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler));
2551 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2552 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2553 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2554 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2555 else
2556 rcStrict = VINF_SUCCESS;
2557 if (RT_SUCCESS(rcStrict))
2558 {
2559 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler);
2560 void *pvUser = pCur->CTX_SUFF(pvUser);
2561 STAM_PROFILE_START(&pCur->Stat, h);
2562
2563 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2564 PGM_LOCK_ASSERT_OWNER(pVM);
2565 pgmUnlock(pVM);
2566 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2567 pgmLock(pVM);
2568
2569#ifdef VBOX_WITH_STATISTICS
2570 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2571 if (pCur)
2572 STAM_PROFILE_STOP(&pCur->Stat, h);
2573#else
2574 pCur = NULL; /* might not be valid anymore. */
2575#endif
2576 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2577 {
2578 if (pvDst)
2579 memcpy(pvDst, pvBuf, cbRange);
2580 rcStrict = VINF_SUCCESS;
2581 }
2582 else
2583 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
2584 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2585 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pCur ? R3STRING(pCur->pszDesc) : ""));
2586 }
2587 else
2588 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2589 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
2590 if (RT_LIKELY(cbRange == cbWrite) || !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2591 {
2592 if (pvDst)
2593 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2594 return rcStrict;
2595 }
2596
2597 /* more fun to be had below */
2598 cbWrite -= cbRange;
2599 GCPhys += cbRange;
2600 pvBuf = (uint8_t *)pvBuf + cbRange;
2601 pvDst = (uint8_t *)pvDst + cbRange;
2602 }
2603 else /* The handler is somewhere else in the page, deal with it below. */
2604 rcStrict = VINF_SUCCESS;
2605 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2606
2607 /*
2608 * Deal with all the odd ends (used to be deal with virt+phys).
2609 */
2610 Assert(rcStrict != VINF_PGM_HANDLER_DO_DEFAULT);
2611
2612 /* We need a writable destination page. */
2613 if (!pvDst)
2614 {
2615 int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2616 AssertLogRelMsgReturn(RT_SUCCESS(rc2),
2617 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n", GCPhys, pPage, rc2),
2618 rc2);
2619 }
2620
2621 /* The loop state (big + ugly). */
2622 PPGMPHYSHANDLER pPhys = NULL;
2623 uint32_t offPhys = PAGE_SIZE;
2624 uint32_t offPhysLast = PAGE_SIZE;
2625 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2626
2627 /* The loop. */
2628 for (;;)
2629 {
2630 if (fMorePhys && !pPhys)
2631 {
2632 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2633 if (pPhys)
2634 {
2635 offPhys = 0;
2636 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2637 }
2638 else
2639 {
2640 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2641 GCPhys, true /* fAbove */);
2642 if ( pPhys
2643 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2644 {
2645 offPhys = pPhys->Core.Key - GCPhys;
2646 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2647 }
2648 else
2649 {
2650 pPhys = NULL;
2651 fMorePhys = false;
2652 offPhys = offPhysLast = PAGE_SIZE;
2653 }
2654 }
2655 }
2656
2657 /*
2658 * Handle access to space without handlers (that's easy).
2659 */
2660 VBOXSTRICTRC rcStrict2 = VINF_PGM_HANDLER_DO_DEFAULT;
2661 uint32_t cbRange = (uint32_t)cbWrite;
2662
2663 /*
2664 * Physical handler.
2665 */
2666 if (!offPhys)
2667 {
2668#ifndef IN_RING3
2669 if (enmOrigin != PGMACCESSORIGIN_IEM)
2670 /* Cannot reliably handle informational status codes in this context */
2671 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2672#endif
2673 if (cbRange > offPhysLast + 1)
2674 cbRange = offPhysLast + 1;
2675
2676 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
2677 void *pvUser = pPhys->CTX_SUFF(pvUser);
2678
2679 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2680 STAM_PROFILE_START(&pPhys->Stat, h);
2681
2682 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2683 PGM_LOCK_ASSERT_OWNER(pVM);
2684 pgmUnlock(pVM);
2685 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2686 pgmLock(pVM);
2687
2688#ifdef VBOX_WITH_STATISTICS
2689 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2690 if (pPhys)
2691 STAM_PROFILE_STOP(&pPhys->Stat, h);
2692#else
2693 pPhys = NULL; /* might not be valid anymore. */
2694#endif
2695 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true),
2696 ("rcStrict2=%Rrc (rcStrict=%Rrc) GCPhys=%RGp pPage=%R[pgmpage] %s\n", VBOXSTRICTRC_VAL(rcStrict2),
2697 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pPhys ? R3STRING(pPhys->pszDesc) : ""));
2698 }
2699
2700 /*
2701 * Execute the default action and merge the status codes.
2702 */
2703 if (rcStrict2 == VINF_PGM_HANDLER_DO_DEFAULT)
2704 {
2705 memcpy(pvDst, pvBuf, cbRange);
2706 rcStrict2 = VINF_SUCCESS;
2707 }
2708 else if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2709 {
2710 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2711 return rcStrict2;
2712 }
2713 else
2714 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2715
2716 /*
2717 * Advance if we've got more stuff to do.
2718 */
2719 if (cbRange >= cbWrite)
2720 {
2721 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2722 return rcStrict;
2723 }
2724
2725
2726 cbWrite -= cbRange;
2727 GCPhys += cbRange;
2728 pvBuf = (uint8_t *)pvBuf + cbRange;
2729 pvDst = (uint8_t *)pvDst + cbRange;
2730
2731 offPhys -= cbRange;
2732 offPhysLast -= cbRange;
2733 }
2734}
2735
2736
2737/**
2738 * Write to physical memory.
2739 *
2740 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2741 * want to ignore those.
2742 *
2743 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2744 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2745 * @retval VINF_SUCCESS in all context - write completed.
2746 *
2747 * @retval VINF_EM_OFF in RC and R0 - write completed.
2748 * @retval VINF_EM_SUSPEND in RC and R0 - write completed.
2749 * @retval VINF_EM_RESET in RC and R0 - write completed.
2750 * @retval VINF_EM_HALT in RC and R0 - write completed.
2751 * @retval VINF_SELM_SYNC_GDT in RC only - write completed.
2752 *
2753 * @retval VINF_EM_DBG_STOP in RC and R0 - write completed.
2754 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - write completed.
2755 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2756 *
2757 * @retval VINF_IOM_R3_MMIO_WRITE in RC and R0.
2758 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2759 * @retval VINF_IOM_R3_MMIO_COMMIT_WRITE in RC and R0.
2760 *
2761 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT in RC only - write completed.
2762 * @retval VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT in RC only.
2763 * @retval VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT in RC only.
2764 * @retval VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT in RC only.
2765 * @retval VINF_CSAM_PENDING_ACTION in RC only.
2766 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2767 *
2768 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2769 * haven't been cleared for strict status codes yet.
2770 *
2771 *
2772 * @param pVM The cross context VM structure.
2773 * @param GCPhys Physical address to write to.
2774 * @param pvBuf What to write.
2775 * @param cbWrite How many bytes to write.
2776 * @param enmOrigin Who is calling.
2777 */
2778VMMDECL(VBOXSTRICTRC) PGMPhysWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2779{
2780 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2781 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2782 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2783
2784 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWrite));
2785 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2786
2787 pgmLock(pVM);
2788
2789 /*
2790 * Copy loop on ram ranges.
2791 */
2792 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2793 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2794 for (;;)
2795 {
2796 /* Inside range or not? */
2797 if (pRam && GCPhys >= pRam->GCPhys)
2798 {
2799 /*
2800 * Must work our way thru this page by page.
2801 */
2802 RTGCPTR off = GCPhys - pRam->GCPhys;
2803 while (off < pRam->cb)
2804 {
2805 RTGCPTR iPage = off >> PAGE_SHIFT;
2806 PPGMPAGE pPage = &pRam->aPages[iPage];
2807 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2808 if (cb > cbWrite)
2809 cb = cbWrite;
2810
2811 /*
2812 * Normal page? Get the pointer to it.
2813 */
2814 if ( !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
2815 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2816 {
2817 PGMPAGEMAPLOCK PgMpLck;
2818 void *pvDst;
2819 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
2820 if (RT_SUCCESS(rc))
2821 {
2822 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2823 memcpy(pvDst, pvBuf, cb);
2824 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2825 }
2826 /* Ignore writes to ballooned pages. */
2827 else if (!PGM_PAGE_IS_BALLOONED(pPage))
2828 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2829 pRam->GCPhys + off, pPage, rc));
2830 }
2831 /*
2832 * Active WRITE or ALL access handlers.
2833 */
2834 else
2835 {
2836 VBOXSTRICTRC rcStrict2 = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2837 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2838 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2839 else
2840 {
2841 pgmUnlock(pVM);
2842 return rcStrict2;
2843 }
2844 }
2845
2846 /* next page */
2847 if (cb >= cbWrite)
2848 {
2849 pgmUnlock(pVM);
2850 return rcStrict;
2851 }
2852
2853 cbWrite -= cb;
2854 off += cb;
2855 pvBuf = (const char *)pvBuf + cb;
2856 } /* walk pages in ram range */
2857
2858 GCPhys = pRam->GCPhysLast + 1;
2859 }
2860 else
2861 {
2862 /*
2863 * Unassigned address space, skip it.
2864 */
2865 if (!pRam)
2866 break;
2867 size_t cb = pRam->GCPhys - GCPhys;
2868 if (cb >= cbWrite)
2869 break;
2870 cbWrite -= cb;
2871 pvBuf = (const char *)pvBuf + cb;
2872 GCPhys += cb;
2873 }
2874
2875 /* Advance range if necessary. */
2876 while (pRam && GCPhys > pRam->GCPhysLast)
2877 pRam = pRam->CTX_SUFF(pNext);
2878 } /* Ram range walk */
2879
2880 pgmUnlock(pVM);
2881 return rcStrict;
2882}
2883
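/*
 * Usage sketch (illustrative only): writing a small value to guest physical
 * memory with handlers respected.  pVM and GCPhys are assumptions of a
 * hypothetical caller.
 *
 *     uint32_t const u32Value = UINT32_C(0xdeadbeef);
 *     VBOXSTRICTRC   rcStrict = PGMPhysWrite(pVM, GCPhys, &u32Value, sizeof(u32Value), PGMACCESSORIGIN_IEM);
 *     if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *     {
 *         // Strict status (e.g. VINF_IOM_R3_MMIO_WRITE) must be propagated by the caller.
 *     }
 */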
2884
2885/**
2886 * Read from guest physical memory by GC physical address, bypassing
2887 * MMIO and access handlers.
2888 *
2889 * @returns VBox status code.
2890 * @param pVM The cross context VM structure.
2891 * @param pvDst The destination address.
2892 * @param GCPhysSrc The source address (GC physical address).
2893 * @param cb The number of bytes to read.
2894 */
2895VMMDECL(int) PGMPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2896{
2897 /*
2898 * Treat the first page as a special case.
2899 */
2900 if (!cb)
2901 return VINF_SUCCESS;
2902
2903 /* map the 1st page */
2904 void const *pvSrc;
2905 PGMPAGEMAPLOCK Lock;
2906 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2907 if (RT_FAILURE(rc))
2908 return rc;
2909
2910 /* optimize for the case where access is completely within the first page. */
2911 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2912 if (RT_LIKELY(cb <= cbPage))
2913 {
2914 memcpy(pvDst, pvSrc, cb);
2915 PGMPhysReleasePageMappingLock(pVM, &Lock);
2916 return VINF_SUCCESS;
2917 }
2918
2919 /* copy to the end of the page. */
2920 memcpy(pvDst, pvSrc, cbPage);
2921 PGMPhysReleasePageMappingLock(pVM, &Lock);
2922 GCPhysSrc += cbPage;
2923 pvDst = (uint8_t *)pvDst + cbPage;
2924 cb -= cbPage;
2925
2926 /*
2927 * Page by page.
2928 */
2929 for (;;)
2930 {
2931 /* map the page */
2932 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2933 if (RT_FAILURE(rc))
2934 return rc;
2935
2936 /* last page? */
2937 if (cb <= PAGE_SIZE)
2938 {
2939 memcpy(pvDst, pvSrc, cb);
2940 PGMPhysReleasePageMappingLock(pVM, &Lock);
2941 return VINF_SUCCESS;
2942 }
2943
2944 /* copy the entire page and advance */
2945 memcpy(pvDst, pvSrc, PAGE_SIZE);
2946 PGMPhysReleasePageMappingLock(pVM, &Lock);
2947 GCPhysSrc += PAGE_SIZE;
2948 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2949 cb -= PAGE_SIZE;
2950 }
2951 /* won't ever get here. */
2952}
2953
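/*
 * Usage sketch (illustrative only): peeking at guest physical memory without
 * triggering access handlers, e.g. for debugging.  pVM and GCPhysSrc are
 * assumed to be provided by a hypothetical caller.
 *
 *     uint64_t u64;
 *     int rc = PGMPhysSimpleReadGCPhys(pVM, &u64, GCPhysSrc, sizeof(u64));
 *     if (RT_SUCCESS(rc))
 *         Log(("qword at %RGp: %#RX64\n", GCPhysSrc, u64));
 */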
2954
2955/**
2956 * Write to guest physical memory by GC physical address, bypassing
2957 * MMIO and access handlers.
2958 *
2959 * Use PGMPhysWrite() if you want access handlers respected.
2960 *
2961 * @returns VBox status code.
2962 * @param pVM The cross context VM structure.
2963 * @param GCPhysDst The GC physical address of the destination.
2964 * @param pvSrc The source buffer.
2965 * @param cb The number of bytes to write.
2966 */
2967VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2968{
2969 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2970
2971 /*
2972 * Treat the first page as a special case.
2973 */
2974 if (!cb)
2975 return VINF_SUCCESS;
2976
2977 /* map the 1st page */
2978 void *pvDst;
2979 PGMPAGEMAPLOCK Lock;
2980 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2981 if (RT_FAILURE(rc))
2982 return rc;
2983
2984 /* optimize for the case where access is completely within the first page. */
2985 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2986 if (RT_LIKELY(cb <= cbPage))
2987 {
2988 memcpy(pvDst, pvSrc, cb);
2989 PGMPhysReleasePageMappingLock(pVM, &Lock);
2990 return VINF_SUCCESS;
2991 }
2992
2993 /* copy to the end of the page. */
2994 memcpy(pvDst, pvSrc, cbPage);
2995 PGMPhysReleasePageMappingLock(pVM, &Lock);
2996 GCPhysDst += cbPage;
2997 pvSrc = (const uint8_t *)pvSrc + cbPage;
2998 cb -= cbPage;
2999
3000 /*
3001 * Page by page.
3002 */
3003 for (;;)
3004 {
3005 /* map the page */
3006 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3007 if (RT_FAILURE(rc))
3008 return rc;
3009
3010 /* last page? */
3011 if (cb <= PAGE_SIZE)
3012 {
3013 memcpy(pvDst, pvSrc, cb);
3014 PGMPhysReleasePageMappingLock(pVM, &Lock);
3015 return VINF_SUCCESS;
3016 }
3017
3018 /* copy the entire page and advance */
3019 memcpy(pvDst, pvSrc, PAGE_SIZE);
3020 PGMPhysReleasePageMappingLock(pVM, &Lock);
3021 GCPhysDst += PAGE_SIZE;
3022 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3023 cb -= PAGE_SIZE;
3024 }
3025 /* won't ever get here. */
3026}
3027
3028
3029/**
3030 * Read from guest physical memory referenced by GC pointer.
3031 *
3032 * This function uses the current CR3/CR0/CR4 of the guest and will
3033 * bypass access handlers and not set any accessed bits.
3034 *
3035 * @returns VBox status code.
3036 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3037 * @param pvDst The destination address.
3038 * @param GCPtrSrc The source address (GC pointer).
3039 * @param cb The number of bytes to read.
3040 */
3041VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3042{
3043 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3044/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3045
3046 /*
3047 * Treat the first page as a special case.
3048 */
3049 if (!cb)
3050 return VINF_SUCCESS;
3051
3052 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleRead));
3053 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3054
3055 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3056 * when many VCPUs are fighting for the lock.
3057 */
3058 pgmLock(pVM);
3059
3060 /* map the 1st page */
3061 void const *pvSrc;
3062 PGMPAGEMAPLOCK Lock;
3063 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3064 if (RT_FAILURE(rc))
3065 {
3066 pgmUnlock(pVM);
3067 return rc;
3068 }
3069
3070 /* optimize for the case where access is completely within the first page. */
3071 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3072 if (RT_LIKELY(cb <= cbPage))
3073 {
3074 memcpy(pvDst, pvSrc, cb);
3075 PGMPhysReleasePageMappingLock(pVM, &Lock);
3076 pgmUnlock(pVM);
3077 return VINF_SUCCESS;
3078 }
3079
3080 /* copy to the end of the page. */
3081 memcpy(pvDst, pvSrc, cbPage);
3082 PGMPhysReleasePageMappingLock(pVM, &Lock);
3083 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3084 pvDst = (uint8_t *)pvDst + cbPage;
3085 cb -= cbPage;
3086
3087 /*
3088 * Page by page.
3089 */
3090 for (;;)
3091 {
3092 /* map the page */
3093 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3094 if (RT_FAILURE(rc))
3095 {
3096 pgmUnlock(pVM);
3097 return rc;
3098 }
3099
3100 /* last page? */
3101 if (cb <= PAGE_SIZE)
3102 {
3103 memcpy(pvDst, pvSrc, cb);
3104 PGMPhysReleasePageMappingLock(pVM, &Lock);
3105 pgmUnlock(pVM);
3106 return VINF_SUCCESS;
3107 }
3108
3109 /* copy the entire page and advance */
3110 memcpy(pvDst, pvSrc, PAGE_SIZE);
3111 PGMPhysReleasePageMappingLock(pVM, &Lock);
3112 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
3113 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3114 cb -= PAGE_SIZE;
3115 }
3116 /* won't ever get here. */
3117}
3118
3119
3120/**
3121 * Write to guest physical memory referenced by GC pointer.
3122 *
3123 * This function uses the current CR3/CR0/CR4 of the guest and will
3124 * bypass access handlers and not set dirty or accessed bits.
3125 *
3126 * @returns VBox status code.
3127 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3128 * @param GCPtrDst The destination address (GC pointer).
3129 * @param pvSrc The source address.
3130 * @param cb The number of bytes to write.
3131 */
3132VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3133{
3134 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3135 VMCPU_ASSERT_EMT(pVCpu);
3136
3137 /*
3138 * Treat the first page as a special case.
3139 */
3140 if (!cb)
3141 return VINF_SUCCESS;
3142
3143 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWrite));
3144 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3145
3146 /* map the 1st page */
3147 void *pvDst;
3148 PGMPAGEMAPLOCK Lock;
3149 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3150 if (RT_FAILURE(rc))
3151 return rc;
3152
3153 /* optimize for the case where access is completely within the first page. */
3154 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3155 if (RT_LIKELY(cb <= cbPage))
3156 {
3157 memcpy(pvDst, pvSrc, cb);
3158 PGMPhysReleasePageMappingLock(pVM, &Lock);
3159 return VINF_SUCCESS;
3160 }
3161
3162 /* copy to the end of the page. */
3163 memcpy(pvDst, pvSrc, cbPage);
3164 PGMPhysReleasePageMappingLock(pVM, &Lock);
3165 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3166 pvSrc = (const uint8_t *)pvSrc + cbPage;
3167 cb -= cbPage;
3168
3169 /*
3170 * Page by page.
3171 */
3172 for (;;)
3173 {
3174 /* map the page */
3175 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3176 if (RT_FAILURE(rc))
3177 return rc;
3178
3179 /* last page? */
3180 if (cb <= PAGE_SIZE)
3181 {
3182 memcpy(pvDst, pvSrc, cb);
3183 PGMPhysReleasePageMappingLock(pVM, &Lock);
3184 return VINF_SUCCESS;
3185 }
3186
3187 /* copy the entire page and advance */
3188 memcpy(pvDst, pvSrc, PAGE_SIZE);
3189 PGMPhysReleasePageMappingLock(pVM, &Lock);
3190 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3191 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3192 cb -= PAGE_SIZE;
3193 }
3194 /* won't ever get here. */
3195}
3196
3197
3198/**
3199 * Write to guest physical memory referenced by GC pointer and update the PTE.
3200 *
3201 * This function uses the current CR3/CR0/CR4 of the guest and will
3202 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3203 *
3204 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3205 *
3206 * @returns VBox status code.
3207 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3208 * @param GCPtrDst The destination address (GC pointer).
3209 * @param pvSrc The source address.
3210 * @param cb The number of bytes to write.
3211 */
3212VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3213{
3214 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3215 VMCPU_ASSERT_EMT(pVCpu);
3216
3217 /*
3218 * Treat the first page as a special case.
3219 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3220 */
3221 if (!cb)
3222 return VINF_SUCCESS;
3223
3224 /* map the 1st page */
3225 void *pvDst;
3226 PGMPAGEMAPLOCK Lock;
3227 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3228 if (RT_FAILURE(rc))
3229 return rc;
3230
3231 /* optimize for the case where access is completely within the first page. */
3232 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3233 if (RT_LIKELY(cb <= cbPage))
3234 {
3235 memcpy(pvDst, pvSrc, cb);
3236 PGMPhysReleasePageMappingLock(pVM, &Lock);
3237 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3238 return VINF_SUCCESS;
3239 }
3240
3241 /* copy to the end of the page. */
3242 memcpy(pvDst, pvSrc, cbPage);
3243 PGMPhysReleasePageMappingLock(pVM, &Lock);
3244 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3245 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3246 pvSrc = (const uint8_t *)pvSrc + cbPage;
3247 cb -= cbPage;
3248
3249 /*
3250 * Page by page.
3251 */
3252 for (;;)
3253 {
3254 /* map the page */
3255 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3256 if (RT_FAILURE(rc))
3257 return rc;
3258
3259 /* last page? */
3260 if (cb <= PAGE_SIZE)
3261 {
3262 memcpy(pvDst, pvSrc, cb);
3263 PGMPhysReleasePageMappingLock(pVM, &Lock);
3264 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3265 return VINF_SUCCESS;
3266 }
3267
3268 /* copy the entire page and advance */
3269 memcpy(pvDst, pvSrc, PAGE_SIZE);
3270 PGMPhysReleasePageMappingLock(pVM, &Lock);
3271 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3272 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3273 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3274 cb -= PAGE_SIZE;
3275 }
3276 /* won't ever get here. */
3277}
3278
3279
3280/**
3281 * Read from guest physical memory referenced by GC pointer.
3282 *
3283 * This function uses the current CR3/CR0/CR4 of the guest and will
3284 * respect access handlers and set accessed bits.
3285 *
3286 * @returns Strict VBox status, see PGMPhysRead for details.
3287 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3288 * specified virtual address.
3289 *
3290 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3291 * @param pvDst The destination address.
3292 * @param GCPtrSrc The source address (GC pointer).
3293 * @param cb The number of bytes to read.
3294 * @param enmOrigin Who is calling.
3295 * @thread EMT(pVCpu)
3296 */
3297VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3298{
3299 RTGCPHYS GCPhys;
3300 uint64_t fFlags;
3301 int rc;
3302 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3303 VMCPU_ASSERT_EMT(pVCpu);
3304
3305 /*
3306 * Anything to do?
3307 */
3308 if (!cb)
3309 return VINF_SUCCESS;
3310
3311 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3312
3313 /*
3314 * Optimize reads within a single page.
3315 */
3316 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3317 {
3318 /* Convert virtual to physical address + flags */
3319 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3320 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3321 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3322
3323 /* mark the guest page as accessed. */
3324 if (!(fFlags & X86_PTE_A))
3325 {
3326 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3327 AssertRC(rc);
3328 }
3329
3330 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3331 }
3332
3333 /*
3334 * Page by page.
3335 */
3336 for (;;)
3337 {
3338 /* Convert virtual to physical address + flags */
3339 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3340 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3341 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3342
3343 /* mark the guest page as accessed. */
3344 if (!(fFlags & X86_PTE_A))
3345 {
3346 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3347 AssertRC(rc);
3348 }
3349
3350 /* copy */
3351 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3352 if (cbRead < cb)
3353 {
3354 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3355 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3356 { /* likely */ }
3357 else
3358 return rcStrict;
3359 }
3360 else /* Last page (cbRead is PAGE_SIZE, we only need cb!) */
3361 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3362
3363 /* next */
3364 Assert(cb > cbRead);
3365 cb -= cbRead;
3366 pvDst = (uint8_t *)pvDst + cbRead;
3367 GCPtrSrc += cbRead;
3368 }
3369}
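
/*
 * Usage sketch (illustrative, hypothetical helper): how an EMT-bound caller
 * could pull a small guest structure out of guest virtual memory with
 * PGMPhysReadGCPtr.  The helper name, the SAMPLEDESC type and the enmOrigin
 * value supplied by the caller are assumptions for the illustration; only the
 * PGMPhysReadGCPtr call follows the API above.
 */
#if 0 /* illustration only, not built */
typedef struct SAMPLEDESC
{
    uint32_t u32Magic;
    uint32_t cbPayload;
} SAMPLEDESC;

static VBOXSTRICTRC sampleReadGuestDesc(PVMCPUCC pVCpu, RTGCPTR GCPtrDesc, SAMPLEDESC *pDesc,
                                        PGMACCESSORIGIN enmOrigin)
{
    /* Translates through the current CR3/CR0/CR4, sets the accessed bit and
       honours physical access handlers. */
    VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, pDesc, GCPtrDesc, sizeof(*pDesc), enmOrigin);
    if (rcStrict != VINF_SUCCESS)
        /* VERR_PAGE_TABLE_NOT_PRESENT means the guest mapping is missing; strict
           informational statuses come from access handlers and must be propagated. */
        LogFlow(("sampleReadGuestDesc: %RGv -> %Rrc\n", GCPtrDesc, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif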
3370
3371
3372/**
3373 * Write to guest physical memory referenced by GC pointer.
3374 *
3375 * This function uses the current CR3/CR0/CR4 of the guest and will
3376 * respect access handlers and set dirty and accessed bits.
3377 *
3378 * @returns Strict VBox status, see PGMPhysWrite for details.
3379 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3380 * specified virtual address.
3381 *
3382 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3383 * @param GCPtrDst The destination address (GC pointer).
3384 * @param pvSrc The source address.
3385 * @param cb The number of bytes to write.
3386 * @param enmOrigin Who is calling.
3387 */
3388VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3389{
3390 RTGCPHYS GCPhys;
3391 uint64_t fFlags;
3392 int rc;
3393 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3394 VMCPU_ASSERT_EMT(pVCpu);
3395
3396 /*
3397 * Anything to do?
3398 */
3399 if (!cb)
3400 return VINF_SUCCESS;
3401
3402 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3403
3404 /*
3405 * Optimize writes within a single page.
3406 */
3407 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3408 {
3409 /* Convert virtual to physical address + flags */
3410 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3411 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3412 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3413
3414 /* Mention when we ignore X86_PTE_RW... */
3415 if (!(fFlags & X86_PTE_RW))
3416 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3417
3418 /* Mark the guest page as accessed and dirty if necessary. */
3419 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3420 {
3421 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3422 AssertRC(rc);
3423 }
3424
3425 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3426 }
3427
3428 /*
3429 * Page by page.
3430 */
3431 for (;;)
3432 {
3433 /* Convert virtual to physical address + flags */
3434 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3435 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3436 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3437
3438 /* Mention when we ignore X86_PTE_RW... */
3439 if (!(fFlags & X86_PTE_RW))
3440 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3441
3442 /* Mark the guest page as accessed and dirty if necessary. */
3443 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3444 {
3445 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3446 AssertRC(rc);
3447 }
3448
3449 /* copy */
3450 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3451 if (cbWrite < cb)
3452 {
3453 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3454 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3455 { /* likely */ }
3456 else
3457 return rcStrict;
3458 }
3459 else /* Last page (cbWrite is PAGE_SIZE, we only need cb!) */
3460 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3461
3462 /* next */
3463 Assert(cb > cbWrite);
3464 cb -= cbWrite;
3465 pvSrc = (uint8_t *)pvSrc + cbWrite;
3466 GCPtrDst += cbWrite;
3467 }
3468}
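
/*
 * Usage sketch (illustrative, hypothetical helper): the write-side counterpart
 * of the example above.  The helper name is an assumption; the
 * PGMPhysWriteGCPtr call matches the API above, which sets the accessed/dirty
 * bits and, as the code notes, only logs (does not fail) writes that hit a
 * read-only guest PTE.
 */
#if 0 /* illustration only, not built */
static VBOXSTRICTRC sampleWriteGuestU64(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, uint64_t uValue,
                                        PGMACCESSORIGIN enmOrigin)
{
    /* Callers emulating the CPU must do their own permission checks first;
       this API performs the write regardless of X86_PTE_RW. */
    return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, &uValue, sizeof(uValue), enmOrigin);
}
#endif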
3469
3470
3471/**
3472 * Performs a read of guest virtual memory for instruction emulation.
3473 *
3474 * This will check permissions, raise exceptions and update the access bits.
3475 *
3476 * The current implementation will bypass all access handlers. It may later be
3477 * changed to at least respect MMIO.
3478 *
3479 *
3480 * @returns VBox status code suitable to scheduling.
3481 * @retval VINF_SUCCESS if the read was performed successfully.
3482 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3483 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3484 *
3485 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3486 * @param pCtxCore The context core.
3487 * @param pvDst Where to put the bytes we've read.
3488 * @param GCPtrSrc The source address.
3489 * @param cb The number of bytes to read. Not more than a page.
3490 *
3491 * @remark This function will dynamically map physical pages in GC. This may unmap
3492 * mappings done by the caller. Be careful!
3493 */
3494VMMDECL(int) PGMPhysInterpretedRead(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3495{
3496 NOREF(pCtxCore);
3497 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3498 Assert(cb <= PAGE_SIZE);
3499 VMCPU_ASSERT_EMT(pVCpu);
3500
3501/** @todo r=bird: This isn't perfect!
3502 * -# It's not checking for reserved bits being 1.
3503 * -# It's not correctly dealing with the access bit.
3504 * -# It's not respecting MMIO memory or any other access handlers.
3505 */
3506 /*
3507 * 1. Translate virtual to physical. This may fault.
3508 * 2. Map the physical address.
3509 * 3. Do the read operation.
3510 * 4. Set access bits if required.
3511 */
3512 int rc;
3513 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3514 if (cb <= cb1)
3515 {
3516 /*
3517 * Not crossing pages.
3518 */
3519 RTGCPHYS GCPhys;
3520 uint64_t fFlags;
3521 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3522 if (RT_SUCCESS(rc))
3523 {
3524 /** @todo we should check reserved bits ... */
3525 PGMPAGEMAPLOCK PgMpLck;
3526 void const *pvSrc;
3527 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
3528 switch (rc)
3529 {
3530 case VINF_SUCCESS:
3531 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3532 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3533 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3534 break;
3535 case VERR_PGM_PHYS_PAGE_RESERVED:
3536 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3537 memset(pvDst, 0xff, cb);
3538 break;
3539 default:
3540 Assert(RT_FAILURE_NP(rc));
3541 return rc;
3542 }
3543
3544 /** @todo access bit emulation isn't 100% correct. */
3545 if (!(fFlags & X86_PTE_A))
3546 {
3547 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3548 AssertRC(rc);
3549 }
3550 return VINF_SUCCESS;
3551 }
3552 }
3553 else
3554 {
3555 /*
3556 * Crosses pages.
3557 */
3558 size_t cb2 = cb - cb1;
3559 uint64_t fFlags1;
3560 RTGCPHYS GCPhys1;
3561 uint64_t fFlags2;
3562 RTGCPHYS GCPhys2;
3563 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3564 if (RT_SUCCESS(rc))
3565 {
3566 rc = PGMGstGetPage(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3567 if (RT_SUCCESS(rc))
3568 {
3569 /** @todo we should check reserved bits ... */
3570 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3571 PGMPAGEMAPLOCK PgMpLck;
3572 void const *pvSrc1;
3573 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
3574 switch (rc)
3575 {
3576 case VINF_SUCCESS:
3577 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3578 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3579 break;
3580 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3581 memset(pvDst, 0xff, cb1);
3582 break;
3583 default:
3584 Assert(RT_FAILURE_NP(rc));
3585 return rc;
3586 }
3587
3588 void const *pvSrc2;
3589 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
3590 switch (rc)
3591 {
3592 case VINF_SUCCESS:
3593 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3594 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3595 break;
3596 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3597 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3598 break;
3599 default:
3600 Assert(RT_FAILURE_NP(rc));
3601 return rc;
3602 }
3603
3604 if (!(fFlags1 & X86_PTE_A))
3605 {
3606 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3607 AssertRC(rc);
3608 }
3609 if (!(fFlags2 & X86_PTE_A))
3610 {
3611 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3612 AssertRC(rc);
3613 }
3614 return VINF_SUCCESS;
3615 }
3616 }
3617 }
3618
3619 /*
3620 * Raise a #PF.
3621 */
3622 uint32_t uErr;
3623
3624 /* Get the current privilege level. */
3625 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3626 switch (rc)
3627 {
3628 case VINF_SUCCESS:
3629 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3630 break;
3631
3632 case VERR_PAGE_NOT_PRESENT:
3633 case VERR_PAGE_TABLE_NOT_PRESENT:
3634 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3635 break;
3636
3637 default:
3638 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3639 return rc;
3640 }
3641 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3642 rc = TRPMAssertXcptPF(pVCpu, GCPtrSrc, uErr);
3643 if (RT_SUCCESS(rc))
3644 return VINF_EM_RAW_GUEST_TRAP;
3645 return rc;
3646}
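
/*
 * Usage sketch (illustrative, hypothetical helper): fetching a few bytes
 * during instruction emulation.  Passing NULL for pCtxCore follows from the
 * NOREF() at the top of the function; the helper name is an assumption.
 */
#if 0 /* illustration only, not built */
static int sampleFetchBytes(PVMCPUCC pVCpu, RTGCPTR GCPtrSrc, uint8_t *pabDst, size_t cbDst)
{
    Assert(cbDst <= PAGE_SIZE);
    int rc = PGMPhysInterpretedRead(pVCpu, NULL /* pCtxCore is unused */, pabDst,
                                    (RTGCUINTPTR)GCPtrSrc, cbDst);
    if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        /* A #PF was asserted via TRPM; the caller should return to the
           execution loop so the trap gets delivered to the guest. */
    }
    return rc;
}
#endif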
3647
3648
3649/**
3650 * Performs a read of guest virtual memory for instruction emulation.
3651 *
3652 * This will check permissions, raise exceptions and update the access bits.
3653 *
3654 * The current implementation will bypass all access handlers. It may later be
3655 * changed to at least respect MMIO.
3656 *
3657 *
3658 * @returns VBox status code suitable to scheduling.
3659 * @retval VINF_SUCCESS if the read was performed successfully.
3660 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3661 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3662 *
3663 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3664 * @param pCtxCore The context core.
3665 * @param pvDst Where to put the bytes we've read.
3666 * @param GCPtrSrc The source address.
3667 * @param cb The number of bytes to read. Not more than a page.
3668 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3669 * an appropriate error status will be returned (no
3670 * informational statuses at all).
3671 *
3672 *
3673 * @remarks Takes the PGM lock.
3674 * @remarks A page fault on the 2nd page of the access will be raised without
3675 * writing the bits on the first page since we're ASSUMING that the
3676 * caller is emulating an instruction access.
3677 * @remarks This function will dynamically map physical pages in GC. This may
3678 * unmap mappings done by the caller. Be careful!
3679 */
3680VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,
3681 bool fRaiseTrap)
3682{
3683 NOREF(pCtxCore);
3684 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3685 Assert(cb <= PAGE_SIZE);
3686 VMCPU_ASSERT_EMT(pVCpu);
3687
3688 /*
3689 * 1. Translate virtual to physical. This may fault.
3690 * 2. Map the physical address.
3691 * 3. Do the read operation.
3692 * 4. Set access bits if required.
3693 */
3694 int rc;
3695 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3696 if (cb <= cb1)
3697 {
3698 /*
3699 * Not crossing pages.
3700 */
3701 RTGCPHYS GCPhys;
3702 uint64_t fFlags;
3703 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3704 if (RT_SUCCESS(rc))
3705 {
3706 if (1) /** @todo we should check reserved bits ... */
3707 {
3708 const void *pvSrc;
3709 PGMPAGEMAPLOCK Lock;
3710 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3711 switch (rc)
3712 {
3713 case VINF_SUCCESS:
3714 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3715 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3716 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3717 PGMPhysReleasePageMappingLock(pVM, &Lock);
3718 break;
3719 case VERR_PGM_PHYS_PAGE_RESERVED:
3720 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3721 memset(pvDst, 0xff, cb);
3722 break;
3723 default:
3724 AssertMsgFailed(("%Rrc\n", rc));
3725 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3726 return rc;
3727 }
3728
3729 if (!(fFlags & X86_PTE_A))
3730 {
3731 /** @todo access bit emulation isn't 100% correct. */
3732 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3733 AssertRC(rc);
3734 }
3735 return VINF_SUCCESS;
3736 }
3737 }
3738 }
3739 else
3740 {
3741 /*
3742 * Crosses pages.
3743 */
3744 size_t cb2 = cb - cb1;
3745 uint64_t fFlags1;
3746 RTGCPHYS GCPhys1;
3747 uint64_t fFlags2;
3748 RTGCPHYS GCPhys2;
3749 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3750 if (RT_SUCCESS(rc))
3751 {
3752 rc = PGMGstGetPage(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3753 if (RT_SUCCESS(rc))
3754 {
3755 if (1) /** @todo we should check reserved bits ... */
3756 {
3757 const void *pvSrc;
3758 PGMPAGEMAPLOCK Lock;
3759 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3760 switch (rc)
3761 {
3762 case VINF_SUCCESS:
3763 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3764 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3765 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3766 PGMPhysReleasePageMappingLock(pVM, &Lock);
3767 break;
3768 case VERR_PGM_PHYS_PAGE_RESERVED:
3769 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3770 memset(pvDst, 0xff, cb1);
3771 break;
3772 default:
3773 AssertMsgFailed(("%Rrc\n", rc));
3774 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3775 return rc;
3776 }
3777
3778 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3779 switch (rc)
3780 {
3781 case VINF_SUCCESS:
3782 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3783 PGMPhysReleasePageMappingLock(pVM, &Lock);
3784 break;
3785 case VERR_PGM_PHYS_PAGE_RESERVED:
3786 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3787 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3788 break;
3789 default:
3790 AssertMsgFailed(("%Rrc\n", rc));
3791 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3792 return rc;
3793 }
3794
3795 if (!(fFlags1 & X86_PTE_A))
3796 {
3797 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3798 AssertRC(rc);
3799 }
3800 if (!(fFlags2 & X86_PTE_A))
3801 {
3802 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3803 AssertRC(rc);
3804 }
3805 return VINF_SUCCESS;
3806 }
3807 /* sort out which page */
3808 }
3809 else
3810 GCPtrSrc += cb1; /* fault on 2nd page */
3811 }
3812 }
3813
3814 /*
3815 * Raise a #PF if we're allowed to do that.
3816 */
3817 /* Calc the error bits. */
3818 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3819 uint32_t uErr;
3820 switch (rc)
3821 {
3822 case VINF_SUCCESS:
3823 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3824 rc = VERR_ACCESS_DENIED;
3825 break;
3826
3827 case VERR_PAGE_NOT_PRESENT:
3828 case VERR_PAGE_TABLE_NOT_PRESENT:
3829 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3830 break;
3831
3832 default:
3833 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3834 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3835 return rc;
3836 }
3837 if (fRaiseTrap)
3838 {
3839 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3840 rc = TRPMAssertXcptPF(pVCpu, GCPtrSrc, uErr);
3841 if (RT_SUCCESS(rc))
3842 return VINF_EM_RAW_GUEST_TRAP;
3843 return rc;
3844 }
3845 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3846 return rc;
3847}
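
/*
 * Usage sketch (illustrative, hypothetical helper): probing guest memory
 * without queuing a #PF by passing fRaiseTrap=false, so translation failures
 * come back as plain error statuses.  The helper name is an assumption.
 */
#if 0 /* illustration only, not built */
static int sampleProbeReadU64(PVMCPUCC pVCpu, RTGCPTR GCPtrSrc, uint64_t *pu64Dst)
{
    int rc = PGMPhysInterpretedReadNoHandlers(pVCpu, NULL /* pCtxCore is unused */, pu64Dst,
                                              (RTGCUINTPTR)GCPtrSrc, sizeof(*pu64Dst),
                                              false /* fRaiseTrap */);
    /* On success *pu64Dst is valid (0xff filler for unassigned/reserved pages);
       on failure rc is VERR_PAGE_NOT_PRESENT, VERR_PAGE_TABLE_NOT_PRESENT or
       similar and nothing was queued with TRPM. */
    return rc;
}
#endif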
3848
3849
3850/**
3851 * Performs a write to guest virtual memory for instruction emulation.
3852 *
3853 * This will check permissions, raise exceptions and update the dirty and access
3854 * bits.
3855 *
3856 * @returns VBox status code suitable to scheduling.
3857 * @retval VINF_SUCCESS if the write was performed successfully.
3858 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3859 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3860 *
3861 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3862 * @param pCtxCore The context core.
3863 * @param GCPtrDst The destination address.
3864 * @param pvSrc What to write.
3865 * @param cb The number of bytes to write. Not more than a page.
3866 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3867 * an appropriate error status will be returned (no
3868 * informational statuses at all).
3869 *
3870 * @remarks Takes the PGM lock.
3871 * @remarks A page fault on the 2nd page of the access will be raised without
3872 * writing the bits on the first page since we're ASSUMING that the
3873 * caller is emulating an instruction access.
3874 * @remarks This function will dynamically map physical pages in GC. This may
3875 * unmap mappings done by the caller. Be careful!
3876 */
3877VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,
3878 size_t cb, bool fRaiseTrap)
3879{
3880 NOREF(pCtxCore);
3881 Assert(cb <= PAGE_SIZE);
3882 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3883 VMCPU_ASSERT_EMT(pVCpu);
3884
3885 /*
3886 * 1. Translate virtual to physical. This may fault.
3887 * 2. Map the physical address.
3888 * 3. Do the write operation.
3889 * 4. Set access bits if required.
3890 */
3891 /** @todo Since this method is frequently used by EMInterpret or IOM
3892 * upon a write fault to a write access monitored page, we can
3893 * reuse the guest page table walking from the \#PF code. */
3894 int rc;
3895 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3896 if (cb <= cb1)
3897 {
3898 /*
3899 * Not crossing pages.
3900 */
3901 RTGCPHYS GCPhys;
3902 uint64_t fFlags;
3903 rc = PGMGstGetPage(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3904 if (RT_SUCCESS(rc))
3905 {
3906 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3907 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3908 && CPUMGetGuestCPL(pVCpu) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3909 {
3910 void *pvDst;
3911 PGMPAGEMAPLOCK Lock;
3912 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3913 switch (rc)
3914 {
3915 case VINF_SUCCESS:
3916 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3917 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3918 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3919 PGMPhysReleasePageMappingLock(pVM, &Lock);
3920 break;
3921 case VERR_PGM_PHYS_PAGE_RESERVED:
3922 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3923 /* bit bucket */
3924 break;
3925 default:
3926 AssertMsgFailed(("%Rrc\n", rc));
3927 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3928 return rc;
3929 }
3930
3931 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3932 {
3933 /** @todo dirty & access bit emulation isn't 100% correct. */
3934 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3935 AssertRC(rc);
3936 }
3937 return VINF_SUCCESS;
3938 }
3939 rc = VERR_ACCESS_DENIED;
3940 }
3941 }
3942 else
3943 {
3944 /*
3945 * Crosses pages.
3946 */
3947 size_t cb2 = cb - cb1;
3948 uint64_t fFlags1;
3949 RTGCPHYS GCPhys1;
3950 uint64_t fFlags2;
3951 RTGCPHYS GCPhys2;
3952 rc = PGMGstGetPage(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3953 if (RT_SUCCESS(rc))
3954 {
3955 rc = PGMGstGetPage(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3956 if (RT_SUCCESS(rc))
3957 {
3958 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3959 && (fFlags2 & X86_PTE_RW))
3960 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3961 && CPUMGetGuestCPL(pVCpu) <= 2) )
3962 {
3963 void *pvDst;
3964 PGMPAGEMAPLOCK Lock;
3965 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3966 switch (rc)
3967 {
3968 case VINF_SUCCESS:
3969 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3970 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3971 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3972 PGMPhysReleasePageMappingLock(pVM, &Lock);
3973 break;
3974 case VERR_PGM_PHYS_PAGE_RESERVED:
3975 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3976 /* bit bucket */
3977 break;
3978 default:
3979 AssertMsgFailed(("%Rrc\n", rc));
3980 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3981 return rc;
3982 }
3983
3984 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3985 switch (rc)
3986 {
3987 case VINF_SUCCESS:
3988 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3989 PGMPhysReleasePageMappingLock(pVM, &Lock);
3990 break;
3991 case VERR_PGM_PHYS_PAGE_RESERVED:
3992 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3993 /* bit bucket */
3994 break;
3995 default:
3996 AssertMsgFailed(("%Rrc\n", rc));
3997 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3998 return rc;
3999 }
4000
4001 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
4002 {
4003 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
4004 AssertRC(rc);
4005 }
4006 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
4007 {
4008 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
4009 AssertRC(rc);
4010 }
4011 return VINF_SUCCESS;
4012 }
4013 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
4014 GCPtrDst += cb1; /* fault on the 2nd page. */
4015 rc = VERR_ACCESS_DENIED;
4016 }
4017 else
4018 GCPtrDst += cb1; /* fault on the 2nd page. */
4019 }
4020 }
4021
4022 /*
4023 * Raise a #PF if we're allowed to do that.
4024 */
4025 /* Calc the error bits. */
4026 uint32_t uErr;
4027 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
4028 switch (rc)
4029 {
4030 case VINF_SUCCESS:
4031 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
4032 rc = VERR_ACCESS_DENIED;
4033 break;
4034
4035 case VERR_ACCESS_DENIED:
4036 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
4037 break;
4038
4039 case VERR_PAGE_NOT_PRESENT:
4040 case VERR_PAGE_TABLE_NOT_PRESENT:
4041 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
4042 break;
4043
4044 default:
4045 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
4046 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4047 return rc;
4048 }
4049 if (fRaiseTrap)
4050 {
4051 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
4052 rc = TRPMAssertXcptPF(pVCpu, GCPtrDst, uErr);
4053 if (RT_SUCCESS(rc))
4054 return VINF_EM_RAW_GUEST_TRAP;
4055 return rc;
4056 }
4057 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
4058 return rc;
4059}
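
/*
 * Usage sketch (illustrative, hypothetical helper): the write-side
 * counterpart, this time letting the API assert the #PF itself when the guest
 * PTE does not permit the access.  The helper name is an assumption.
 */
#if 0 /* illustration only, not built */
static int sampleEmulatedWriteU32(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, uint32_t u32Value)
{
    int rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, NULL /* pCtxCore is unused */, GCPtrDst,
                                               &u32Value, sizeof(u32Value), true /* fRaiseTrap */);
    if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        /* Not-present or write-protected page: a #PF was asserted via
           TRPMAssertXcptPF and should be delivered by the execution loop. */
    }
    return rc;
}
#endif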
4060
4061
4062/**
4063 * Returns the page type of the specified physical address.
4064 *
4065 * @returns The page type.
4066 * @param pVM The cross context VM structure.
4067 * @param GCPhys Guest physical address
4068 */
4069VMM_INT_DECL(PGMPAGETYPE) PGMPhysGetPageType(PVMCC pVM, RTGCPHYS GCPhys)
4070{
4071 pgmLock(pVM);
4072 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
4073 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
4074 pgmUnlock(pVM);
4075
4076 return enmPgType;
4077}
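
/*
 * Usage sketch (illustrative, hypothetical helper): a quick check of what
 * backs a guest physical address.  PGMPAGETYPE_INVALID is the value returned
 * above for unassigned addresses; treating everything else as "backed by
 * something" is an assumed caller policy.
 */
#if 0 /* illustration only, not built */
static bool sampleIsGCPhysBacked(PVMCC pVM, RTGCPHYS GCPhys)
{
    PGMPAGETYPE const enmType = PGMPhysGetPageType(pVM, GCPhys);
    return enmType != PGMPAGETYPE_INVALID;
}
#endif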
4078
4079
4080/**
4081 * Converts a GC physical address to a HC ring-3 pointer, with some
4082 * additional checks.
4083 *
4084 * @returns VBox status code (no informational statuses).
4085 *
4086 * @param pVM The cross context VM structure.
4087 * @param pVCpu The cross context virtual CPU structure of the
4088 * calling EMT.
4089 * @param GCPhys The GC physical address to convert. This API masks
4090 * the A20 line when necessary.
4091 * @param puTlbPhysRev Where to read the physical TLB revision. Needs to
4092 * be done while holding the PGM lock.
4093 * @param ppb Where to store the pointer corresponding to GCPhys
4094 * on success.
4095 * @param pfTlb The TLB flags and revision. We only add stuff.
4096 *
4097 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr and
4098 * PGMPhysIemGCPhys2Ptr.
4099 *
4100 * @thread EMT(pVCpu).
4101 */
4102VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
4103 R3R0PTRTYPE(uint8_t *) *ppb,
4104 uint64_t *pfTlb)
4105{
4106 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
4107 Assert(!(GCPhys & X86_PAGE_OFFSET_MASK));
4108
4109 pgmLock(pVM);
4110
4111 PPGMRAMRANGE pRam;
4112 PPGMPAGE pPage;
4113 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4114 if (RT_SUCCESS(rc))
4115 {
4116 if (!PGM_PAGE_IS_BALLOONED(pPage))
4117 {
4118 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4119 {
4120 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
4121 {
4122 /*
4123 * No access handler.
4124 */
4125 switch (PGM_PAGE_GET_STATE(pPage))
4126 {
4127 case PGM_PAGE_STATE_ALLOCATED:
4128 *pfTlb |= *puTlbPhysRev;
4129 break;
4130 case PGM_PAGE_STATE_BALLOONED:
4131 AssertFailed();
4132 RT_FALL_THRU();
4133 case PGM_PAGE_STATE_ZERO:
4134 case PGM_PAGE_STATE_SHARED:
4135 case PGM_PAGE_STATE_WRITE_MONITORED:
4136 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
4137 break;
4138 }
4139
4140 PPGMPAGEMAPTLBE pTlbe;
4141 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4142 AssertLogRelRCReturn(rc, rc);
4143 *ppb = (uint8_t *)pTlbe->pv;
4144 }
4145 else if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
4146 {
4147 /*
4148 * MMIO or similar all access handler: Catch all access.
4149 */
4150 *pfTlb |= *puTlbPhysRev
4151 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4152 *ppb = NULL;
4153 }
4154 else
4155 {
4156 /*
4157 * Write access handler: Catch write accesses if active.
4158 */
4159 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
4160 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
4161 else
4162 switch (PGM_PAGE_GET_STATE(pPage))
4163 {
4164 case PGM_PAGE_STATE_ALLOCATED:
4165 *pfTlb |= *puTlbPhysRev;
4166 break;
4167 case PGM_PAGE_STATE_BALLOONED:
4168 AssertFailed();
4169 RT_FALL_THRU();
4170 case PGM_PAGE_STATE_ZERO:
4171 case PGM_PAGE_STATE_SHARED:
4172 case PGM_PAGE_STATE_WRITE_MONITORED:
4173 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
4174 break;
4175 }
4176
4177 PPGMPAGEMAPTLBE pTlbe;
4178 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4179 AssertLogRelRCReturn(rc, rc);
4180 *ppb = (uint8_t *)pTlbe->pv;
4181 }
4182 }
4183 else
4184 {
4185 /* Alias MMIO: For now, we catch all access. */
4186 *pfTlb |= *puTlbPhysRev
4187 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4188 *ppb = NULL;
4189 }
4190 }
4191 else
4192 {
4193        /* Ballooned: Shouldn't get here, but we read the zero page via PGMPhysRead and writes go to /dev/null. */
4194 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4195 *ppb = NULL;
4196 }
4197 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 pPage=%R[pgmpage]\n", GCPhys, *ppb, *pfTlb, pPage));
4198 }
4199 else
4200 {
4201 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4202 *ppb = NULL;
4203 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 (rc=%Rrc)\n", GCPhys, *ppb, *pfTlb, rc));
4204 }
4205
4206 pgmUnlock(pVM);
4207 return VINF_SUCCESS;
4208}
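
/*
 * Usage sketch (illustrative, hypothetical helper): filling a caller-side
 * physical TLB entry in a ring-3/ring-0 build, where R3R0PTRTYPE is a plain
 * pointer.  The SAMPLETLBENTRY layout and the way the revision is handled are
 * assumptions about how an IEM-like caller might consume the output; the
 * PGMPhysIemGCPhys2PtrNoLock call and the flag names match the code above.
 */
#if 0 /* illustration only, not built */
typedef struct SAMPLETLBENTRY
{
    uint64_t    fFlagsAndRev;   /* PGMIEMGCPHYS2PTR_F_* ORed with the physical TLB revision. */
    uint8_t    *pbMappingR3;    /* Direct mapping, usable while the revision matches. */
} SAMPLETLBENTRY;

static int sampleFillPhysTlbEntry(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysPage,
                                  uint64_t const volatile *puTlbPhysRev, SAMPLETLBENTRY *pEntry)
{
    Assert(!(GCPhysPage & X86_PAGE_OFFSET_MASK));   /* must be page aligned, see the assertion above */
    pEntry->fFlagsAndRev = 0;
    pEntry->pbMappingR3  = NULL;
    int rc = PGMPhysIemGCPhys2PtrNoLock(pVM, pVCpu, GCPhysPage, puTlbPhysRev,
                                        &pEntry->pbMappingR3, &pEntry->fFlagsAndRev);
    /* Direct access is only safe when PGMIEMGCPHYS2PTR_F_NO_READ, _NO_WRITE and
       _NO_MAPPINGR3 (as applicable) are clear in fFlagsAndRev. */
    return rc;
}
#endif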
4209
4210
4211/**
4212 * Converts a GC physical address to a HC ring-3 pointer, with some
4213 * additional checks.
4214 *
4215 * @returns VBox status code (no informational statuses).
4216 * @retval VINF_SUCCESS on success.
4217 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4218 * access handler of some kind.
4219 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4220 * accesses or is odd in any way.
4221 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4222 *
4223 * @param pVM The cross context VM structure.
4224 * @param pVCpu The cross context virtual CPU structure of the
4225 * calling EMT.
4226 * @param GCPhys The GC physical address to convert. This API masks
4227 * the A20 line when necessary.
4228 * @param fWritable Whether write access is required.
4229 * @param fByPassHandlers Whether to bypass access handlers.
4230 * @param ppv Where to store the pointer corresponding to GCPhys
4231 * on success.
4232 * @param pLock Where to store the lock information to pass to PGMPhysReleasePageMappingLock when done with the mapping.
4233 *
4234 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
4235 * @thread EMT(pVCpu).
4236 */
4237VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
4238 void **ppv, PPGMPAGEMAPLOCK pLock)
4239{
4240 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
4241
4242 pgmLock(pVM);
4243
4244 PPGMRAMRANGE pRam;
4245 PPGMPAGE pPage;
4246 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4247 if (RT_SUCCESS(rc))
4248 {
4249 if (PGM_PAGE_IS_BALLOONED(pPage))
4250 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4251 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4252 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4253 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4254 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4255 rc = VINF_SUCCESS;
4256 else
4257 {
4258 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4259 {
4260 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4261 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4262 }
4263 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4264 {
4265 Assert(!fByPassHandlers);
4266 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4267 }
4268 }
4269 if (RT_SUCCESS(rc))
4270 {
4271 int rc2;
4272
4273 /* Make sure what we return is writable. */
4274 if (fWritable)
4275 switch (PGM_PAGE_GET_STATE(pPage))
4276 {
4277 case PGM_PAGE_STATE_ALLOCATED:
4278 break;
4279 case PGM_PAGE_STATE_BALLOONED:
4280 AssertFailed();
4281 break;
4282 case PGM_PAGE_STATE_ZERO:
4283 case PGM_PAGE_STATE_SHARED:
4284 case PGM_PAGE_STATE_WRITE_MONITORED:
4285 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
4286 AssertLogRelRCReturn(rc2, rc2);
4287 break;
4288 }
4289
4290 /* Get a ring-3 mapping of the address. */
4291 PPGMPAGEMAPTLBE pTlbe;
4292 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4293 AssertLogRelRCReturn(rc2, rc2);
4294
4295 /* Lock it and calculate the address. */
4296 if (fWritable)
4297 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
4298 else
4299 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
4300 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4301
4302 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
4303 }
4304 else
4305 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
4306
4307 /* else: handler catching all access, no pointer returned. */
4308 }
4309 else
4310 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
4311
4312 pgmUnlock(pVM);
4313 return rc;
4314}
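
/*
 * Usage sketch (illustrative, hypothetical helper): mapping a page for direct
 * writing and releasing the lock again with PGMPhysReleasePageMappingLock, as
 * done throughout this file.  The helper name and the simple copy it performs
 * are assumptions.
 */
#if 0 /* illustration only, not built */
static int sampleDirectWrite(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, const void *pvSrc, size_t cb)
{
    Assert((GCPhys & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE);
    void          *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, true /*fWritable*/, false /*fByPassHandlers*/,
                                  &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(pv, pvSrc, cb);
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    /* VERR_PGM_PHYS_TLB_CATCH_WRITE / _CATCH_ALL typically mean the caller
       should fall back to PGMPhysWrite so access handlers are invoked. */
    return rc;
}
#endif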
4315
4316
4317/**
4318 * Checks if the given GCPhys page requires special handling for the given access
4319 * because it's MMIO or otherwise monitored.
4320 *
4321 * @returns VBox status code (no informational statuses).
4322 * @retval VINF_SUCCESS on success.
4323 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE if the page has a write
4324 * access handler of some kind.
4325 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4326 * accesses or is odd in any way.
4327 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4328 *
4329 * @param pVM The cross context VM structure.
4330 * @param GCPhys The GC physical address to convert. Since this is
4331 * only used for filling the REM TLB, the A20 mask must
4332 * be applied before calling this API.
4333 * @param fWritable Whether write access is required.
4334 * @param fByPassHandlers Whether to bypass access handlers.
4335 *
4336 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
4337 * a stop gap thing that should be removed once there is a better TLB
4338 * for virtual address accesses.
4339 */
4340VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
4341{
4342 pgmLock(pVM);
4343 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
4344
4345 PPGMRAMRANGE pRam;
4346 PPGMPAGE pPage;
4347 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4348 if (RT_SUCCESS(rc))
4349 {
4350 if (PGM_PAGE_IS_BALLOONED(pPage))
4351 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4352 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4353 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4354 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4355 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4356 rc = VINF_SUCCESS;
4357 else
4358 {
4359 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4360 {
4361 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4362 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4363 }
4364 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4365 {
4366 Assert(!fByPassHandlers);
4367 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4368 }
4369 }
4370 }
4371
4372 pgmUnlock(pVM);
4373 return rc;
4374}
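
/*
 * Usage sketch (illustrative, hypothetical helper): asking whether a guest
 * page can be accessed directly or needs to take the handler path.  The helper
 * name and the yes/no policy are assumptions.
 */
#if 0 /* illustration only, not built */
static bool sampleCanAccessDirectly(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable)
{
    /* The A20 mask must already be applied to GCPhys, see the note above. */
    int rc = PGMPhysIemQueryAccess(pVM, GCPhys, fWritable, false /*fByPassHandlers*/);
    return rc == VINF_SUCCESS;
}
#endif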
4375
4376
4377/**
4378 * Interface used by NEM to check what to do on a memory access exit.
4379 *
4380 * @returns VBox status code.
4381 * @param pVM The cross context VM structure.
4382 * @param pVCpu The cross context per virtual CPU structure.
4383 * Optional.
4384 * @param GCPhys The guest physical address.
4385 * @param fMakeWritable Whether to try to make the page writable or not. If it
4386 * cannot be made writable, NEM_PAGE_PROT_WRITE won't
4387 * be returned and the return code will be unaffected.
4388 * @param pInfo Where to return the page information. This is
4389 * initialized even on failure.
4390 * @param pfnChecker Page in-sync checker callback. Optional.
4391 * @param pvUser User argument to pass to pfnChecker.
4392 */
4393VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fMakeWritable, PPGMPHYSNEMPAGEINFO pInfo,
4394 PFNPGMPHYSNEMCHECKPAGE pfnChecker, void *pvUser)
4395{
4396 pgmLock(pVM);
4397
4398 PPGMPAGE pPage;
4399 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
4400 if (RT_SUCCESS(rc))
4401 {
4402 /* Try make it writable if requested. */
4403 pInfo->u2OldNemState = PGM_PAGE_GET_NEM_STATE(pPage);
4404 if (fMakeWritable)
4405 switch (PGM_PAGE_GET_STATE(pPage))
4406 {
4407 case PGM_PAGE_STATE_SHARED:
4408 case PGM_PAGE_STATE_WRITE_MONITORED:
4409 case PGM_PAGE_STATE_ZERO:
4410 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
4411 if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
4412 rc = VINF_SUCCESS;
4413 break;
4414 }
4415
4416 /* Fill in the info. */
4417 pInfo->HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
4418 pInfo->u2NemState = PGM_PAGE_GET_NEM_STATE(pPage);
4419 pInfo->fHasHandlers = PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) ? 1 : 0;
4420 PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
4421 pInfo->enmType = enmType;
4422 pInfo->fNemProt = pgmPhysPageCalcNemProtection(pPage, enmType);
4423 switch (PGM_PAGE_GET_STATE(pPage))
4424 {
4425 case PGM_PAGE_STATE_ALLOCATED:
4426 pInfo->fZeroPage = 0;
4427 break;
4428
4429 case PGM_PAGE_STATE_ZERO:
4430 pInfo->fZeroPage = 1;
4431 break;
4432
4433 case PGM_PAGE_STATE_WRITE_MONITORED:
4434 pInfo->fZeroPage = 0;
4435 break;
4436
4437 case PGM_PAGE_STATE_SHARED:
4438 pInfo->fZeroPage = 0;
4439 break;
4440
4441 case PGM_PAGE_STATE_BALLOONED:
4442 pInfo->fZeroPage = 1;
4443 break;
4444
4445 default:
4446 pInfo->fZeroPage = 1;
4447 AssertFailedStmt(rc = VERR_PGM_PHYS_PAGE_GET_IPE);
4448 }
4449
4450 /* Call the checker and update NEM state. */
4451 if (pfnChecker)
4452 {
4453 rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
4454 PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
4455 }
4456
4457 /* Done. */
4458 pgmUnlock(pVM);
4459 }
4460 else
4461 {
4462 pgmUnlock(pVM);
4463
4464 pInfo->HCPhys = NIL_RTHCPHYS;
4465 pInfo->fNemProt = NEM_PAGE_PROT_NONE;
4466 pInfo->u2NemState = 0;
4467 pInfo->fHasHandlers = 0;
4468 pInfo->fZeroPage = 0;
4469 pInfo->enmType = PGMPAGETYPE_INVALID;
4470 }
4471
4472 return rc;
4473}
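
/*
 * Usage sketch (illustrative, hypothetical helper): querying page information
 * for a NEM exit without supplying a checker callback (pfnChecker is optional
 * per the parameter docs).  The helper name and the way the result is consumed
 * are assumptions.
 */
#if 0 /* illustration only, not built */
static int sampleQueryNemPageProt(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t *pfNemProt)
{
    PGMPHYSNEMPAGEINFO PageInfo;
    int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &PageInfo,
                                       NULL /*pfnChecker*/, NULL /*pvUser*/);
    /* pInfo is initialized even on failure, so this is always safe to read. */
    *pfNemProt = PageInfo.fNemProt;
    return rc;
}
#endif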
4474
4475
4476/**
4477 * NEM helper that performs @a pfnCallback on pages with NEM state @a uMinState
4478 * or higher.
4479 *
4480 * @returns VBox status code from callback.
4481 * @param pVM The cross context VM structure.
4482 * @param pVCpu The cross context per CPU structure. This is
4483 * optional as it's only for passing to the callback.
4484 * @param uMinState The minimum NEM state value to call on.
4485 * @param pfnCallback The callback function.
4486 * @param pvUser User argument for the callback.
4487 */
4488VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uMinState,
4489 PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
4490{
4491 /*
4492 * Just brute force this problem.
4493 */
4494 pgmLock(pVM);
4495 int rc = VINF_SUCCESS;
4496 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
4497 {
4498 uint32_t const cPages = pRam->cb >> X86_PAGE_SHIFT;
4499 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4500 {
4501 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(&pRam->aPages[iPage]);
4502 if (u2State < uMinState)
4503 { /* likely */ }
4504 else
4505 {
4506 rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
4507 if (RT_SUCCESS(rc))
4508 PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
4509 else
4510 break;
4511 }
4512 }
4513 }
4514 pgmUnlock(pVM);
4515
4516 return rc;
4517}
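
/*
 * Usage sketch (illustrative, hypothetical helpers): a callback for
 * PGMPhysNemEnumPagesByState.  The callback prototype below is inferred from
 * the pfnCallback invocation inside the function; the reset value written to
 * *pu2State and the uMinState threshold are assumptions that depend on the
 * NEM backend.
 */
#if 0 /* illustration only, not built */
static DECLCALLBACK(int) sampleResetNemState(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
                                             uint8_t *pu2State, void *pvUser)
{
    RT_NOREF(pVM, pVCpu, GCPhys, pvUser);
    *pu2State = 0;  /* written back into the page by the enumerator on success */
    return VINF_SUCCESS;
}

static int sampleResetAllNemStates(PVMCC pVM, PVMCPUCC pVCpu)
{
    return PGMPhysNemEnumPagesByState(pVM, pVCpu, 1 /*uMinState*/, sampleResetNemState, NULL /*pvUser*/);
}
#endif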
4518