VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp @ 55979

Last change on this file since 55979 was 55966, checked in by vboxsync, 10 years ago

PGM,++: VBOXSTRICTRC for physical access handlers.

1/* $Id: PGMAllPhys.cpp 55966 2015-05-20 12:42:53Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM_PHYS
22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/trpm.h>
24#include <VBox/vmm/vmm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/em.h>
27#ifdef VBOX_WITH_REM
28# include <VBox/vmm/rem.h>
29#endif
30#include "PGMInternal.h"
31#include <VBox/vmm/vm.h>
32#include "PGMInline.h"
33#include <VBox/param.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/asm-amd64-x86.h>
38#include <VBox/log.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
47/** Enable the physical TLB. */
48#define PGM_WITH_PHYS_TLB
49
50
51
52#ifndef IN_RING3
53
54/**
55 * \#PF Handler callback for physical memory accesses without an RC/R0 handler.
56 * This simply pushes everything to the HC handler.
57 *
58 * @returns VBox status code (appropriate for trap handling and GC return).
59 * @param pVM Pointer to the VM.
60 * @param pVCpu Pointer to the cross context CPU structure for the
61 * calling EMT.
62 * @param uErrorCode CPU Error code.
63 * @param pRegFrame Trap register frame.
64 * @param pvFault The fault address (cr2).
65 * @param GCPhysFault The GC physical address corresponding to pvFault.
66 * @param pvUser User argument.
67 */
68VMMDECL(VBOXSTRICTRC) pgmPhysPfHandlerRedirectToHC(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
69 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
70{
71 NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(GCPhysFault); NOREF(pvUser);
72 return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ;
73}
74
75
76/**
77 * \#PF Handler callback for Guest ROM range write access.
78 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
79 *
80 * @returns VBox status code (appropriate for trap handling and GC return).
81 * @param pVM Pointer to the VM.
82 * @param pVCpu Pointer to the cross context CPU structure for the
83 * calling EMT.
84 * @param uErrorCode CPU Error code.
85 * @param pRegFrame Trap register frame.
86 * @param pvFault The fault address (cr2).
87 * @param GCPhysFault The GC physical address corresponding to pvFault.
88 * @param pvUser User argument. Pointer to the ROM range structure.
89 */
90DECLEXPORT(VBOXSTRICTRC) pgmPhysRomWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
91 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
92{
93 int rc;
94 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
95 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
96 NOREF(uErrorCode); NOREF(pvFault);
97
98 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
99
100 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
101 switch (pRom->aPages[iPage].enmProt)
102 {
103 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
104 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
105 {
106 /*
107 * If it's a simple instruction that doesn't change the CPU state
108 * we will simply skip it. Otherwise we'll have to defer it to REM.
109 */
110 uint32_t cbOp;
111 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
112 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
113 if ( RT_SUCCESS(rc)
114 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
115 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
116 {
117 switch (pDis->bOpCode)
118 {
119 /** @todo Find other instructions we can safely skip, possibly
120 * adding this kind of detection to DIS or EM. */
121 case OP_MOV:
122 pRegFrame->rip += cbOp;
123 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteHandled);
124 return VINF_SUCCESS;
125 }
126 }
127 break;
128 }
129
130 case PGMROMPROT_READ_RAM_WRITE_RAM:
131 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
132 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
133 AssertRC(rc);
134 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
135
136 case PGMROMPROT_READ_ROM_WRITE_RAM:
137 /* Handle it in ring-3 because it's *way* easier there. */
138 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
139 break;
140
141 default:
142 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
143 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
144 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
145 }
146
147 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
148 return VINF_EM_RAW_EMULATE_INSTR;
149}
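/*
 * Illustrative sketch, not upstream code: summarises how the ROM write #PF
 * handler above reacts to each protection mode. The PGMROMPROT_* values are
 * the real ones from the switch above; the helper name and the assumption
 * that the enum type is named PGMROMPROT are hypothetical.
 */
#if 0 /* sketch only */
static const char *pgmPhysExampleRomProtAction(PGMROMPROT enmProt)
{
    switch (enmProt)
    {
        case PGMROMPROT_READ_ROM_WRITE_IGNORE:
        case PGMROMPROT_READ_RAM_WRITE_IGNORE:
            return "skip simple instructions, otherwise emulate the write";
        case PGMROMPROT_READ_RAM_WRITE_RAM:
            return "temporarily lift the handler so the write lands in RAM";
        case PGMROMPROT_READ_ROM_WRITE_RAM:
            return "mark the page written and handle the write in ring-3";
        default:
            return "invalid protection mode";
    }
}
#endif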
150
151#endif /* !IN_RING3 */
152
153
154/**
155 * Access handler callback for ROM write accesses.
156 *
157 * @returns VINF_SUCCESS if the handler has carried out the operation.
158 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
159 * @param pVM Pointer to the VM.
160 * @param pVCpu The cross context CPU structure for the calling EMT.
161 * @param GCPhys The physical address the guest is writing to.
162 * @param pvPhys The HC mapping of that address.
163 * @param pvBuf What the guest is reading/writing.
164 * @param cbBuf How much it's reading/writing.
165 * @param enmAccessType The access type.
166 * @param enmOrigin Who is making the access.
167 * @param pvUser User argument.
168 */
169PGM_ALL_CB2_DECL(VBOXSTRICTRC)
170pgmPhysRomWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
171 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
172{
173 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
174 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
175 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
176 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
177 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
178 NOREF(pVCpu); NOREF(pvPhys); NOREF(enmOrigin);
179
180 if (enmAccessType == PGMACCESSTYPE_READ)
181 {
182 switch (pRomPage->enmProt)
183 {
184 /*
185 * Take the default action.
186 */
187 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
188 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
189 case PGMROMPROT_READ_ROM_WRITE_RAM:
190 case PGMROMPROT_READ_RAM_WRITE_RAM:
191 return VINF_PGM_HANDLER_DO_DEFAULT;
192
193 default:
194 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
195 pRom->aPages[iPage].enmProt, iPage, GCPhys),
196 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
197 }
198 }
199 else
200 {
201 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
202 switch (pRomPage->enmProt)
203 {
204 /*
205 * Ignore writes.
206 */
207 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
208 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
209 return VINF_SUCCESS;
210
211 /*
212 * Write to the RAM page.
213 */
214 case PGMROMPROT_READ_ROM_WRITE_RAM:
215 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
216 {
217 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
218 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
219
220 /*
221 * Take the lock, do lazy allocation, map the page and copy the data.
222 *
223 * Note that we have to bypass the mapping TLB since it works on
224 * guest physical addresses and entering the shadow page would
225 * kind of screw things up...
226 */
227 int rc = pgmLock(pVM);
228 AssertRC(rc);
229
230 PPGMPAGE pShadowPage = &pRomPage->Shadow;
231 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
232 {
233 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
234 AssertLogRelReturn(pShadowPage, VERR_PGM_PHYS_PAGE_GET_IPE);
235 }
236
237 void *pvDstPage;
238 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
239 if (RT_SUCCESS(rc))
240 {
241 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
242 pRomPage->LiveSave.fWrittenTo = true;
243 }
244
245 pgmUnlock(pVM);
246 return rc;
247 }
248
249 default:
250 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
251 pRom->aPages[iPage].enmProt, iPage, GCPhys),
252 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
253 }
254 }
255}
256
257
258/**
259 * Invalidates the RAM range TLBs.
260 *
261 * @param pVM Pointer to the VM.
262 */
263void pgmPhysInvalidRamRangeTlbs(PVM pVM)
264{
265 pgmLock(pVM);
266 for (uint32_t i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
267 {
268 pVM->pgm.s.apRamRangesTlbR3[i] = NIL_RTR3PTR;
269 pVM->pgm.s.apRamRangesTlbR0[i] = NIL_RTR0PTR;
270 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
271 }
272 pgmUnlock(pVM);
273}
274
275
276/**
277 * Tests if a value of type RTGCPHYS is negative if the type had been signed
278 * instead of unsigned.
279 *
280 * @returns @c true if negative, @c false if positive or zero.
281 * @param a_GCPhys The value to test.
282 * @todo Move me to iprt/types.h.
283 */
284#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
285
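/*
 * Worked example for the sign test above, not upstream code: with a 64-bit
 * RTGCPHYS the macro simply checks bit 63 of the unsigned difference. The
 * RAM range tree walkers below use it to pick the descent direction: an
 * address below pRam->GCPhys wraps around and sets the top bit (go left),
 * while an address at or beyond the end of the range leaves it clear (go
 * right). The helper name and the sample addresses are hypothetical.
 */
#if 0 /* sketch only */
static void pgmPhysExampleSignTest(void)
{
    RTGCPHYS const GCPhysRangeStart = UINT64_C(0x00000000e0000000);
    Assert( RTGCPHYS_IS_NEGATIVE(UINT64_C(0x00000000d0000000) - GCPhysRangeStart)); /* below -> left */
    Assert(!RTGCPHYS_IS_NEGATIVE(UINT64_C(0x00000000f0000000) - GCPhysRangeStart)); /* above -> right */
}
#endif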
286
287/**
288 * Slow worker for pgmPhysGetRange.
289 *
290 * @copydoc pgmPhysGetRange
291 */
292PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
293{
294 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
295
296 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
297 while (pRam)
298 {
299 RTGCPHYS off = GCPhys - pRam->GCPhys;
300 if (off < pRam->cb)
301 {
302 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
303 return pRam;
304 }
305 if (RTGCPHYS_IS_NEGATIVE(off))
306 pRam = pRam->CTX_SUFF(pLeft);
307 else
308 pRam = pRam->CTX_SUFF(pRight);
309 }
310 return NULL;
311}
312
313
314/**
315 * Slow worker for pgmPhysGetRangeAtOrAbove.
316 *
317 * @copydoc pgmPhysGetRangeAtOrAbove
318 */
319PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
320{
321 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
322
323 PPGMRAMRANGE pLastLeft = NULL;
324 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
325 while (pRam)
326 {
327 RTGCPHYS off = GCPhys - pRam->GCPhys;
328 if (off < pRam->cb)
329 {
330 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
331 return pRam;
332 }
333 if (RTGCPHYS_IS_NEGATIVE(off))
334 {
335 pLastLeft = pRam;
336 pRam = pRam->CTX_SUFF(pLeft);
337 }
338 else
339 pRam = pRam->CTX_SUFF(pRight);
340 }
341 return pLastLeft;
342}
343
344
345/**
346 * Slow worker for pgmPhysGetPage.
347 *
348 * @copydoc pgmPhysGetPage
349 */
350PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
351{
352 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
353
354 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
355 while (pRam)
356 {
357 RTGCPHYS off = GCPhys - pRam->GCPhys;
358 if (off < pRam->cb)
359 {
360 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
361 return &pRam->aPages[off >> PAGE_SHIFT];
362 }
363
364 if (RTGCPHYS_IS_NEGATIVE(off))
365 pRam = pRam->CTX_SUFF(pLeft);
366 else
367 pRam = pRam->CTX_SUFF(pRight);
368 }
369 return NULL;
370}
371
372
373/**
374 * Slow worker for pgmPhysGetPageEx.
375 *
376 * @copydoc pgmPhysGetPageEx
377 */
378int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
379{
380 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
381
382 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
383 while (pRam)
384 {
385 RTGCPHYS off = GCPhys - pRam->GCPhys;
386 if (off < pRam->cb)
387 {
388 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
389 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
390 return VINF_SUCCESS;
391 }
392
393 if (RTGCPHYS_IS_NEGATIVE(off))
394 pRam = pRam->CTX_SUFF(pLeft);
395 else
396 pRam = pRam->CTX_SUFF(pRight);
397 }
398
399 *ppPage = NULL;
400 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
401}
402
403
404/**
405 * Slow worker for pgmPhysGetPageAndRangeEx.
406 *
407 * @copydoc pgmPhysGetPageAndRangeEx
408 */
409int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
410{
411 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
412
413 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
414 while (pRam)
415 {
416 RTGCPHYS off = GCPhys - pRam->GCPhys;
417 if (off < pRam->cb)
418 {
419 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
420 *ppRam = pRam;
421 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
422 return VINF_SUCCESS;
423 }
424
425 if (RTGCPHYS_IS_NEGATIVE(off))
426 pRam = pRam->CTX_SUFF(pLeft);
427 else
428 pRam = pRam->CTX_SUFF(pRight);
429 }
430
431 *ppRam = NULL;
432 *ppPage = NULL;
433 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
434}
435
436
437/**
438 * Checks if Address Gate 20 is enabled or not.
439 *
440 * @returns true if enabled.
441 * @returns false if disabled.
442 * @param pVCpu Pointer to the VMCPU.
443 */
444VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
445{
446 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
447 return pVCpu->pgm.s.fA20Enabled;
448}
449
450
451/**
452 * Validates a GC physical address.
453 *
454 * @returns true if valid.
455 * @returns false if invalid.
456 * @param pVM Pointer to the VM.
457 * @param GCPhys The physical address to validate.
458 */
459VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
460{
461 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
462 return pPage != NULL;
463}
464
465
466/**
467 * Checks if a GC physical address is a normal page,
468 * i.e. not ROM, MMIO or reserved.
469 *
470 * @returns true if normal.
471 * @returns false if invalid, ROM, MMIO or reserved page.
472 * @param pVM Pointer to the VM.
473 * @param GCPhys The physical address to check.
474 */
475VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
476{
477 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
478 return pPage
479 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
480}
481
482
483/**
484 * Converts a GC physical address to a HC physical address.
485 *
486 * @returns VINF_SUCCESS on success.
487 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
488 * page but has no physical backing.
489 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
490 * GC physical address.
491 *
492 * @param pVM Pointer to the VM.
493 * @param GCPhys The GC physical address to convert.
494 * @param pHCPhys Where to store the HC physical address on success.
495 */
496VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
497{
498 pgmLock(pVM);
499 PPGMPAGE pPage;
500 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
501 if (RT_SUCCESS(rc))
502 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
503 pgmUnlock(pVM);
504 return rc;
505}
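/*
 * Minimal usage sketch for PGMPhysGCPhys2HCPhys, not upstream code: the guest
 * address, the helper name and the log strings are illustrative.
 */
#if 0 /* sketch only */
static void pgmPhysExampleGCPhys2HCPhys(PVM pVM)
{
    RTHCPHYS HCPhys;
    int rc = PGMPhysGCPhys2HCPhys(pVM, UINT64_C(0x100000) /* 1 MB */, &HCPhys);
    if (RT_SUCCESS(rc))
        Log(("guest 1 MB maps to host physical %RHp\n", HCPhys));
    else
        Log(("no backing: %Rrc\n", rc)); /* reserved page or invalid address */
}
#endif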
506
507
508/**
509 * Invalidates all page mapping TLBs.
510 *
511 * @param pVM Pointer to the VM.
512 */
513void pgmPhysInvalidatePageMapTLB(PVM pVM)
514{
515 pgmLock(pVM);
516 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
517
518 /* Clear the shared R0/R3 TLB completely. */
519 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
520 {
521 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
522 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
523 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
524 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
525 }
526
527 /** @todo clear the RC TLB whenever we add it. */
528
529 pgmUnlock(pVM);
530}
531
532
533/**
534 * Invalidates a page mapping TLB entry
535 *
536 * @param pVM Pointer to the VM.
537 * @param GCPhys The guest physical address of the entry to flush.
538 */
539void pgmPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
540{
541 PGM_LOCK_ASSERT_OWNER(pVM);
542
543 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
544
545#ifdef IN_RC
546 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
547 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
548 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
549 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
550 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
551#else
552 /* Clear the shared R0/R3 TLB entry. */
553 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
554 pTlbe->GCPhys = NIL_RTGCPHYS;
555 pTlbe->pPage = 0;
556 pTlbe->pMap = 0;
557 pTlbe->pv = 0;
558#endif
559
560 /** @todo clear the RC TLB whenever we add it. */
561}
562
563/**
564 * Makes sure that there is at least one handy page ready for use.
565 *
566 * This will also take the appropriate actions when reaching water-marks.
567 *
568 * @returns VBox status code.
569 * @retval VINF_SUCCESS on success.
570 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
571 *
572 * @param pVM Pointer to the VM.
573 *
574 * @remarks Must be called from within the PGM critical section. It may
575 * nip back to ring-3/0 in some cases.
576 */
577static int pgmPhysEnsureHandyPage(PVM pVM)
578{
579 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
580
581 /*
582 * Do we need to do anything special?
583 */
584#ifdef IN_RING3
585 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
586#else
587 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
588#endif
589 {
590 /*
591 * Allocate pages only if we're out of them, or in ring-3, almost out.
592 */
593#ifdef IN_RING3
594 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
595#else
596 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
597#endif
598 {
599 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
600 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
601#ifdef IN_RING3
602 int rc = PGMR3PhysAllocateHandyPages(pVM);
603#else
604 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
605#endif
606 if (RT_UNLIKELY(rc != VINF_SUCCESS))
607 {
608 if (RT_FAILURE(rc))
609 return rc;
610 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
611 if (!pVM->pgm.s.cHandyPages)
612 {
613 LogRel(("PGM: no more handy pages!\n"));
614 return VERR_EM_NO_MEMORY;
615 }
616 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
617 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
618#ifdef IN_RING3
619# ifdef VBOX_WITH_REM
620 REMR3NotifyFF(pVM);
621# endif
622#else
623 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
624#endif
625 }
626 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
627 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
628 ("%u\n", pVM->pgm.s.cHandyPages),
629 VERR_PGM_HANDY_PAGE_IPE);
630 }
631 else
632 {
633 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
634 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
635#ifndef IN_RING3
636 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
637 {
638 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
639 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
640 }
641#endif
642 }
643 }
644
645 return VINF_SUCCESS;
646}
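/*
 * Caller-side sketch, not upstream code: the contract of pgmPhysEnsureHandyPage
 * is that on success at least one entry of pVM->pgm.s.aHandyPages can be
 * consumed by decrementing cHandyPages, which is what pgmPhysAllocPage below
 * does. The helper name is hypothetical.
 */
#if 0 /* sketch only */
static int pgmPhysExampleTakeHandyPage(PVM pVM, PRTHCPHYS pHCPhys)
{
    PGM_LOCK_ASSERT_OWNER(pVM);
    int rc = pgmPhysEnsureHandyPage(pVM);
    if (RT_FAILURE(rc))
        return rc;                                  /* typically VERR_EM_NO_MEMORY */
    uint32_t const iHandyPage = --pVM->pgm.s.cHandyPages;
    *pHCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
    return VINF_SUCCESS;
}
#endif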
647
648
649/**
650 * Replace a zero or shared page with new page that we can write to.
651 *
652 * @returns The following VBox status codes.
653 * @retval VINF_SUCCESS on success, pPage is modified.
654 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
655 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
656 *
657 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
658 *
659 * @param pVM Pointer to the VM.
660 * @param pPage The physical page tracking structure. This will
661 * be modified on success.
662 * @param GCPhys The address of the page.
663 *
664 * @remarks Must be called from within the PGM critical section. It may
665 * nip back to ring-3/0 in some cases.
666 *
667 * @remarks This function shouldn't really fail, however if it does
668 * it probably means we've screwed up the size of handy pages and/or
669 * the low-water mark. Or, that some device I/O is causing a lot of
670 * pages to be allocated while the host is in a low-memory
671 * condition. This latter should be handled elsewhere and in a more
672 * controlled manner, it's on the @bugref{3170} todo list...
673 */
674int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
675{
676 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
677
678 /*
679 * Prereqs.
680 */
681 PGM_LOCK_ASSERT_OWNER(pVM);
682 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
683 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
684
685# ifdef PGM_WITH_LARGE_PAGES
686 /*
687 * Try allocate a large page if applicable.
688 */
689 if ( PGMIsUsingLargePages(pVM)
690 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
691 {
692 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
693 PPGMPAGE pBasePage;
694
695 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
696 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
697 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
698 {
699 rc = pgmPhysAllocLargePage(pVM, GCPhys);
700 if (rc == VINF_SUCCESS)
701 return rc;
702 }
703 /* Mark the base as type page table, so we don't check over and over again. */
704 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
705
706 /* fall back to 4KB pages. */
707 }
708# endif
709
710 /*
711 * Flush any shadow page table mappings of the page.
712 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
713 */
714 bool fFlushTLBs = false;
715 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
716 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
717
718 /*
719 * Ensure that we've got a page handy, take it and use it.
720 */
721 int rc2 = pgmPhysEnsureHandyPage(pVM);
722 if (RT_FAILURE(rc2))
723 {
724 if (fFlushTLBs)
725 PGM_INVL_ALL_VCPU_TLBS(pVM);
726 Assert(rc2 == VERR_EM_NO_MEMORY);
727 return rc2;
728 }
729 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
730 PGM_LOCK_ASSERT_OWNER(pVM);
731 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
732 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
733
734 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
735 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
736 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
737 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
738 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
739 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
740
741 /*
742 * There are one or two actions to be taken the next time we allocate handy pages:
743 * - Tell the GMM (global memory manager) what the page is being used for.
744 * (Speeds up replacement operations - sharing and defragmenting.)
745 * - If the current backing is shared, it must be freed.
746 */
747 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
748 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
749
750 void const *pvSharedPage = NULL;
751 if (PGM_PAGE_IS_SHARED(pPage))
752 {
753 /* Mark this shared page for freeing/dereferencing. */
754 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
755 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
756
757 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
758 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
759 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageReplaceShared));
760 pVM->pgm.s.cSharedPages--;
761
762 /* Grab the address of the page so we can make a copy later on. (safe) */
763 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
764 AssertRC(rc);
765 }
766 else
767 {
768 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
769 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
770 pVM->pgm.s.cZeroPages--;
771 }
772
773 /*
774 * Do the PGMPAGE modifications.
775 */
776 pVM->pgm.s.cPrivatePages++;
777 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
778 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
779 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
780 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
781 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
782
783 /* Copy the shared page contents to the replacement page. */
784 if (pvSharedPage)
785 {
786 /* Get the virtual address of the new page. */
787 PGMPAGEMAPLOCK PgMpLck;
788 void *pvNewPage;
789 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
790 if (RT_SUCCESS(rc))
791 {
792 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo write ASMMemCopyPage */
793 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
794 }
795 }
796
797 if ( fFlushTLBs
798 && rc != VINF_PGM_GCPHYS_ALIASED)
799 PGM_INVL_ALL_VCPU_TLBS(pVM);
800 return rc;
801}
802
803#ifdef PGM_WITH_LARGE_PAGES
804
805/**
806 * Replace a 2 MB range of zero pages with new pages that we can write to.
807 *
808 * @returns The following VBox status codes.
809 * @retval VINF_SUCCESS on success, pPage is modified.
810 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
811 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
812 *
813 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
814 *
815 * @param pVM Pointer to the VM.
816 * @param GCPhys The address of the page.
817 *
818 * @remarks Must be called from within the PGM critical section. It may
819 * nip back to ring-3/0 in some cases.
820 */
821int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
822{
823 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
824 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
825
826 /*
827 * Prereqs.
828 */
829 PGM_LOCK_ASSERT_OWNER(pVM);
830 Assert(PGMIsUsingLargePages(pVM));
831
832 PPGMPAGE pFirstPage;
833 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
834 if ( RT_SUCCESS(rc)
835 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
836 {
837 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
838
839 /* Don't call this function for already allocated pages. */
840 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
841
842 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
843 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
844 {
845 /* Lazy approach: check all pages in the 2 MB range.
846 * The whole range must be ram and unallocated. */
847 GCPhys = GCPhysBase;
848 unsigned iPage;
849 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
850 {
851 PPGMPAGE pSubPage;
852 rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage);
853 if ( RT_FAILURE(rc)
854 || PGM_PAGE_GET_TYPE(pSubPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
855 || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
856 {
857 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
858 break;
859 }
860 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
861 GCPhys += PAGE_SIZE;
862 }
863 if (iPage != _2M/PAGE_SIZE)
864 {
865 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
866 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
867 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
868 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
869 }
870
871 /*
872 * Do the allocation.
873 */
874# ifdef IN_RING3
875 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
876# else
877 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
878# endif
879 if (RT_SUCCESS(rc))
880 {
881 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
882 pVM->pgm.s.cLargePages++;
883 return VINF_SUCCESS;
884 }
885
886 /* If we fail once, it most likely means the host's memory is too
887 fragmented; don't bother trying again. */
888 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
889 PGMSetLargePageUsage(pVM, false);
890 return rc;
891 }
892 }
893 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
894}
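/*
 * Worked example for the 2 MB alignment used above, not upstream code:
 * X86_PDE2M_PAE_PG_MASK clears the low 21 bits, so every GCPhys inside a 2 MB
 * window yields the same GCPhysBase, and the window covers _2M/PAGE_SIZE = 512
 * 4 KB pages that must all be unallocated RAM for a large page to be used.
 * The helper name and the sample address are hypothetical.
 */
#if 0 /* sketch only */
static void pgmPhysExampleLargePageBase(void)
{
    RTGCPHYS const GCPhys     = UINT64_C(0x40123456);
    RTGCPHYS const GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
    Assert(GCPhysBase == UINT64_C(0x40000000));
    Assert(_2M / PAGE_SIZE == 512);
}
#endif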
895
896
897/**
898 * Recheck the entire 2 MB range to see if we can use it again as a large page.
899 *
900 * @returns The following VBox status codes.
901 * @retval VINF_SUCCESS on success, the large page can be used again
902 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
903 *
904 * @param pVM Pointer to the VM.
905 * @param GCPhys The address of the page.
906 * @param pLargePage Page structure of the base page
907 */
908int pgmPhysRecheckLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
909{
910 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
911
912 GCPhys &= X86_PDE2M_PAE_PG_MASK;
913
914 /* Check the base page. */
915 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
916 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
917 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
918 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
919 {
920 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
921 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
922 }
923
924 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
925 /* Check all remaining pages in the 2 MB range. */
926 unsigned i;
927 GCPhys += PAGE_SIZE;
928 for (i = 1; i < _2M/PAGE_SIZE; i++)
929 {
930 PPGMPAGE pPage;
931 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
932 AssertRCBreak(rc);
933
934 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
935 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
936 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
937 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
938 {
939 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
940 break;
941 }
942
943 GCPhys += PAGE_SIZE;
944 }
945 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
946
947 if (i == _2M/PAGE_SIZE)
948 {
949 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
950 pVM->pgm.s.cLargePagesDisabled--;
951 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
952 return VINF_SUCCESS;
953 }
954
955 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
956}
957
958#endif /* PGM_WITH_LARGE_PAGES */
959
960/**
961 * Deal with a write monitored page.
962 *
965 * @param pVM Pointer to the VM.
966 * @param pPage The physical page tracking structure.
967 *
968 * @remarks Called from within the PGM critical section.
969 */
970void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
971{
972 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
973 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
974 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
975 Assert(pVM->pgm.s.cMonitoredPages > 0);
976 pVM->pgm.s.cMonitoredPages--;
977 pVM->pgm.s.cWrittenToPages++;
978}
979
980
981/**
982 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
983 *
984 * @returns VBox strict status code.
985 * @retval VINF_SUCCESS on success.
986 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
987 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
988 *
989 * @param pVM Pointer to the VM.
990 * @param pPage The physical page tracking structure.
991 * @param GCPhys The address of the page.
992 *
993 * @remarks Called from within the PGM critical section.
994 */
995int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
996{
997 PGM_LOCK_ASSERT_OWNER(pVM);
998 switch (PGM_PAGE_GET_STATE(pPage))
999 {
1000 case PGM_PAGE_STATE_WRITE_MONITORED:
1001 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
1002 /* fall thru */
1003 default: /* to shut up GCC */
1004 case PGM_PAGE_STATE_ALLOCATED:
1005 return VINF_SUCCESS;
1006
1007 /*
1008 * Zero pages can be dummy pages for MMIO or reserved memory,
1009 * so we need to check the flags before joining cause with
1010 * shared page replacement.
1011 */
1012 case PGM_PAGE_STATE_ZERO:
1013 if (PGM_PAGE_IS_MMIO(pPage))
1014 return VERR_PGM_PHYS_PAGE_RESERVED;
1015 /* fall thru */
1016 case PGM_PAGE_STATE_SHARED:
1017 return pgmPhysAllocPage(pVM, pPage, GCPhys);
1018
1019 /* Not allowed to write to ballooned pages. */
1020 case PGM_PAGE_STATE_BALLOONED:
1021 return VERR_PGM_PHYS_PAGE_BALLOONED;
1022 }
1023}
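/*
 * Summary sketch of the state dispatch above, not upstream code: ALLOCATED and
 * WRITE_MONITORED pages become writable in place, ZERO and SHARED pages are
 * replaced via pgmPhysAllocPage, while MMIO dummy pages and ballooned pages
 * are refused. The helper name is hypothetical; the macros are the real ones.
 */
#if 0 /* sketch only */
static bool pgmPhysExampleNeedsReplacement(PPGMPAGE pPage)
{
    unsigned const uState = PGM_PAGE_GET_STATE(pPage);
    return (uState == PGM_PAGE_STATE_ZERO && !PGM_PAGE_IS_MMIO(pPage))
        ||  uState == PGM_PAGE_STATE_SHARED;
}
#endif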
1024
1025
1026/**
1027 * Internal usage: Map the page specified by its GMM ID.
1028 *
1029 * This is similar to pgmPhysPageMap.
1030 *
1031 * @returns VBox status code.
1032 *
1033 * @param pVM Pointer to the VM.
1034 * @param idPage The Page ID.
1035 * @param HCPhys The physical address (for RC).
1036 * @param ppv Where to store the mapping address.
1037 *
1038 * @remarks Called from within the PGM critical section. The mapping is only
1039 * valid while you are inside this section.
1040 */
1041int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
1042{
1043 /*
1044 * Validation.
1045 */
1046 PGM_LOCK_ASSERT_OWNER(pVM);
1047 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1048 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
1049 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
1050
1051#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1052 /*
1053 * Map it by HCPhys.
1054 */
1055 return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1056
1057#else
1058 /*
1059 * Find/make Chunk TLB entry for the mapping chunk.
1060 */
1061 PPGMCHUNKR3MAP pMap;
1062 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1063 if (pTlbe->idChunk == idChunk)
1064 {
1065 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1066 pMap = pTlbe->pChunk;
1067 }
1068 else
1069 {
1070 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1071
1072 /*
1073 * Find the chunk, map it if necessary.
1074 */
1075 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1076 if (pMap)
1077 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1078 else
1079 {
1080# ifdef IN_RING0
1081 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1082 AssertRCReturn(rc, rc);
1083 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1084 Assert(pMap);
1085# else
1086 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1087 if (RT_FAILURE(rc))
1088 return rc;
1089# endif
1090 }
1091
1092 /*
1093 * Enter it into the Chunk TLB.
1094 */
1095 pTlbe->idChunk = idChunk;
1096 pTlbe->pChunk = pMap;
1097 }
1098
1099 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
1100 return VINF_SUCCESS;
1101#endif
1102}
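/*
 * Sketch of the GMM page ID split used above, not upstream code: the upper bits
 * identify the mapping chunk, the low bits the page index within that chunk,
 * which is exactly what the final *ppv computation uses. GMM_CHUNKID_SHIFT and
 * GMM_PAGEID_IDX_MASK are the real constants; the helper name is hypothetical.
 */
#if 0 /* sketch only */
static void pgmPhysExampleSplitPageId(uint32_t idPage)
{
    uint32_t const idChunk = idPage >> GMM_CHUNKID_SHIFT;
    uint32_t const iPage   = idPage &  GMM_PAGEID_IDX_MASK;
    Log(("idPage=%#x -> idChunk=%#x iPage=%#x\n", idPage, idChunk, iPage));
}
#endif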
1103
1104
1105/**
1106 * Maps a page into the current virtual address space so it can be accessed.
1107 *
1108 * @returns VBox status code.
1109 * @retval VINF_SUCCESS on success.
1110 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1111 *
1112 * @param pVM Pointer to the VM.
1113 * @param pPage The physical page tracking structure.
1114 * @param GCPhys The address of the page.
1115 * @param ppMap Where to store the address of the mapping tracking structure.
1116 * @param ppv Where to store the mapping address of the page. The page
1117 * offset is masked off!
1118 *
1119 * @remarks Called from within the PGM critical section.
1120 */
1121static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1122{
1123 PGM_LOCK_ASSERT_OWNER(pVM);
1124 NOREF(GCPhys);
1125
1126#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1127 /*
1128 * Just some sketchy GC/R0-darwin code.
1129 */
1130 *ppMap = NULL;
1131 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1132 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
1133 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1134 return VINF_SUCCESS;
1135
1136#else /* IN_RING3 || IN_RING0 */
1137
1138
1139 /*
1140 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1141 */
1142 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1143 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1144 {
1145 /* Decode the page id to a page in a MMIO2 ram range. */
1146 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1147 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1148 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1149 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
1150 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1151 pPage->s.idPage, pPage->s.uStateY),
1152 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1153 PPGMMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1154 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1155 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1156 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1157 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << PAGE_SHIFT);
1158 *ppMap = NULL;
1159 return VINF_SUCCESS;
1160 }
1161
1162 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1163 if (idChunk == NIL_GMM_CHUNKID)
1164 {
1165 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1166 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1167 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1168 {
1169 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1170 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1171 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1172 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1173 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1174 }
1175 else
1176 {
1177 static uint8_t s_abPlayItSafe[0x1000*2]; /* I don't dare return the zero page at the moment. */
1178 *ppv = (uint8_t *)((uintptr_t)&s_abPlayItSafe[0x1000] & ~(uintptr_t)0xfff);
1179 }
1180 *ppMap = NULL;
1181 return VINF_SUCCESS;
1182 }
1183
1184 /*
1185 * Find/make Chunk TLB entry for the mapping chunk.
1186 */
1187 PPGMCHUNKR3MAP pMap;
1188 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1189 if (pTlbe->idChunk == idChunk)
1190 {
1191 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1192 pMap = pTlbe->pChunk;
1193 AssertPtr(pMap->pv);
1194 }
1195 else
1196 {
1197 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1198
1199 /*
1200 * Find the chunk, map it if necessary.
1201 */
1202 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1203 if (pMap)
1204 {
1205 AssertPtr(pMap->pv);
1206 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1207 }
1208 else
1209 {
1210#ifdef IN_RING0
1211 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1212 AssertRCReturn(rc, rc);
1213 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1214 Assert(pMap);
1215#else
1216 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1217 if (RT_FAILURE(rc))
1218 return rc;
1219#endif
1220 AssertPtr(pMap->pv);
1221 }
1222
1223 /*
1224 * Enter it into the Chunk TLB.
1225 */
1226 pTlbe->idChunk = idChunk;
1227 pTlbe->pChunk = pMap;
1228 }
1229
1230 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
1231 *ppMap = pMap;
1232 return VINF_SUCCESS;
1233#endif /* IN_RING3 */
1234}
1235
1236
1237/**
1238 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1239 *
1240 * This is typically used in paths where we cannot use the TLB methods (like ROM
1241 * pages) or where there is no point in using them since we won't get many hits.
1242 *
1243 * @returns VBox strict status code.
1244 * @retval VINF_SUCCESS on success.
1245 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1246 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1247 *
1248 * @param pVM Pointer to the VM.
1249 * @param pPage The physical page tracking structure.
1250 * @param GCPhys The address of the page.
1251 * @param ppv Where to store the mapping address of the page. The page
1252 * offset is masked off!
1253 *
1254 * @remarks Called from within the PGM critical section. The mapping is only
1255 * valid while you are inside this section.
1256 */
1257int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1258{
1259 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1260 if (RT_SUCCESS(rc))
1261 {
1262 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1263 PPGMPAGEMAP pMapIgnore;
1264 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1265 if (RT_FAILURE(rc2)) /* preserve rc */
1266 rc = rc2;
1267 }
1268 return rc;
1269}
1270
1271
1272/**
1273 * Maps a page into the current virtual address space so it can be accessed for
1274 * both writing and reading.
1275 *
1276 * This is typically used in paths where we cannot use the TLB methods (like ROM
1277 * pages) or where there is no point in using them since we won't get many hits.
1278 *
1279 * @returns VBox status code.
1280 * @retval VINF_SUCCESS on success.
1281 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1282 *
1283 * @param pVM Pointer to the VM.
1284 * @param pPage The physical page tracking structure. Must be in the
1285 * allocated state.
1286 * @param GCPhys The address of the page.
1287 * @param ppv Where to store the mapping address of the page. The page
1288 * offset is masked off!
1289 *
1290 * @remarks Called from within the PGM critical section. The mapping is only
1291 * valid while you are inside this section.
1292 */
1293int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1294{
1295 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1296 PPGMPAGEMAP pMapIgnore;
1297 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1298}
1299
1300
1301/**
1302 * Maps a page into the current virtual address space so it can be accessed for
1303 * reading.
1304 *
1305 * This is typically used in paths where we cannot use the TLB methods (like ROM
1306 * pages) or where there is no point in using them since we won't get many hits.
1307 *
1308 * @returns VBox status code.
1309 * @retval VINF_SUCCESS on success.
1310 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1311 *
1312 * @param pVM Pointer to the VM.
1313 * @param pPage The physical page tracking structure.
1314 * @param GCPhys The address of the page.
1315 * @param ppv Where to store the mapping address of the page. The page
1316 * offset is masked off!
1317 *
1318 * @remarks Called from within the PGM critical section. The mapping is only
1319 * valid while you are inside this section.
1320 */
1321int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1322{
1323 PPGMPAGEMAP pMapIgnore;
1324 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1325}
1326
1327#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1328
1329/**
1330 * Load a guest page into the ring-3 physical TLB.
1331 *
1332 * @returns VBox status code.
1333 * @retval VINF_SUCCESS on success
1334 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1335 * @param pVM Pointer to the VM.
1336 * @param GCPhys The guest physical address in question.
1337 */
1338int pgmPhysPageLoadIntoTlb(PVM pVM, RTGCPHYS GCPhys)
1339{
1340 PGM_LOCK_ASSERT_OWNER(pVM);
1341
1342 /*
1343 * Find the ram range and page and hand it over to the with-page function.
1344 * 99.8% of requests are expected to be in the first range.
1345 */
1346 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1347 if (!pPage)
1348 {
1349 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1350 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1351 }
1352
1353 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1354}
1355
1356
1357/**
1358 * Load a guest page into the ring-3 physical TLB.
1359 *
1360 * @returns VBox status code.
1361 * @retval VINF_SUCCESS on success
1362 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1363 *
1364 * @param pVM Pointer to the VM.
1365 * @param pPage Pointer to the PGMPAGE structure corresponding to
1366 * GCPhys.
1367 * @param GCPhys The guest physical address in question.
1368 */
1369int pgmPhysPageLoadIntoTlbWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1370{
1371 PGM_LOCK_ASSERT_OWNER(pVM);
1372 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1373
1374 /*
1375 * Map the page.
1376 * Make a special case for the zero page as it is kind of special.
1377 */
1378 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1379 if ( !PGM_PAGE_IS_ZERO(pPage)
1380 && !PGM_PAGE_IS_BALLOONED(pPage))
1381 {
1382 void *pv;
1383 PPGMPAGEMAP pMap;
1384 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1385 if (RT_FAILURE(rc))
1386 return rc;
1387 pTlbe->pMap = pMap;
1388 pTlbe->pv = pv;
1389 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1390 }
1391 else
1392 {
1393 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1394 pTlbe->pMap = NULL;
1395 pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1396 }
1397#ifdef PGM_WITH_PHYS_TLB
1398 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1399 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1400 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1401 else
1402 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1403#else
1404 pTlbe->GCPhys = NIL_RTGCPHYS;
1405#endif
1406 pTlbe->pPage = pPage;
1407 return VINF_SUCCESS;
1408}
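/*
 * Sketch of how a later lookup recognises the TLB entry filled in above; this
 * is a simplified restatement of the inline query path and not upstream code.
 * The entry is keyed on the page-aligned GCPhys, so the NIL_RTGCPHYS key used
 * for ROM pages (or when PGM_WITH_PHYS_TLB is disabled) never matches and such
 * pages always take the slow path. The helper name is hypothetical.
 */
#if 0 /* sketch only */
static bool pgmPhysExampleTlbHit(PVM pVM, RTGCPHYS GCPhys)
{
    PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    return pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK);
}
#endif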
1409
1410#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1411
1412/**
1413 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1414 * own the PGM lock and therefore not need to lock the mapped page.
1415 *
1416 * @returns VBox status code.
1417 * @retval VINF_SUCCESS on success.
1418 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1419 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1420 *
1421 * @param pVM Pointer to the VM.
1422 * @param GCPhys The guest physical address of the page that should be mapped.
1423 * @param pPage Pointer to the PGMPAGE structure for the page.
1424 * @param ppv Where to store the address corresponding to GCPhys.
1425 *
1426 * @internal
1427 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1428 */
1429int pgmPhysGCPhys2CCPtrInternalDepr(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1430{
1431 int rc;
1432 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1433 PGM_LOCK_ASSERT_OWNER(pVM);
1434 pVM->pgm.s.cDeprecatedPageLocks++;
1435
1436 /*
1437 * Make sure the page is writable.
1438 */
1439 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1440 {
1441 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1442 if (RT_FAILURE(rc))
1443 return rc;
1444 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1445 }
1446 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1447
1448 /*
1449 * Get the mapping address.
1450 */
1451#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1452 void *pv;
1453 rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
1454 PGM_PAGE_GET_HCPHYS(pPage),
1455 &pv
1456 RTLOG_COMMA_SRC_POS);
1457 if (RT_FAILURE(rc))
1458 return rc;
1459 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1460#else
1461 PPGMPAGEMAPTLBE pTlbe;
1462 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1463 if (RT_FAILURE(rc))
1464 return rc;
1465 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1466#endif
1467 return VINF_SUCCESS;
1468}
1469
1470#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1471
1472/**
1473 * Locks a page mapping for writing.
1474 *
1475 * @param pVM Pointer to the VM.
1476 * @param pPage The page.
1477 * @param pTlbe The mapping TLB entry for the page.
1478 * @param pLock The lock structure (output).
1479 */
1480DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1481{
1482 PPGMPAGEMAP pMap = pTlbe->pMap;
1483 if (pMap)
1484 pMap->cRefs++;
1485
1486 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1487 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1488 {
1489 if (cLocks == 0)
1490 pVM->pgm.s.cWriteLockedPages++;
1491 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1492 }
1493 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1494 {
1495 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1496 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1497 if (pMap)
1498 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1499 }
1500
1501 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1502 pLock->pvMap = pMap;
1503}
1504
1505/**
1506 * Locks a page mapping for reading.
1507 *
1508 * @param pVM Pointer to the VM.
1509 * @param pPage The page.
1510 * @param pTlbe The mapping TLB entry for the page.
1511 * @param pLock The lock structure (output).
1512 */
1513DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1514{
1515 PPGMPAGEMAP pMap = pTlbe->pMap;
1516 if (pMap)
1517 pMap->cRefs++;
1518
1519 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1520 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1521 {
1522 if (cLocks == 0)
1523 pVM->pgm.s.cReadLockedPages++;
1524 PGM_PAGE_INC_READ_LOCKS(pPage);
1525 }
1526 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1527 {
1528 PGM_PAGE_INC_READ_LOCKS(pPage);
1529 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1530 if (pMap)
1531 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1532 }
1533
1534 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1535 pLock->pvMap = pMap;
1536}
1537
1538#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
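/*
 * Sketch of the lock accounting in the two helpers above, not upstream code:
 * counts below PGM_PAGE_MAX_LOCKS are tracked exactly, and once a page hits the
 * maximum it is treated as permanently locked and the owning chunk mapping gets
 * an extra reference so it cannot be unmapped under the lock holder. The helper
 * name is hypothetical.
 */
#if 0 /* sketch only */
static bool pgmPhysExampleIsPermanentlyWriteLocked(PPGMPAGE pPage)
{
    return PGM_PAGE_GET_WRITE_LOCKS(pPage) == PGM_PAGE_MAX_LOCKS;
}
#endif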
1539
1540
1541/**
1542 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1543 * own the PGM lock and have access to the page structure.
1544 *
1545 * @returns VBox status code.
1546 * @retval VINF_SUCCESS on success.
1547 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1548 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1549 *
1550 * @param pVM Pointer to the VM.
1551 * @param GCPhys The guest physical address of the page that should be mapped.
1552 * @param pPage Pointer to the PGMPAGE structure for the page.
1553 * @param ppv Where to store the address corresponding to GCPhys.
1554 * @param pLock Where to store the lock information that
1555 * pgmPhysReleaseInternalPageMappingLock needs.
1556 *
1557 * @internal
1558 */
1559int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1560{
1561 int rc;
1562 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1563 PGM_LOCK_ASSERT_OWNER(pVM);
1564
1565 /*
1566 * Make sure the page is writable.
1567 */
1568 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1569 {
1570 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1571 if (RT_FAILURE(rc))
1572 return rc;
1573 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1574 }
1575 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1576
1577 /*
1578 * Do the job.
1579 */
1580#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1581 void *pv;
1582 PVMCPU pVCpu = VMMGetCpu(pVM);
1583 rc = pgmRZDynMapHCPageInlined(pVCpu,
1584 PGM_PAGE_GET_HCPHYS(pPage),
1585 &pv
1586 RTLOG_COMMA_SRC_POS);
1587 if (RT_FAILURE(rc))
1588 return rc;
1589 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1590 pLock->pvPage = pv;
1591 pLock->pVCpu = pVCpu;
1592
1593#else
1594 PPGMPAGEMAPTLBE pTlbe;
1595 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1596 if (RT_FAILURE(rc))
1597 return rc;
1598 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1599 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1600#endif
1601 return VINF_SUCCESS;
1602}
1603
1604
1605/**
1606 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1607 * own the PGM lock and have access to the page structure.
1608 *
1609 * @returns VBox status code.
1610 * @retval VINF_SUCCESS on success.
1611 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1612 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1613 *
1614 * @param pVM Pointer to the VM.
1615 * @param GCPhys The guest physical address of the page that should be mapped.
1616 * @param pPage Pointer to the PGMPAGE structure for the page.
1617 * @param ppv Where to store the address corresponding to GCPhys.
1618 * @param pLock Where to store the lock information that
1619 * pgmPhysReleaseInternalPageMappingLock needs.
1620 *
1621 * @internal
1622 */
1623int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1624{
1625 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1626 PGM_LOCK_ASSERT_OWNER(pVM);
1627 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1628
1629 /*
1630 * Do the job.
1631 */
1632#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1633 void *pv;
1634 PVMCPU pVCpu = VMMGetCpu(pVM);
1635 int rc = pgmRZDynMapHCPageInlined(pVCpu,
1636 PGM_PAGE_GET_HCPHYS(pPage),
1637 &pv
1638 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1639 if (RT_FAILURE(rc))
1640 return rc;
1641 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1642 pLock->pvPage = pv;
1643 pLock->pVCpu = pVCpu;
1644
1645#else
1646 PPGMPAGEMAPTLBE pTlbe;
1647 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1648 if (RT_FAILURE(rc))
1649 return rc;
1650 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1651 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1652#endif
1653 return VINF_SUCCESS;
1654}
1655
1656
1657/**
1658 * Requests the mapping of a guest page into the current context.
1659 *
1660 * This API should only be used for very short-term mappings, as it will consume scarce
1661 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1662 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1663 *
1664 * This API will assume your intention is to write to the page, and will
1665 * therefore replace shared and zero pages. If you do not intend to modify
1666 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1667 *
1668 * @returns VBox status code.
1669 * @retval VINF_SUCCESS on success.
1670 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1671 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1672 *
1673 * @param pVM Pointer to the VM.
1674 * @param GCPhys The guest physical address of the page that should be
1675 * mapped.
1676 * @param ppv Where to store the address corresponding to GCPhys.
1677 * @param pLock Where to store the lock information that
1678 * PGMPhysReleasePageMappingLock needs.
1679 *
1680 * @remarks The caller is responsible for dealing with access handlers.
1681 * @todo Add an informational return code for pages with access handlers?
1682 *
1683 * @remark Avoid calling this API from within critical sections (other than
1684 * the PGM one) because of the deadlock risk. External threads may
1685 * need to delegate jobs to the EMTs.
1686 * @remarks Only one page is mapped! Make no assumption about what's after or
1687 * before the returned page!
1688 * @thread Any thread.
1689 */
1690VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1691{
1692 int rc = pgmLock(pVM);
1693 AssertRCReturn(rc, rc);
1694
1695#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1696 /*
1697 * Find the page and make sure it's writable.
1698 */
1699 PPGMPAGE pPage;
1700 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1701 if (RT_SUCCESS(rc))
1702 {
1703 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1704 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1705 if (RT_SUCCESS(rc))
1706 {
1707 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1708
1709 PVMCPU pVCpu = VMMGetCpu(pVM);
1710 void *pv;
1711 rc = pgmRZDynMapHCPageInlined(pVCpu,
1712 PGM_PAGE_GET_HCPHYS(pPage),
1713 &pv
1714 RTLOG_COMMA_SRC_POS);
1715 if (RT_SUCCESS(rc))
1716 {
1717 AssertRCSuccess(rc);
1718
1719 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1720 *ppv = pv;
1721 pLock->pvPage = pv;
1722 pLock->pVCpu = pVCpu;
1723 }
1724 }
1725 }
1726
1727#else /* IN_RING3 || IN_RING0 */
1728 /*
1729 * Query the Physical TLB entry for the page (may fail).
1730 */
1731 PPGMPAGEMAPTLBE pTlbe;
1732 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1733 if (RT_SUCCESS(rc))
1734 {
1735 /*
1736 * If the page is shared, the zero page, or being write monitored
1737 * it must be converted to a page that's writable if possible.
1738 */
1739 PPGMPAGE pPage = pTlbe->pPage;
1740 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1741 {
1742 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1743 if (RT_SUCCESS(rc))
1744 {
1745 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1746 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1747 }
1748 }
1749 if (RT_SUCCESS(rc))
1750 {
1751 /*
1752 * Now, just perform the locking and calculate the return address.
1753 */
1754 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1755 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1756 }
1757 }
1758
1759#endif /* IN_RING3 || IN_RING0 */
1760 pgmUnlock(pVM);
1761 return rc;
1762}
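
/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * file): maps one guest page for writing, patches a byte and releases the
 * lock straight away, as the remarks above require.  The function name and
 * the byte being patched are made up for the example.
 */
#if 0 /* example only, not built */
static int pgmPhysExamplePatchGuestByte(PVM pVM, RTGCPHYS GCPhys, uint8_t bNew)
{
    void          *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bNew;                      /* pv already includes the page offset of GCPhys. */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release ASAP - the mapping cache is a scarce resource. */
    }
    return rc;
}
#endif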
1763
1764
1765/**
1766 * Requests the mapping of a guest page into the current context.
1767 *
1768 * This API should only be used for very short-term mappings, as it will consume scarce
1769 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1770 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1771 *
1772 * @returns VBox status code.
1773 * @retval VINF_SUCCESS on success.
1774 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1775 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1776 *
1777 * @param pVM Pointer to the VM.
1778 * @param GCPhys The guest physical address of the page that should be
1779 * mapped.
1780 * @param ppv Where to store the address corresponding to GCPhys.
1781 * @param pLock Where to store the lock information that
1782 * PGMPhysReleasePageMappingLock needs.
1783 *
1784 * @remarks The caller is responsible for dealing with access handlers.
1785 * @todo Add an informational return code for pages with access handlers?
1786 *
1787 * @remarks Avoid calling this API from within critical sections (other than
1788 * the PGM one) because of the deadlock risk.
1789 * @remarks Only one page is mapped! Make no assumption about what's after or
1790 * before the returned page!
1791 * @thread Any thread.
1792 */
1793VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1794{
1795 int rc = pgmLock(pVM);
1796 AssertRCReturn(rc, rc);
1797
1798#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1799 /*
1800 * Find the page and make sure it's readable.
1801 */
1802 PPGMPAGE pPage;
1803 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1804 if (RT_SUCCESS(rc))
1805 {
1806 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1807 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1808 else
1809 {
1810 PVMCPU pVCpu = VMMGetCpu(pVM);
1811 void *pv;
1812 rc = pgmRZDynMapHCPageInlined(pVCpu,
1813 PGM_PAGE_GET_HCPHYS(pPage),
1814 &pv
1815 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1816 if (RT_SUCCESS(rc))
1817 {
1818 AssertRCSuccess(rc);
1819
1820 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1821 *ppv = pv;
1822 pLock->pvPage = pv;
1823 pLock->pVCpu = pVCpu;
1824 }
1825 }
1826 }
1827
1828#else /* IN_RING3 || IN_RING0 */
1829 /*
1830 * Query the Physical TLB entry for the page (may fail).
1831 */
1832 PPGMPAGEMAPTLBE pTlbe;
1833 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1834 if (RT_SUCCESS(rc))
1835 {
1836 /* MMIO pages don't have any readable backing. */
1837 PPGMPAGE pPage = pTlbe->pPage;
1838 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1839 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1840 else
1841 {
1842 /*
1843 * Now, just perform the locking and calculate the return address.
1844 */
1845 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1846 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1847 }
1848 }
1849
1850#endif /* IN_RING3 || IN_RING0 */
1851 pgmUnlock(pVM);
1852 return rc;
1853}
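
/*
 * Illustrative usage sketch (not part of the original file): checksums the
 * remainder of one guest page through the read-only mapping.  Note that the
 * returned pointer is only valid for the single page containing GCPhys.  The
 * function name is made up for the example.
 */
#if 0 /* example only, not built */
static int pgmPhysExampleSumPageTail(PVM pVM, RTGCPHYS GCPhys, uint32_t *puSum)
{
    void const    *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        size_t         cb   = PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK);
        uint8_t const *pb   = (uint8_t const *)pv;
        uint32_t       uSum = 0;
        while (cb-- > 0)
            uSum += *pb++;
        *puSum = uSum;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif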
1854
1855
1856/**
1857 * Requests the mapping of a guest page given by virtual address into the current context.
1858 *
1859 * This API should only be used for very short-term mappings, as it will consume
1860 * scarce resources (R0 and GC) in the mapping cache. When you're done
1861 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1862 *
1863 * This API will assume your intention is to write to the page, and will
1864 * therefore replace shared and zero pages. If you do not intend to modify
1865 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1866 *
1867 * @returns VBox status code.
1868 * @retval VINF_SUCCESS on success.
1869 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1870 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1871 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1872 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1873 *
1874 * @param pVCpu Pointer to the VMCPU.
1875 * @param GCPtr The guest pointer (virtual address) of the page that should be mapped.
1876 * @param ppv Where to store the address corresponding to GCPtr.
1877 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1878 *
1879 * @remark Avoid calling this API from within critical sections (other than
1880 * the PGM one) because of the deadlock risk.
1881 * @thread EMT
1882 */
1883VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1884{
1885 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1886 RTGCPHYS GCPhys;
1887 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1888 if (RT_SUCCESS(rc))
1889 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1890 return rc;
1891}
1892
1893
1894/**
1895 * Requests the mapping of a guest page given by virtual address into the current context.
1896 *
1897 * This API should only be used for very short-term mappings, as it will consume
1898 * scarce resources (R0 and GC) in the mapping cache. When you're done
1899 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1900 *
1901 * @returns VBox status code.
1902 * @retval VINF_SUCCESS on success.
1903 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1904 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1905 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1906 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1907 *
1908 * @param pVCpu Pointer to the VMCPU.
1909 * @param GCPtr The guest pointer (virtual address) of the page that should be mapped.
1910 * @param ppv Where to store the address corresponding to GCPtr.
1911 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1912 *
1913 * @remark Avoid calling this API from within critical sections (other than
1914 * the PGM one) because of the deadlock risk.
1915 * @thread EMT
1916 */
1917VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1918{
1919 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1920 RTGCPHYS GCPhys;
1921 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1922 if (RT_SUCCESS(rc))
1923 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1924 return rc;
1925}
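
/*
 * Illustrative usage sketch (not part of the original file): reads a small,
 * page-contained guest structure through its virtual address.  Must run on
 * the EMT of pVCpu, as asserted by the API above; cbDst is assumed not to
 * cross a page boundary.  The function name is made up for the example.
 */
#if 0 /* example only, not built */
static int pgmPhysExamplePeekGuestVirt(PVMCPU pVCpu, RTGCPTR GCPtr, void *pvDst, size_t cbDst)
{
    void const    *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtr, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(pvDst, pv, cbDst);
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
    }
    return rc;
}
#endif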
1926
1927
1928/**
1929 * Release the mapping of a guest page.
1930 *
1931 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1932 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1933 *
1934 * @param pVM Pointer to the VM.
1935 * @param pLock The lock structure initialized by the mapping function.
1936 */
1937VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1938{
1939#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1940 Assert(pLock->pvPage != NULL);
1941 Assert(pLock->pVCpu == VMMGetCpu(pVM));
1942 PGM_DYNMAP_UNUSED_HINT(pLock->pVCpu, pLock->pvPage);
1943 pLock->pVCpu = NULL;
1944 pLock->pvPage = NULL;
1945
1946#else
1947 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1948 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1949 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1950
1951 pLock->uPageAndType = 0;
1952 pLock->pvMap = NULL;
1953
1954 pgmLock(pVM);
1955 if (fWriteLock)
1956 {
1957 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1958 Assert(cLocks > 0);
1959 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1960 {
1961 if (cLocks == 1)
1962 {
1963 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1964 pVM->pgm.s.cWriteLockedPages--;
1965 }
1966 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1967 }
1968
1969 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1970 {
1971 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1972 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1973 Assert(pVM->pgm.s.cMonitoredPages > 0);
1974 pVM->pgm.s.cMonitoredPages--;
1975 pVM->pgm.s.cWrittenToPages++;
1976 }
1977 }
1978 else
1979 {
1980 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1981 Assert(cLocks > 0);
1982 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1983 {
1984 if (cLocks == 1)
1985 {
1986 Assert(pVM->pgm.s.cReadLockedPages > 0);
1987 pVM->pgm.s.cReadLockedPages--;
1988 }
1989 PGM_PAGE_DEC_READ_LOCKS(pPage);
1990 }
1991 }
1992
1993 if (pMap)
1994 {
1995 Assert(pMap->cRefs >= 1);
1996 pMap->cRefs--;
1997 }
1998 pgmUnlock(pVM);
1999#endif /* IN_RING3 || IN_RING0 */
2000}
2001
2002
2003/**
2004 * Release the internal mapping of a guest page.
2005 *
2006 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
2007 * pgmPhysGCPhys2CCPtrInternalReadOnly.
2008 *
2009 * @param pVM Pointer to the VM.
2010 * @param pLock The lock structure initialized by the mapping function.
2011 *
2012 * @remarks Caller must hold the PGM lock.
2013 */
2014void pgmPhysReleaseInternalPageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
2015{
2016 PGM_LOCK_ASSERT_OWNER(pVM);
2017 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
2018}
2019
2020
2021/**
2022 * Converts a GC physical address to a HC ring-3 pointer.
2023 *
2024 * @returns VINF_SUCCESS on success.
2025 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2026 * page but has no physical backing.
2027 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2028 * GC physical address.
2029 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2030 * a dynamic ram chunk boundary
2031 *
2032 * @param pVM Pointer to the VM.
2033 * @param GCPhys The GC physical address to convert.
2034 * @param pR3Ptr Where to store the R3 pointer on success.
2035 *
2036 * @deprecated Avoid when possible!
2037 */
2038int pgmPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2039{
2040/** @todo this is kind of hacky and needs some more work. */
2041#ifndef DEBUG_sandervl
2042 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
2043#endif
2044
2045 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
2046#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2047 NOREF(pVM); NOREF(pR3Ptr);
2048 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
2049#else
2050 pgmLock(pVM);
2051
2052 PPGMRAMRANGE pRam;
2053 PPGMPAGE pPage;
2054 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2055 if (RT_SUCCESS(rc))
2056 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2057
2058 pgmUnlock(pVM);
2059 Assert(rc <= VINF_SUCCESS);
2060 return rc;
2061#endif
2062}
2063
2064#if 0 /*defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)*/
2065
2066/**
2067 * Maps and locks a guest CR3 or PD (PAE) page.
2068 *
2069 * @returns VINF_SUCCESS on success.
2070 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2071 * page but has no physical backing.
2072 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2073 * GC physical address.
2074 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2075 * a dynamic ram chunk boundary
2076 *
2077 * @param pVM Pointer to the VM.
2078 * @param GCPhys The GC physical address to convert.
2079 * @param pR3Ptr Where to store the R3 pointer on success. This may or
2080 * may not be valid in ring-0 depending on the
2081 * VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option.
2082 *
2083 * @remarks The caller must own the PGM lock.
2084 */
2085int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2086{
2087
2088 PPGMRAMRANGE pRam;
2089 PPGMPAGE pPage;
2090 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2091 if (RT_SUCCESS(rc))
2092 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2093 Assert(rc <= VINF_SUCCESS);
2094 return rc;
2095}
2096
2097
2103#endif
2104
2105/**
2106 * Converts a guest pointer to a GC physical address.
2107 *
2108 * This uses the current CR3/CR0/CR4 of the guest.
2109 *
2110 * @returns VBox status code.
2111 * @param pVCpu Pointer to the VMCPU.
2112 * @param GCPtr The guest pointer to convert.
2113 * @param pGCPhys Where to store the GC physical address.
2114 */
2115VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2116{
2117 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
2118 if (pGCPhys && RT_SUCCESS(rc))
2119 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
2120 return rc;
2121}
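
/*
 * Illustrative sketch (not part of the original file): translates a guest
 * linear address using the current paging mode and logs the result.  As the
 * function above shows, the byte offset within the page is OR'ed back into
 * the returned physical address.  The function name is made up.
 */
#if 0 /* example only, not built */
static void pgmPhysExampleLogTranslation(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
    if (RT_SUCCESS(rc))
        Log(("pgmPhysExampleLogTranslation: %RGv -> %RGp\n", GCPtr, GCPhys));
    else
        Log(("pgmPhysExampleLogTranslation: %RGv not present (%Rrc)\n", GCPtr, rc));
}
#endif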
2122
2123
2124/**
2125 * Converts a guest pointer to a HC physical address.
2126 *
2127 * This uses the current CR3/CR0/CR4 of the guest.
2128 *
2129 * @returns VBox status code.
2130 * @param pVCpu Pointer to the VMCPU.
2131 * @param GCPtr The guest pointer to convert.
2132 * @param pHCPhys Where to store the HC physical address.
2133 */
2134VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2135{
2136 PVM pVM = pVCpu->CTX_SUFF(pVM);
2137 RTGCPHYS GCPhys;
2138 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
2139 if (RT_SUCCESS(rc))
2140 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
2141 return rc;
2142}
2143
2144
2145
2146#undef LOG_GROUP
2147#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2148
2149
2150#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2151/**
2152 * Cache PGMPhys memory access
2153 *
2154 * @param pVM Pointer to the VM.
2155 * @param pCache Cache structure pointer
2156 * @param GCPhys GC physical address
2157 * @param pbR3 R3 pointer corresponding to the physical page
2158 *
2159 * @thread EMT.
2160 */
2161static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2162{
2163 uint32_t iCacheIndex;
2164
2165 Assert(VM_IS_EMT(pVM));
2166
2167 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
2168 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
2169
2170 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2171
2172 ASMBitSet(&pCache->aEntries, iCacheIndex);
2173
2174 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2175 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2176}
2177#endif /* IN_RING3 */
2178
2179
2180/**
2181 * Deals with reading from a page with one or more ALL access handlers.
2182 *
2183 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2184 * @retval VINF_SUCCESS.
2185 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3 or with
2186 * PGMACCESSORIGIN_IEM.
2187 *
2188 * @param pVM Pointer to the VM.
2189 * @param pPage The page descriptor.
2190 * @param GCPhys The physical address to start reading at.
2191 * @param pvBuf Where to put the bits we read.
2192 * @param cb How much to read - less or equal to a page.
2193 * @param enmOrigin The origin of this call.
2194 */
2195static VBOXSTRICTRC pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb,
2196 PGMACCESSORIGIN enmOrigin)
2197{
2198 /*
2199 * The most frequent access here is MMIO and shadowed ROM.
2200 * The current code ASSUMES all these access handlers cover full pages!
2201 */
2202
2203 /*
2204 * Whatever we do we need the source page, map it first.
2205 */
2206 PGMPAGEMAPLOCK PgMpLck;
2207 const void *pvSrc = NULL;
2208 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2209 if (RT_FAILURE(rc))
2210 {
2211 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2212 GCPhys, pPage, rc));
2213 memset(pvBuf, 0xff, cb);
2214 return VINF_SUCCESS;
2215 }
2216
2217 VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
2218
2219 /*
2220 * Deal with any physical handlers.
2221 */
2222 PVMCPU pVCpu = VMMGetCpu(pVM);
2223#ifdef IN_RING3
2224 PPGMPHYSHANDLER pPhys = NULL;
2225#endif
2226 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2227 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2228 {
2229#ifdef IN_RING3
2230 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2231 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2232 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
2233 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
2234 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2235
2236 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler); Assert(pfnHandler);
2237 void *pvUser = pPhys->CTX_SUFF(pvUser);
2238
2239 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
2240 STAM_PROFILE_START(&pPhys->Stat, h);
2241 PGM_LOCK_ASSERT_OWNER(pVM);
2242 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2243 pgmUnlock(pVM);
2244 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, pvUser);
2245 pgmLock(pVM);
2246# ifdef VBOX_WITH_STATISTICS
2247 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2248 if (pPhys)
2249 STAM_PROFILE_STOP(&pPhys->Stat, h);
2250# else
2251 pPhys = NULL; /* might not be valid anymore. */
2252# endif
2253 AssertLogRelMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_HANDLER_DO_DEFAULT,
2254 ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
2255
2256#else
2257 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2258 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2259 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2260 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2261#endif
2262 }
2263
2264 /*
2265 * Deal with any virtual handlers.
2266 */
2267 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
2268 {
2269 unsigned iPage;
2270 PPGMVIRTHANDLER pVirt = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &iPage);
2271 AssertReleaseMsg(pVirt, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2272 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
2273 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2274 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
2275
2276 PPGMVIRTHANDLERTYPEINT pVirtType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
2277#ifdef IN_RING3
2278 if (pVirtType->pfnHandlerR3)
2279 {
2280 if (!pPhys)
2281 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2282 else
2283 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
2284 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2285 + (iPage << PAGE_SHIFT)
2286 + (GCPhys & PAGE_OFFSET_MASK);
2287
2288 STAM_PROFILE_START(&pVirt->Stat, h);
2289 VBOXSTRICTRC rcStrict2 = pVirtType->CTX_SUFF(pfnHandler)(pVM, pVCpu, GCPtr, (void *)pvSrc, pvBuf, cb,
2290 PGMACCESSTYPE_READ, enmOrigin, pVirt->CTX_SUFF(pvUser));
2291 STAM_PROFILE_STOP(&pVirt->Stat, h);
2292 if (rcStrict2 == VINF_SUCCESS)
2293 rcStrict = rcStrict == VINF_PGM_HANDLER_DO_DEFAULT ? VINF_SUCCESS : rcStrict;
2294 else if (rcStrict2 != VINF_PGM_HANDLER_DO_DEFAULT)
2295 {
2296 AssertLogRelMsgFailed(("rcStrict2=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2297 VBOXSTRICTRC_VAL(rcStrict2), GCPhys, pPage, pVirt->pszDesc));
2298 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_HANDLER_DO_DEFAULT || rcStrict2 < rcStrict)
2299 rcStrict = rcStrict2;
2300 }
2301 }
2302 else
2303 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2304#else
2305 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2306 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2307 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2308 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2309#endif
2310 }
2311
2312 /*
2313 * Take the default action.
2314 */
2315 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2316 {
2317 memcpy(pvBuf, pvSrc, cb);
2318 rcStrict = VINF_SUCCESS;
2319 }
2320 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2321 return rcStrict;
2322}
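
/*
 * Illustrative sketch (not part of the original file) of the handler shape the
 * pfnHandler invocations above expect.  The parameter list mirrors those call
 * sites; see the PFNPGMPHYSHANDLER typedef for the authoritative signature.
 * Returning VINF_PGM_HANDLER_DO_DEFAULT tells the caller to perform the normal
 * memcpy on the backing page, as the default-action code above does.  The
 * function name is made up for the example.
 */
#if 0 /* example only, not built */
static DECLCALLBACK(VBOXSTRICTRC)
pgmPhysExampleAllHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    NOREF(pVM); NOREF(pVCpu); NOREF(pvPhys); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser);
    Log(("pgmPhysExampleAllHandler: %s access at %RGp cbBuf=%zu\n",
         enmAccessType == PGMACCESSTYPE_WRITE ? "write" : "read", GCPhys, cbBuf));
    return VINF_PGM_HANDLER_DO_DEFAULT; /* let PGM copy to/from the backing page itself. */
}
#endif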
2323
2324
2325/**
2326 * Read physical memory.
2327 *
2328 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2329 * want to ignore those.
2330 *
2331 * @returns VBox status code. Can be ignored in ring-3.
2332 * @retval VINF_SUCCESS.
2333 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2334 *
2335 * @param pVM Pointer to the VM.
2336 * @param GCPhys Physical address to start reading from.
2337 * @param pvBuf Where to put the read bits.
2338 * @param cbRead How many bytes to read.
2339 * @param enmOrigin The origin of this call.
2340 */
2341VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2342{
2343 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2344 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2345
2346 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysRead));
2347 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2348
2349 pgmLock(pVM);
2350
2351 /*
2352 * Copy loop on ram ranges.
2353 */
2354 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2355 for (;;)
2356 {
2357 /* Inside range or not? */
2358 if (pRam && GCPhys >= pRam->GCPhys)
2359 {
2360 /*
2361 * Must work our way thru this page by page.
2362 */
2363 RTGCPHYS off = GCPhys - pRam->GCPhys;
2364 while (off < pRam->cb)
2365 {
2366 unsigned iPage = off >> PAGE_SHIFT;
2367 PPGMPAGE pPage = &pRam->aPages[iPage];
2368 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2369 if (cb > cbRead)
2370 cb = cbRead;
2371
2372 /*
2373 * Normal page? Get the pointer to it.
2374 */
2375 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2376 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2377 {
2378 /*
2379 * Get the pointer to the page.
2380 */
2381 PGMPAGEMAPLOCK PgMpLck;
2382 const void *pvSrc;
2383 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2384 if (RT_SUCCESS(rc))
2385 {
2386 memcpy(pvBuf, pvSrc, cb);
2387 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2388 }
2389 else
2390 {
2391 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2392 pRam->GCPhys + off, pPage, rc));
2393 memset(pvBuf, 0xff, cb);
2394 }
2395 }
2396 /*
2397 * Have ALL/MMIO access handlers.
2398 */
2399 else
2400 {
2401 VBOXSTRICTRC rcStrict = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2402 if (rcStrict != VINF_SUCCESS)
2403 {
2404 pgmUnlock(pVM);
2405 return VBOXSTRICTRC_TODO(rcStrict);
2406 }
2407 }
2408
2409 /* next page */
2410 if (cb >= cbRead)
2411 {
2412 pgmUnlock(pVM);
2413 return VINF_SUCCESS;
2414 }
2415 cbRead -= cb;
2416 off += cb;
2417 pvBuf = (char *)pvBuf + cb;
2418 } /* walk pages in ram range. */
2419
2420 GCPhys = pRam->GCPhysLast + 1;
2421 }
2422 else
2423 {
2424 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2425
2426 /*
2427 * Unassigned address space.
2428 */
2429 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2430 if (cb >= cbRead)
2431 {
2432 memset(pvBuf, 0xff, cbRead);
2433 break;
2434 }
2435 memset(pvBuf, 0xff, cb);
2436
2437 cbRead -= cb;
2438 pvBuf = (char *)pvBuf + cb;
2439 GCPhys += cb;
2440 }
2441
2442 /* Advance range if necessary. */
2443 while (pRam && GCPhys > pRam->GCPhysLast)
2444 pRam = pRam->CTX_SUFF(pNext);
2445 } /* Ram range walk */
2446
2447 pgmUnlock(pVM);
2448 return VINF_SUCCESS;
2449}
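
/*
 * Illustrative sketch (not part of the original file): reads a small blob from
 * guest physical memory while respecting access handlers and MMIO.  The
 * function name is made up, and PGMACCESSORIGIN_IEM is used only because it is
 * the one origin value named in this file; a real caller passes its own origin.
 */
#if 0 /* example only, not built */
static int pgmPhysExampleReadBlob(PVM pVM, RTGCPHYS GCPhys, void *pvDst, size_t cbDst)
{
    /* In R0/RC this may return VERR_PGM_PHYS_WR_HIT_HANDLER, meaning the access
       must be redone in ring-3; per the docs above that status never shows up in R3. */
    return PGMPhysRead(pVM, GCPhys, pvDst, cbDst, PGMACCESSORIGIN_IEM);
}
#endif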
2450
2451
2452/**
2453 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2454 *
2455 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2456 * @retval VINF_SUCCESS.
2457 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3 or with
2458 * PGMACCESSORIGIN_IEM.
2459 *
2460 * @param pVM Pointer to the VM.
2461 * @param pPage The page descriptor.
2462 * @param GCPhys The physical address to start writing at.
2463 * @param pvBuf What to write.
2464 * @param cbWrite How much to write - less or equal to a page.
2465 * @param enmOrigin The origin of this call.
2466 */
2467static VBOXSTRICTRC pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2468 PGMACCESSORIGIN enmOrigin)
2469{
2470 PGMPAGEMAPLOCK PgMpLck;
2471 void *pvDst = NULL;
2472 VBOXSTRICTRC rcStrict;
2473
2474 /*
2475 * Give priority to physical handlers (like #PF does).
2476 *
2477 * Hope for a lonely physical handler first that covers the whole
2478 * write area. This should be a pretty frequent case with MMIO and
2479 * the heavy usage of full page handlers in the page pool.
2480 */
2481 PVMCPU pVCpu = VMMGetCpu(pVM);
2482 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
2483 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage) /* screw virtual handlers on MMIO pages */)
2484 {
2485 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2486 if (pCur)
2487 {
2488 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2489
2490 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2491 if (cbRange > cbWrite)
2492 cbRange = cbWrite;
2493
2494#ifndef IN_RING3
2495 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2496 NOREF(cbRange);
2497 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2498 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2499
2500#else /* IN_RING3 */
2501 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler));
2502 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2503 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2504 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2505 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2506 else
2507 rcStrict = VINF_SUCCESS;
2508 if (RT_SUCCESS(rcStrict))
2509 {
2510 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler);
2511 void *pvUser = pCur->CTX_SUFF(pvUser);
2512
2513 STAM_PROFILE_START(&pCur->Stat, h);
2514 PGM_LOCK_ASSERT_OWNER(pVM);
2515 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2516 pgmUnlock(pVM);
2517 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2518 pgmLock(pVM);
2519# ifdef VBOX_WITH_STATISTICS
2520 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2521 if (pCur)
2522 STAM_PROFILE_STOP(&pCur->Stat, h);
2523# else
2524 pCur = NULL; /* might not be valid anymore. */
2525# endif
2526 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2527 {
2528 if (pvDst)
2529 memcpy(pvDst, pvBuf, cbRange);
2530 rcStrict = VINF_SUCCESS;
2531 }
2532 else
2533 AssertLogRelMsg(rcStrict == VINF_SUCCESS,
2534 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2535 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pCur ? pCur->pszDesc : ""));
2536 }
2537 else
2538 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2539 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
2540 if (RT_LIKELY(cbRange == cbWrite) || rcStrict != VINF_SUCCESS)
2541 {
2542 if (pvDst)
2543 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2544 return rcStrict;
2545 }
2546
2547 /* more fun to be had below */
2548 cbWrite -= cbRange;
2549 GCPhys += cbRange;
2550 pvBuf = (uint8_t *)pvBuf + cbRange;
2551 pvDst = (uint8_t *)pvDst + cbRange;
2552#endif /* IN_RING3 */
2553 }
2554 /* else: the handler is somewhere else in the page, deal with it below. */
2555 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2556 }
2557 /*
2558 * A virtual handler without any interfering physical handlers.
2559 * Hopefully it'll cover the whole write.
2560 */
2561 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2562 {
2563 unsigned iPage;
2564 PPGMVIRTHANDLER pVirt = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &iPage);
2565 if (pVirt)
2566 {
2567 PPGMVIRTHANDLERTYPEINT pVirtType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
2568
2569 size_t cbRange = (PAGE_OFFSET_MASK & pVirt->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2570 if (cbRange > cbWrite)
2571 cbRange = cbWrite;
2572
2573#ifndef IN_RING3
2574 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2575 NOREF(cbRange);
2576 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2577 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2578
2579#else /* IN_RING3 */
2580
2581 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n",
2582 GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2583 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2584 if (RT_SUCCESS(rcStrict))
2585 {
2586 if (pVirtType->pfnHandlerR3)
2587 {
2588 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2589 + (iPage << PAGE_SHIFT)
2590 + (GCPhys & PAGE_OFFSET_MASK);
2591
2592 STAM_PROFILE_START(&pVirt->Stat, h);
2593 rcStrict = pVirtType->CTX_SUFF(pfnHandler)(pVM, pVCpu, GCPtr, pvDst, (void *)pvBuf, cbRange,
2594 PGMACCESSTYPE_WRITE, enmOrigin, pVirt->CTX_SUFF(pvUser));
2595 STAM_PROFILE_STOP(&pVirt->Stat, h);
2596 }
2597 else
2598 rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
2599 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2600 {
2601 memcpy(pvDst, pvBuf, cbRange);
2602 rcStrict = VINF_SUCCESS;
2603 }
2604 else
2605 AssertLogRelMsg(rcStrict == VINF_SUCCESS,
2606 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2607 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pVirt->pszDesc));
2608 }
2609 else
2610 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2611 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
2612 if (RT_LIKELY(cbRange == cbWrite) || rcStrict != VINF_SUCCESS)
2613 {
2614 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2615 return rcStrict;
2616 }
2617
2618 /* more fun to be had below */
2619 cbWrite -= cbRange;
2620 GCPhys += cbRange;
2621 pvBuf = (uint8_t *)pvBuf + cbRange;
2622 pvDst = (uint8_t *)pvDst + cbRange;
2623#endif
2624 }
2625 /* else: the handler is somewhere else in the page, deal with it below. */
2626 }
2627
2628 /*
2629 * Deal with all the odd ends.
2630 */
2631
2632 /* We need a writable destination page. */
2633 if (!pvDst)
2634 {
2635 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2636 AssertLogRelMsgReturn(RT_SUCCESS(rcStrict),
2637 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2638 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
2639 }
2640
2641 /* The loop state (big + ugly). */
2642 unsigned iVirtPage = 0;
2643 PPGMVIRTHANDLER pVirt = NULL;
2644 uint32_t offVirt = PAGE_SIZE;
2645 uint32_t offVirtLast = PAGE_SIZE;
2646 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2647
2648 PPGMPHYSHANDLER pPhys = NULL;
2649 uint32_t offPhys = PAGE_SIZE;
2650 uint32_t offPhysLast = PAGE_SIZE;
2651 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2652
2653 /* The loop. */
2654 for (;;)
2655 {
2656 /*
2657 * Find the closest handler at or above GCPhys.
2658 */
2659 if (fMoreVirt && !pVirt)
2660 {
2661 pVirt = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &iVirtPage);
2662 if (pVirt)
2663 {
2664 offVirt = 0;
2665 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2666 }
2667 else
2668 {
2669 PPGMPHYS2VIRTHANDLER pVirtPhys;
2670 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2671 GCPhys, true /* fAbove */);
2672 if ( pVirtPhys
2673 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2674 {
2675 /* ASSUME that pVirtPhys only covers one page. */
2676 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2677 Assert(pVirtPhys->Core.Key > GCPhys);
2678
2679 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2680 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2681 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2682 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2683 }
2684 else
2685 {
2686 pVirt = NULL;
2687 fMoreVirt = false;
2688 offVirt = offVirtLast = PAGE_SIZE;
2689 }
2690 }
2691 }
2692
2693 if (fMorePhys && !pPhys)
2694 {
2695 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2696 if (pPhys)
2697 {
2698 offPhys = 0;
2699 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2700 }
2701 else
2702 {
2703 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2704 GCPhys, true /* fAbove */);
2705 if ( pPhys
2706 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2707 {
2708 offPhys = pPhys->Core.Key - GCPhys;
2709 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2710 }
2711 else
2712 {
2713 pPhys = NULL;
2714 fMorePhys = false;
2715 offPhys = offPhysLast = PAGE_SIZE;
2716 }
2717 }
2718 }
2719
2720 /*
2721 * Handle access to space without handlers (that's easy).
2722 */
2723 rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
2724 uint32_t cbRange = (uint32_t)cbWrite;
2725 if (offPhys && offVirt)
2726 {
2727 if (cbRange > offPhys)
2728 cbRange = offPhys;
2729 if (cbRange > offVirt)
2730 cbRange = offVirt;
2731 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2732 }
2733 /*
2734 * Physical handler.
2735 */
2736 else if (!offPhys && offVirt)
2737 {
2738 if (cbRange > offPhysLast + 1)
2739 cbRange = offPhysLast + 1;
2740 if (cbRange > offVirt)
2741 cbRange = offVirt;
2742#ifdef IN_RING3
2743 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
2744 void *pvUser = pPhys->CTX_SUFF(pvUser);
2745
2746 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2747 STAM_PROFILE_START(&pPhys->Stat, h);
2748 PGM_LOCK_ASSERT_OWNER(pVM);
2749 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2750 pgmUnlock(pVM);
2751 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2752 pgmLock(pVM);
2753# ifdef VBOX_WITH_STATISTICS
2754 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2755 if (pPhys)
2756 STAM_PROFILE_STOP(&pPhys->Stat, h);
2757# else
2758 pPhys = NULL; /* might not be valid anymore. */
2759# endif
2760 AssertLogRelMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_HANDLER_DO_DEFAULT,
2761 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2762 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2763#else
2764 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2765 NOREF(cbRange);
2766 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2767 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2768 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2769#endif
2770 }
2771 /*
2772 * Virtual handler.
2773 */
2774 else if (offPhys && !offVirt)
2775 {
2776 if (cbRange > offVirtLast + 1)
2777 cbRange = offVirtLast + 1;
2778 if (cbRange > offPhys)
2779 cbRange = offPhys;
2780
2781 PPGMVIRTHANDLERTYPEINT pVirtType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
2782#ifdef IN_RING3
2783 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2784 if (pVirtType->pfnHandlerR3)
2785 {
2786 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2787 + (iVirtPage << PAGE_SHIFT)
2788 + (GCPhys & PAGE_OFFSET_MASK);
2789 STAM_PROFILE_START(&pVirt->Stat, h);
2790 rcStrict = pVirtType->CTX_SUFF(pfnHandler)(pVM, pVCpu, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE,
2791 enmOrigin, pVirt->CTX_SUFF(pvUser));
2792 STAM_PROFILE_STOP(&pVirt->Stat, h);
2793 AssertLogRelMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_HANDLER_DO_DEFAULT,
2794 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2795 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pVirt->pszDesc));
2796 }
2797 pVirt = NULL;
2798#else
2799 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2800 NOREF(cbRange);
2801 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2802 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2803 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2804#endif
2805 }
2806 /*
2807 * Both... give the physical one priority.
2808 */
2809 else
2810 {
2811 Assert(!offPhys && !offVirt);
2812 if (cbRange > offVirtLast + 1)
2813 cbRange = offVirtLast + 1;
2814 if (cbRange > offPhysLast + 1)
2815 cbRange = offPhysLast + 1;
2816
2817 PPGMVIRTHANDLERTYPEINT pVirtType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
2818#ifdef IN_RING3
2819 if (pVirtType->pfnHandlerR3)
2820 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2821 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2822
2823 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
2824 void *pvUser = pPhys->CTX_SUFF(pvUser);
2825
2826 STAM_PROFILE_START(&pPhys->Stat, h);
2827 PGM_LOCK_ASSERT_OWNER(pVM);
2828 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2829 pgmUnlock(pVM);
2830 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2831 pgmLock(pVM);
2832# ifdef VBOX_WITH_STATISTICS
2833 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2834 if (pPhys)
2835 STAM_PROFILE_STOP(&pPhys->Stat, h);
2836# else
2837 pPhys = NULL; /* might not be valid anymore. */
2838# endif
2839 AssertLogRelMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_HANDLER_DO_DEFAULT,
2840 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2841 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2842 if (pVirtType->pfnHandlerR3)
2843 {
2844
2845 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2846 + (iVirtPage << PAGE_SHIFT)
2847 + (GCPhys & PAGE_OFFSET_MASK);
2848 STAM_PROFILE_START(&pVirt->Stat, h2);
2849 VBOXSTRICTRC rcStrict2 = pVirtType->CTX_SUFF(pfnHandler)(pVM, pVCpu, GCPtr, pvDst, (void *)pvBuf, cbRange,
2850 PGMACCESSTYPE_WRITE, enmOrigin, pVirt->CTX_SUFF(pvUser));
2851 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2852 if (rcStrict2 == VINF_SUCCESS)
2853 rcStrict = rcStrict == VINF_PGM_HANDLER_DO_DEFAULT ? VINF_SUCCESS : rcStrict;
2854 else if (rcStrict2 != VINF_PGM_HANDLER_DO_DEFAULT)
2855 {
2856 AssertLogRelMsgFailed(("rcStrict2=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2857 VBOXSTRICTRC_VAL(rcStrict2), GCPhys, pPage, pVirt->pszDesc));
2858 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_HANDLER_DO_DEFAULT || rcStrict2 < rcStrict)
2859 rcStrict = rcStrict2;
2860 }
2861 }
2862 pPhys = NULL;
2863 pVirt = NULL;
2864#else
2865 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2866 NOREF(cbRange);
2867 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2868 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2869 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2870#endif
2871 }
2872 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2873 {
2874 memcpy(pvDst, pvBuf, cbRange);
2875 rcStrict = VINF_SUCCESS;
2876 }
2877
2878 /*
2879 * Advance if we've got more stuff to do.
2880 */
2881 if (cbRange >= cbWrite || rcStrict != VINF_SUCCESS)
2882 {
2883 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2884 return rcStrict;
2885 }
2886
2887 cbWrite -= cbRange;
2888 GCPhys += cbRange;
2889 pvBuf = (uint8_t *)pvBuf + cbRange;
2890 pvDst = (uint8_t *)pvDst + cbRange;
2891
2892 offPhys -= cbRange;
2893 offPhysLast -= cbRange;
2894 offVirt -= cbRange;
2895 offVirtLast -= cbRange;
2896 }
2897}
2898
2899
2900/**
2901 * Write to physical memory.
2902 *
2903 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2904 * want to ignore those.
2905 *
2906 * @returns VBox status code. Can be ignored in ring-3.
2907 * @retval VINF_SUCCESS.
2908 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2909 *
2910 * @param pVM Pointer to the VM.
2911 * @param GCPhys Physical address to write to.
2912 * @param pvBuf What to write.
2913 * @param cbWrite How many bytes to write.
2914 * @param enmOrigin Who is calling.
2915 */
2916VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2917{
2918 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2919 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2920 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2921
2922 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWrite));
2923 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2924
2925 pgmLock(pVM);
2926
2927 /*
2928 * Copy loop on ram ranges.
2929 */
2930 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2931 for (;;)
2932 {
2933 /* Inside range or not? */
2934 if (pRam && GCPhys >= pRam->GCPhys)
2935 {
2936 /*
2937 * Must work our way thru this page by page.
2938 */
2939 RTGCPTR off = GCPhys - pRam->GCPhys;
2940 while (off < pRam->cb)
2941 {
2942 RTGCPTR iPage = off >> PAGE_SHIFT;
2943 PPGMPAGE pPage = &pRam->aPages[iPage];
2944 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2945 if (cb > cbWrite)
2946 cb = cbWrite;
2947
2948 /*
2949 * Normal page? Get the pointer to it.
2950 */
2951 if ( !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
2952 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2953 {
2954 PGMPAGEMAPLOCK PgMpLck;
2955 void *pvDst;
2956 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
2957 if (RT_SUCCESS(rc))
2958 {
2959 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2960 memcpy(pvDst, pvBuf, cb);
2961 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2962 }
2963 /* Ignore writes to ballooned pages. */
2964 else if (!PGM_PAGE_IS_BALLOONED(pPage))
2965 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2966 pRam->GCPhys + off, pPage, rc));
2967 }
2968 /*
2969 * Active WRITE or ALL access handlers.
2970 */
2971 else
2972 {
2973 VBOXSTRICTRC rcStrict = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2974 if (rcStrict != VINF_SUCCESS)
2975 {
2976 pgmUnlock(pVM);
2977 return VBOXSTRICTRC_TODO(rcStrict);
2978 }
2979 }
2980
2981 /* next page */
2982 if (cb >= cbWrite)
2983 {
2984 pgmUnlock(pVM);
2985 return VINF_SUCCESS;
2986 }
2987
2988 cbWrite -= cb;
2989 off += cb;
2990 pvBuf = (const char *)pvBuf + cb;
2991 } /* walk pages in ram range */
2992
2993 GCPhys = pRam->GCPhysLast + 1;
2994 }
2995 else
2996 {
2997 /*
2998 * Unassigned address space, skip it.
2999 */
3000 if (!pRam)
3001 break;
3002 size_t cb = pRam->GCPhys - GCPhys;
3003 if (cb >= cbWrite)
3004 break;
3005 cbWrite -= cb;
3006 pvBuf = (const char *)pvBuf + cb;
3007 GCPhys += cb;
3008 }
3009
3010 /* Advance range if necessary. */
3011 while (pRam && GCPhys > pRam->GCPhysLast)
3012 pRam = pRam->CTX_SUFF(pNext);
3013 } /* Ram range walk */
3014
3015 pgmUnlock(pVM);
3016 return VINF_SUCCESS;
3017}
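
/*
 * Illustrative sketch (not part of the original file): writes a few bytes of
 * guest physical memory and, in R0/RC, treats VERR_PGM_PHYS_WR_HIT_HANDLER as
 * "redo the access in ring-3", which is how the status is documented above.
 * The function name is made up, and the VINF_EM_RAW_TO_R3 fallback chosen here
 * is just one plausible reaction.
 */
#if 0 /* example only, not built */
static int pgmPhysExampleWriteBytes(PVM pVM, RTGCPHYS GCPhys, const void *pvSrc, size_t cbSrc)
{
    int rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbSrc, PGMACCESSORIGIN_IEM);
#ifndef IN_RING3
    if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)
        rc = VINF_EM_RAW_TO_R3; /* defer the access to the ring-3 handler. */
#endif
    return rc;
}
#endif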
3018
3019
3020/**
3021 * Read from guest physical memory by GC physical address, bypassing
3022 * MMIO and access handlers.
3023 *
3024 * @returns VBox status.
3025 * @param pVM Pointer to the VM.
3026 * @param pvDst The destination address.
3027 * @param GCPhysSrc The source address (GC physical address).
3028 * @param cb The number of bytes to read.
3029 */
3030VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
3031{
3032 /*
3033 * Treat the first page as a special case.
3034 */
3035 if (!cb)
3036 return VINF_SUCCESS;
3037
3038 /* map the 1st page */
3039 void const *pvSrc;
3040 PGMPAGEMAPLOCK Lock;
3041 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3042 if (RT_FAILURE(rc))
3043 return rc;
3044
3045 /* optimize for the case where access is completely within the first page. */
3046 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
3047 if (RT_LIKELY(cb <= cbPage))
3048 {
3049 memcpy(pvDst, pvSrc, cb);
3050 PGMPhysReleasePageMappingLock(pVM, &Lock);
3051 return VINF_SUCCESS;
3052 }
3053
3054 /* copy to the end of the page. */
3055 memcpy(pvDst, pvSrc, cbPage);
3056 PGMPhysReleasePageMappingLock(pVM, &Lock);
3057 GCPhysSrc += cbPage;
3058 pvDst = (uint8_t *)pvDst + cbPage;
3059 cb -= cbPage;
3060
3061 /*
3062 * Page by page.
3063 */
3064 for (;;)
3065 {
3066 /* map the page */
3067 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3068 if (RT_FAILURE(rc))
3069 return rc;
3070
3071 /* last page? */
3072 if (cb <= PAGE_SIZE)
3073 {
3074 memcpy(pvDst, pvSrc, cb);
3075 PGMPhysReleasePageMappingLock(pVM, &Lock);
3076 return VINF_SUCCESS;
3077 }
3078
3079 /* copy the entire page and advance */
3080 memcpy(pvDst, pvSrc, PAGE_SIZE);
3081 PGMPhysReleasePageMappingLock(pVM, &Lock);
3082 GCPhysSrc += PAGE_SIZE;
3083 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3084 cb -= PAGE_SIZE;
3085 }
3086 /* won't ever get here. */
3087}
3088
3089
3090/**
3091 * Write to guest physical memory referenced by GC physical address.
3093 *
3094 * This will bypass MMIO and access handlers.
3095 *
3096 * @returns VBox status.
3097 * @param pVM Pointer to the VM.
3098 * @param GCPhysDst The GC physical address of the destination.
3099 * @param pvSrc The source buffer.
3100 * @param cb The number of bytes to write.
3101 */
3102VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
3103{
3104 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
3105
3106 /*
3107 * Treat the first page as a special case.
3108 */
3109 if (!cb)
3110 return VINF_SUCCESS;
3111
3112 /* map the 1st page */
3113 void *pvDst;
3114 PGMPAGEMAPLOCK Lock;
3115 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3116 if (RT_FAILURE(rc))
3117 return rc;
3118
3119 /* optimize for the case where access is completely within the first page. */
3120 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
3121 if (RT_LIKELY(cb <= cbPage))
3122 {
3123 memcpy(pvDst, pvSrc, cb);
3124 PGMPhysReleasePageMappingLock(pVM, &Lock);
3125 return VINF_SUCCESS;
3126 }
3127
3128 /* copy to the end of the page. */
3129 memcpy(pvDst, pvSrc, cbPage);
3130 PGMPhysReleasePageMappingLock(pVM, &Lock);
3131 GCPhysDst += cbPage;
3132 pvSrc = (const uint8_t *)pvSrc + cbPage;
3133 cb -= cbPage;
3134
3135 /*
3136 * Page by page.
3137 */
3138 for (;;)
3139 {
3140 /* map the page */
3141 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3142 if (RT_FAILURE(rc))
3143 return rc;
3144
3145 /* last page? */
3146 if (cb <= PAGE_SIZE)
3147 {
3148 memcpy(pvDst, pvSrc, cb);
3149 PGMPhysReleasePageMappingLock(pVM, &Lock);
3150 return VINF_SUCCESS;
3151 }
3152
3153 /* copy the entire page and advance */
3154 memcpy(pvDst, pvSrc, PAGE_SIZE);
3155 PGMPhysReleasePageMappingLock(pVM, &Lock);
3156 GCPhysDst += PAGE_SIZE;
3157 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3158 cb -= PAGE_SIZE;
3159 }
3160 /* won't ever get here. */
3161}
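
/*
 * Illustrative sketch (not part of the original file): pokes raw bytes into
 * guest RAM without triggering access handlers, the kind of direct write the
 * "Simple" APIs are meant for.  Contrast with pgmPhysExampleWriteBytes above,
 * which goes through PGMPhysWrite and thus through the handlers.  The function
 * name is made up for the example.
 */
#if 0 /* example only, not built */
static int pgmPhysExamplePokeRaw(PVM pVM, RTGCPHYS GCPhys, uint32_t u32)
{
    return PGMPhysSimpleWriteGCPhys(pVM, GCPhys, &u32, sizeof(u32));
}
#endif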
3162
3163
3164/**
3165 * Read from guest physical memory referenced by GC pointer.
3166 *
3167 * This function uses the current CR3/CR0/CR4 of the guest and will
3168 * bypass access handlers and not set any accessed bits.
3169 *
3170 * @returns VBox status.
3171 * @param pVCpu Handle to the current virtual CPU.
3172 * @param pvDst The destination address.
3173 * @param GCPtrSrc The source address (GC pointer).
3174 * @param cb The number of bytes to read.
3175 */
3176VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3177{
3178 PVM pVM = pVCpu->CTX_SUFF(pVM);
3179/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3180
3181 /*
3182 * Treat the first page as a special case.
3183 */
3184 if (!cb)
3185 return VINF_SUCCESS;
3186
3187 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleRead));
3188 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3189
3190 /* Take the PGM lock here once, because the functions called below would otherwise each take it for only a very short
3191 * period, which is counter-productive when many VCPUs are fighting for the lock.
3192 */
3193 pgmLock(pVM);
3194
3195 /* map the 1st page */
3196 void const *pvSrc;
3197 PGMPAGEMAPLOCK Lock;
3198 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3199 if (RT_FAILURE(rc))
3200 {
3201 pgmUnlock(pVM);
3202 return rc;
3203 }
3204
3205 /* optimize for the case where access is completely within the first page. */
3206 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3207 if (RT_LIKELY(cb <= cbPage))
3208 {
3209 memcpy(pvDst, pvSrc, cb);
3210 PGMPhysReleasePageMappingLock(pVM, &Lock);
3211 pgmUnlock(pVM);
3212 return VINF_SUCCESS;
3213 }
3214
3215 /* copy to the end of the page. */
3216 memcpy(pvDst, pvSrc, cbPage);
3217 PGMPhysReleasePageMappingLock(pVM, &Lock);
3218 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3219 pvDst = (uint8_t *)pvDst + cbPage;
3220 cb -= cbPage;
3221
3222 /*
3223 * Page by page.
3224 */
3225 for (;;)
3226 {
3227 /* map the page */
3228 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3229 if (RT_FAILURE(rc))
3230 {
3231 pgmUnlock(pVM);
3232 return rc;
3233 }
3234
3235 /* last page? */
3236 if (cb <= PAGE_SIZE)
3237 {
3238 memcpy(pvDst, pvSrc, cb);
3239 PGMPhysReleasePageMappingLock(pVM, &Lock);
3240 pgmUnlock(pVM);
3241 return VINF_SUCCESS;
3242 }
3243
3244 /* copy the entire page and advance */
3245 memcpy(pvDst, pvSrc, PAGE_SIZE);
3246 PGMPhysReleasePageMappingLock(pVM, &Lock);
3247 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
3248 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3249 cb -= PAGE_SIZE;
3250 }
3251 /* won't ever get here. */
3252}
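
/*
 * Illustrative sketch (not part of the original file): copies a small guest
 * structure out by its virtual address without touching access handlers or
 * accessed bits, e.g. for debugger-style inspection.  Use PGMPhysReadGCPtr
 * instead when handlers must be respected.  The function name is made up.
 */
#if 0 /* example only, not built */
static int pgmPhysExampleInspectGuestStruct(PVMCPU pVCpu, RTGCPTR GCPtr, void *pvDst, size_t cbDst)
{
    return PGMPhysSimpleReadGCPtr(pVCpu, pvDst, GCPtr, cbDst);
}
#endif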
3253
3254
3255/**
3256 * Write to guest physical memory referenced by GC pointer.
3257 *
3258 * This function uses the current CR3/CR0/CR4 of the guest and will
3259 * bypass access handlers and not set dirty or accessed bits.
3260 *
3261 * @returns VBox status.
3262 * @param pVCpu Handle to the current virtual CPU.
3263 * @param GCPtrDst The destination address (GC pointer).
3264 * @param pvSrc The source address.
3265 * @param cb The number of bytes to write.
3266 */
3267VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3268{
3269 PVM pVM = pVCpu->CTX_SUFF(pVM);
3270 VMCPU_ASSERT_EMT(pVCpu);
3271
3272 /*
3273 * Treat the first page as a special case.
3274 */
3275 if (!cb)
3276 return VINF_SUCCESS;
3277
3278 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWrite));
3279 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3280
3281 /* map the 1st page */
3282 void *pvDst;
3283 PGMPAGEMAPLOCK Lock;
3284 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3285 if (RT_FAILURE(rc))
3286 return rc;
3287
3288 /* optimize for the case where access is completely within the first page. */
3289 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3290 if (RT_LIKELY(cb <= cbPage))
3291 {
3292 memcpy(pvDst, pvSrc, cb);
3293 PGMPhysReleasePageMappingLock(pVM, &Lock);
3294 return VINF_SUCCESS;
3295 }
3296
3297 /* copy to the end of the page. */
3298 memcpy(pvDst, pvSrc, cbPage);
3299 PGMPhysReleasePageMappingLock(pVM, &Lock);
3300 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3301 pvSrc = (const uint8_t *)pvSrc + cbPage;
3302 cb -= cbPage;
3303
3304 /*
3305 * Page by page.
3306 */
3307 for (;;)
3308 {
3309 /* map the page */
3310 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3311 if (RT_FAILURE(rc))
3312 return rc;
3313
3314 /* last page? */
3315 if (cb <= PAGE_SIZE)
3316 {
3317 memcpy(pvDst, pvSrc, cb);
3318 PGMPhysReleasePageMappingLock(pVM, &Lock);
3319 return VINF_SUCCESS;
3320 }
3321
3322 /* copy the entire page and advance */
3323 memcpy(pvDst, pvSrc, PAGE_SIZE);
3324 PGMPhysReleasePageMappingLock(pVM, &Lock);
3325 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3326 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3327 cb -= PAGE_SIZE;
3328 }
3329 /* won't ever get here. */
3330}
3331
3332
3333/**
3334 * Write to guest physical memory referenced by GC pointer and update the PTE.
3335 *
3336 * This function uses the current CR3/CR0/CR4 of the guest and will
3337 * bypass access handlers but will set the dirty and accessed bits in the PTE.
3338 *
3339 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3340 *
3341 * @returns VBox status.
3342 * @param pVCpu Handle to the current virtual CPU.
3343 * @param GCPtrDst The destination address (GC pointer).
3344 * @param pvSrc The source address.
3345 * @param cb The number of bytes to write.
3346 */
3347VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3348{
3349 PVM pVM = pVCpu->CTX_SUFF(pVM);
3350 VMCPU_ASSERT_EMT(pVCpu);
3351
3352 /*
3353 * Treat the first page as a special case.
3354 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3355 */
3356 if (!cb)
3357 return VINF_SUCCESS;
3358
3359 /* map the 1st page */
3360 void *pvDst;
3361 PGMPAGEMAPLOCK Lock;
3362 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3363 if (RT_FAILURE(rc))
3364 return rc;
3365
3366 /* optimize for the case where access is completely within the first page. */
3367 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3368 if (RT_LIKELY(cb <= cbPage))
3369 {
3370 memcpy(pvDst, pvSrc, cb);
3371 PGMPhysReleasePageMappingLock(pVM, &Lock);
3372 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3373 return VINF_SUCCESS;
3374 }
3375
3376 /* copy to the end of the page. */
3377 memcpy(pvDst, pvSrc, cbPage);
3378 PGMPhysReleasePageMappingLock(pVM, &Lock);
3379 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3380 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3381 pvSrc = (const uint8_t *)pvSrc + cbPage;
3382 cb -= cbPage;
3383
3384 /*
3385 * Page by page.
3386 */
3387 for (;;)
3388 {
3389 /* map the page */
3390 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3391 if (RT_FAILURE(rc))
3392 return rc;
3393
3394 /* last page? */
3395 if (cb <= PAGE_SIZE)
3396 {
3397 memcpy(pvDst, pvSrc, cb);
3398 PGMPhysReleasePageMappingLock(pVM, &Lock);
3399 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3400 return VINF_SUCCESS;
3401 }
3402
3403 /* copy the entire page and advance */
3404 memcpy(pvDst, pvSrc, PAGE_SIZE);
3405 PGMPhysReleasePageMappingLock(pVM, &Lock);
3406 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3407 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3408 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3409 cb -= PAGE_SIZE;
3410 }
3411 /* won't ever get here. */
3412}
3413
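/* Usage sketch (illustrative only; the names below are hypothetical caller
 * context): same calling pattern as PGMPhysSimpleWriteGCPtr, but the guest
 * PTEs are marked accessed and dirty, which matters when emulating an
 * instruction that architecturally stores to memory:
 *
 *      int rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrStackSlot, &uErrCd, sizeof(uErrCd));
 *      AssertRCReturn(rc, rc);
 */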
3414
3415/**
3416 * Read from guest physical memory referenced by GC pointer.
3417 *
3418 * This function uses the current CR3/CR0/CR4 of the guest and will
3419 * respect access handlers and set accessed bits.
3420 *
3421 * @returns VBox status.
3422 * @param pVCpu Handle to the current virtual CPU.
3423 * @param pvDst The destination address.
3424 * @param GCPtrSrc The source address (GC pointer).
3425 * @param cb The number of bytes to read.
3426 * @param enmOrigin Who is calling.
3427 * @thread EMT(pVCpu)
3428 */
3429VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3430{
3431 RTGCPHYS GCPhys;
3432 uint64_t fFlags;
3433 int rc;
3434 PVM pVM = pVCpu->CTX_SUFF(pVM);
3435 VMCPU_ASSERT_EMT(pVCpu);
3436
3437 /*
3438 * Anything to do?
3439 */
3440 if (!cb)
3441 return VINF_SUCCESS;
3442
3443 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3444
3445 /*
3446 * Optimize reads within a single page.
3447 */
3448 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3449 {
3450 /* Convert virtual to physical address + flags */
3451 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3452 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3453 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3454
3455 /* mark the guest page as accessed. */
3456 if (!(fFlags & X86_PTE_A))
3457 {
3458 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3459 AssertRC(rc);
3460 }
3461
3462 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3463 }
3464
3465 /*
3466 * Page by page.
3467 */
3468 for (;;)
3469 {
3470 /* Convert virtual to physical address + flags */
3471 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3472 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3473 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3474
3475 /* mark the guest page as accessed. */
3476 if (!(fFlags & X86_PTE_A))
3477 {
3478 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3479 AssertRC(rc);
3480 }
3481
3482 /* copy */
3483 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3484 if (cbRead < cb)
3485 {
3486 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3487 if (RT_FAILURE(rc))
3488 return rc;
3489 }
3490 else /* Last page (cbRead is PAGE_SIZE, we only need cb!) */
3491 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3492
3493 /* next */
3494 Assert(cb > cbRead);
3495 cb -= cbRead;
3496 pvDst = (uint8_t *)pvDst + cbRead;
3497 GCPtrSrc += cbRead;
3498 }
3499}
3500
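/* Usage sketch (illustrative only; GCPtrDesc and enmOrigin stand in for the
 * caller's own context): unlike the Simple variants above, this goes through
 * PGMPhysRead and therefore respects physical access handlers:
 *
 *      X86DESC Desc;
 *      int rc = PGMPhysReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc), enmOrigin);
 *      if (RT_FAILURE(rc))
 *          return rc;
 */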
3501
3502/**
3503 * Write to guest physical memory referenced by GC pointer.
3504 *
3505 * This function uses the current CR3/CR0/CR4 of the guest and will
3506 * respect access handlers and set dirty and accessed bits.
3507 *
3508 * @returns VBox status.
3509 * @retval VINF_SUCCESS.
3510 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
3511 *
3512 * @param pVCpu Handle to the current virtual CPU.
3513 * @param GCPtrDst The destination address (GC pointer).
3514 * @param pvSrc The source address.
3515 * @param cb The number of bytes to write.
3516 * @param enmOrigin Who is calling.
3517 */
3518VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3519{
3520 RTGCPHYS GCPhys;
3521 uint64_t fFlags;
3522 int rc;
3523 PVM pVM = pVCpu->CTX_SUFF(pVM);
3524 VMCPU_ASSERT_EMT(pVCpu);
3525
3526 /*
3527 * Anything to do?
3528 */
3529 if (!cb)
3530 return VINF_SUCCESS;
3531
3532 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3533
3534 /*
3535 * Optimize writes within a single page.
3536 */
3537 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3538 {
3539 /* Convert virtual to physical address + flags */
3540 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3541 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3542 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3543
3544 /* Mention when we ignore X86_PTE_RW... */
3545 if (!(fFlags & X86_PTE_RW))
3546 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3547
3548 /* Mark the guest page as accessed and dirty if necessary. */
3549 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3550 {
3551 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3552 AssertRC(rc);
3553 }
3554
3555 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3556 }
3557
3558 /*
3559 * Page by page.
3560 */
3561 for (;;)
3562 {
3563 /* Convert virtual to physical address + flags */
3564 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3565 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3566 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3567
3568 /* Mention when we ignore X86_PTE_RW... */
3569 if (!(fFlags & X86_PTE_RW))
3570 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3571
3572 /* Mark the guest page as accessed and dirty if necessary. */
3573 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3574 {
3575 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3576 AssertRC(rc);
3577 }
3578
3579 /* copy */
3580 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3581 if (cbWrite < cb)
3582 {
3583 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3584 if (RT_FAILURE(rc))
3585 return rc;
3586 }
3587 else /* Last page (cbWrite is PAGE_SIZE, we only need cb!) */
3588 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3589
3590 /* next */
3591 Assert(cb > cbWrite);
3592 cb -= cbWrite;
3593 pvSrc = (uint8_t *)pvSrc + cbWrite;
3594 GCPtrDst += cbWrite;
3595 }
3596}
3597
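/* Usage sketch (illustrative only; pvData, cbData and enmOrigin are
 * hypothetical): in R0/RC a write that hits an access handler may come back
 * as VERR_PGM_PHYS_WR_HIT_HANDLER (see @retval above), which a caller
 * typically treats as "retry in ring-3":
 *
 *      int rc = PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvData, cbData, enmOrigin);
 *      if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)
 *          return VINF_EM_RAW_EMULATE_INSTR; // hypothetical fallback choice
 *      AssertRCReturn(rc, rc);
 */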
3598
3599/**
3600 * Performs a read of guest virtual memory for instruction emulation.
3601 *
3602 * This will check permissions, raise exceptions and update the access bits.
3603 *
3604 * The current implementation will bypass all access handlers. It may later be
3605 * changed to at least respect MMIO.
3606 *
3607 *
3608 * @returns VBox status code suitable to scheduling.
3609 * @retval VINF_SUCCESS if the read was performed successfully.
3610 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3611 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3612 *
3613 * @param pVCpu Handle to the current virtual CPU.
3614 * @param pCtxCore The context core.
3615 * @param pvDst Where to put the bytes we've read.
3616 * @param GCPtrSrc The source address.
3617 * @param cb The number of bytes to read. Not more than a page.
3618 *
3619 * @remark This function will dynamically map physical pages in GC. This may unmap
3620 * mappings done by the caller. Be careful!
3621 */
3622VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3623{
3624 PVM pVM = pVCpu->CTX_SUFF(pVM);
3625 Assert(cb <= PAGE_SIZE);
3626 VMCPU_ASSERT_EMT(pVCpu);
3627
3628/** @todo r=bird: This isn't perfect!
3629 * -# It's not checking for reserved bits being 1.
3630 * -# It's not correctly dealing with the access bit.
3631 * -# It's not respecting MMIO memory or any other access handlers.
3632 */
3633 /*
3634 * 1. Translate virtual to physical. This may fault.
3635 * 2. Map the physical address.
3636 * 3. Do the read operation.
3637 * 4. Set access bits if required.
3638 */
3639 int rc;
3640 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3641 if (cb <= cb1)
3642 {
3643 /*
3644 * Not crossing pages.
3645 */
3646 RTGCPHYS GCPhys;
3647 uint64_t fFlags;
3648 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3649 if (RT_SUCCESS(rc))
3650 {
3651 /** @todo we should check reserved bits ... */
3652 PGMPAGEMAPLOCK PgMpLck;
3653 void const *pvSrc;
3654 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
3655 switch (rc)
3656 {
3657 case VINF_SUCCESS:
3658 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3659 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3660 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3661 break;
3662 case VERR_PGM_PHYS_PAGE_RESERVED:
3663 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3664 memset(pvDst, 0xff, cb);
3665 break;
3666 default:
3667 Assert(RT_FAILURE_NP(rc));
3668 return rc;
3669 }
3670
3671 /** @todo access bit emulation isn't 100% correct. */
3672 if (!(fFlags & X86_PTE_A))
3673 {
3674 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3675 AssertRC(rc);
3676 }
3677 return VINF_SUCCESS;
3678 }
3679 }
3680 else
3681 {
3682 /*
3683 * Crosses pages.
3684 */
3685 size_t cb2 = cb - cb1;
3686 uint64_t fFlags1;
3687 RTGCPHYS GCPhys1;
3688 uint64_t fFlags2;
3689 RTGCPHYS GCPhys2;
3690 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3691 if (RT_SUCCESS(rc))
3692 {
3693 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3694 if (RT_SUCCESS(rc))
3695 {
3696 /** @todo we should check reserved bits ... */
3697 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3698 PGMPAGEMAPLOCK PgMpLck;
3699 void const *pvSrc1;
3700 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
3701 switch (rc)
3702 {
3703 case VINF_SUCCESS:
3704 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3705 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3706 break;
3707 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3708 memset(pvDst, 0xff, cb1);
3709 break;
3710 default:
3711 Assert(RT_FAILURE_NP(rc));
3712 return rc;
3713 }
3714
3715 void const *pvSrc2;
3716 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
3717 switch (rc)
3718 {
3719 case VINF_SUCCESS:
3720 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3721 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3722 break;
3723 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3724 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3725 break;
3726 default:
3727 Assert(RT_FAILURE_NP(rc));
3728 return rc;
3729 }
3730
3731 if (!(fFlags1 & X86_PTE_A))
3732 {
3733 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3734 AssertRC(rc);
3735 }
3736 if (!(fFlags2 & X86_PTE_A))
3737 {
3738 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3739 AssertRC(rc);
3740 }
3741 return VINF_SUCCESS;
3742 }
3743 }
3744 }
3745
3746 /*
3747 * Raise a #PF.
3748 */
3749 uint32_t uErr;
3750
3751 /* Get the current privilege level. */
3752 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3753 switch (rc)
3754 {
3755 case VINF_SUCCESS:
3756 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3757 break;
3758
3759 case VERR_PAGE_NOT_PRESENT:
3760 case VERR_PAGE_TABLE_NOT_PRESENT:
3761 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3762 break;
3763
3764 default:
3765 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3766 return rc;
3767 }
3768 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3769 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3770}
3771
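/* Usage sketch (illustrative only; pRegFrame and GCPtrPar1 are hypothetical
 * emulation-time context): on a translation failure the function has already
 * queued a #PF for the guest, so informational statuses are passed on:
 *
 *      uint16_t u16Val;
 *      int rc = PGMPhysInterpretedRead(pVCpu, pRegFrame, &u16Val, GCPtrPar1, sizeof(u16Val));
 *      if (rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED)
 *          return rc; // let the caller deliver/handle the exception
 *      AssertRCReturn(rc, rc);
 */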
3772
3773/**
3774 * Performs a read of guest virtual memory for instruction emulation.
3775 *
3776 * This will check permissions, raise exceptions and update the access bits.
3777 *
3778 * The current implementation will bypass all access handlers. It may later be
3779 * changed to at least respect MMIO.
3780 *
3781 *
3782 * @returns VBox status code suitable to scheduling.
3783 * @retval VINF_SUCCESS if the read was performed successfully.
3784 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3785 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3786 *
3787 * @param pVCpu Handle to the current virtual CPU.
3788 * @param pCtxCore The context core.
3789 * @param pvDst Where to put the bytes we've read.
3790 * @param GCPtrSrc The source address.
3791 * @param cb The number of bytes to read. Not more than a page.
3792 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3793 * an appropriate error status will be returned (no
3794 * informational status at all).
3795 *
3796 *
3797 * @remarks Takes the PGM lock.
3798 * @remarks A page fault on the 2nd page of the access will be raised without
3799 * writing the bits on the first page since we're ASSUMING that the
3800 * caller is emulating an instruction access.
3801 * @remarks This function will dynamically map physical pages in GC. This may
3802 * unmap mappings done by the caller. Be careful!
3803 */
3804VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,
3805 bool fRaiseTrap)
3806{
3807 PVM pVM = pVCpu->CTX_SUFF(pVM);
3808 Assert(cb <= PAGE_SIZE);
3809 VMCPU_ASSERT_EMT(pVCpu);
3810
3811 /*
3812 * 1. Translate virtual to physical. This may fault.
3813 * 2. Map the physical address.
3814 * 3. Do the read operation.
3815 * 4. Set access bits if required.
3816 */
3817 int rc;
3818 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3819 if (cb <= cb1)
3820 {
3821 /*
3822 * Not crossing pages.
3823 */
3824 RTGCPHYS GCPhys;
3825 uint64_t fFlags;
3826 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3827 if (RT_SUCCESS(rc))
3828 {
3829 if (1) /** @todo we should check reserved bits ... */
3830 {
3831 const void *pvSrc;
3832 PGMPAGEMAPLOCK Lock;
3833 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3834 switch (rc)
3835 {
3836 case VINF_SUCCESS:
3837 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3838 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3839 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3840 PGMPhysReleasePageMappingLock(pVM, &Lock);
3841 break;
3842 case VERR_PGM_PHYS_PAGE_RESERVED:
3843 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3844 memset(pvDst, 0xff, cb);
3845 break;
3846 default:
3847 AssertMsgFailed(("%Rrc\n", rc));
3848 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3849 return rc;
3850 }
3851
3852 if (!(fFlags & X86_PTE_A))
3853 {
3854 /** @todo access bit emulation isn't 100% correct. */
3855 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3856 AssertRC(rc);
3857 }
3858 return VINF_SUCCESS;
3859 }
3860 }
3861 }
3862 else
3863 {
3864 /*
3865 * Crosses pages.
3866 */
3867 size_t cb2 = cb - cb1;
3868 uint64_t fFlags1;
3869 RTGCPHYS GCPhys1;
3870 uint64_t fFlags2;
3871 RTGCPHYS GCPhys2;
3872 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3873 if (RT_SUCCESS(rc))
3874 {
3875 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3876 if (RT_SUCCESS(rc))
3877 {
3878 if (1) /** @todo we should check reserved bits ... */
3879 {
3880 const void *pvSrc;
3881 PGMPAGEMAPLOCK Lock;
3882 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3883 switch (rc)
3884 {
3885 case VINF_SUCCESS:
3886 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3887 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3888 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3889 PGMPhysReleasePageMappingLock(pVM, &Lock);
3890 break;
3891 case VERR_PGM_PHYS_PAGE_RESERVED:
3892 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3893 memset(pvDst, 0xff, cb1);
3894 break;
3895 default:
3896 AssertMsgFailed(("%Rrc\n", rc));
3897 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3898 return rc;
3899 }
3900
3901 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3902 switch (rc)
3903 {
3904 case VINF_SUCCESS:
3905 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3906 PGMPhysReleasePageMappingLock(pVM, &Lock);
3907 break;
3908 case VERR_PGM_PHYS_PAGE_RESERVED:
3909 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3910 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3911 break;
3912 default:
3913 AssertMsgFailed(("%Rrc\n", rc));
3914 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3915 return rc;
3916 }
3917
3918 if (!(fFlags1 & X86_PTE_A))
3919 {
3920 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3921 AssertRC(rc);
3922 }
3923 if (!(fFlags2 & X86_PTE_A))
3924 {
3925 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3926 AssertRC(rc);
3927 }
3928 return VINF_SUCCESS;
3929 }
3930 /* sort out which page */
3931 }
3932 else
3933 GCPtrSrc += cb1; /* fault on 2nd page */
3934 }
3935 }
3936
3937 /*
3938 * Raise a #PF if we're allowed to do that.
3939 */
3940 /* Calc the error bits. */
3941 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3942 uint32_t uErr;
3943 switch (rc)
3944 {
3945 case VINF_SUCCESS:
3946 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3947 rc = VERR_ACCESS_DENIED;
3948 break;
3949
3950 case VERR_PAGE_NOT_PRESENT:
3951 case VERR_PAGE_TABLE_NOT_PRESENT:
3952 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3953 break;
3954
3955 default:
3956 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3957 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3958 return rc;
3959 }
3960 if (fRaiseTrap)
3961 {
3962 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3963 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3964 }
3965 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3966 return rc;
3967}
3968
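/* Usage sketch (illustrative only): probing guest memory without raising a
 * trap, so a failed translation simply yields an error status such as
 * VERR_PAGE_NOT_PRESENT or VERR_ACCESS_DENIED:
 *
 *      uint8_t abBuf[8];
 *      int rc = PGMPhysInterpretedReadNoHandlers(pVCpu, pRegFrame, abBuf, GCPtrProbe, sizeof(abBuf),
 *                                                false /*fRaiseTrap*/);
 */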
3969
3970/**
3971 * Performs a write to guest virtual memory for instruction emulation.
3972 *
3973 * This will check permissions, raise exceptions and update the dirty and access
3974 * bits.
3975 *
3976 * @returns VBox status code suitable to scheduling.
3977 * @retval VINF_SUCCESS if the read was performed successfully.
3978 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3979 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3980 *
3981 * @param pVCpu Handle to the current virtual CPU.
3982 * @param pCtxCore The context core.
3983 * @param GCPtrDst The destination address.
3984 * @param pvSrc What to write.
3985 * @param cb The number of bytes to write. Not more than a page.
3986 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3987 * an appropriate error status will be returned (no
3988 * informational status at all).
3989 *
3990 * @remarks Takes the PGM lock.
3991 * @remarks A page fault on the 2nd page of the access will be raised without
3992 * writing the bits on the first page since we're ASSUMING that the
3993 * caller is emulating an instruction access.
3994 * @remarks This function will dynamically map physical pages in GC. This may
3995 * unmap mappings done by the caller. Be careful!
3996 */
3997VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,
3998 size_t cb, bool fRaiseTrap)
3999{
4000 Assert(cb <= PAGE_SIZE);
4001 PVM pVM = pVCpu->CTX_SUFF(pVM);
4002 VMCPU_ASSERT_EMT(pVCpu);
4003
4004 /*
4005 * 1. Translate virtual to physical. This may fault.
4006 * 2. Map the physical address.
4007 * 3. Do the write operation.
4008 * 4. Set access bits if required.
4009 */
4010 /** @todo Since this method is frequently used by EMInterpret or IOM
4011 * upon a write fault to a write access monitored page, we can
4012 * reuse the guest page table walking from the \#PF code. */
4013 int rc;
4014 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
4015 if (cb <= cb1)
4016 {
4017 /*
4018 * Not crossing pages.
4019 */
4020 RTGCPHYS GCPhys;
4021 uint64_t fFlags;
4022 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
4023 if (RT_SUCCESS(rc))
4024 {
4025 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
4026 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
4027 && CPUMGetGuestCPL(pVCpu) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
4028 {
4029 void *pvDst;
4030 PGMPAGEMAPLOCK Lock;
4031 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
4032 switch (rc)
4033 {
4034 case VINF_SUCCESS:
4035 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
4036 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
4037 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
4038 PGMPhysReleasePageMappingLock(pVM, &Lock);
4039 break;
4040 case VERR_PGM_PHYS_PAGE_RESERVED:
4041 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
4042 /* bit bucket */
4043 break;
4044 default:
4045 AssertMsgFailed(("%Rrc\n", rc));
4046 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4047 return rc;
4048 }
4049
4050 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
4051 {
4052 /** @todo dirty & access bit emulation isn't 100% correct. */
4053 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
4054 AssertRC(rc);
4055 }
4056 return VINF_SUCCESS;
4057 }
4058 rc = VERR_ACCESS_DENIED;
4059 }
4060 }
4061 else
4062 {
4063 /*
4064 * Crosses pages.
4065 */
4066 size_t cb2 = cb - cb1;
4067 uint64_t fFlags1;
4068 RTGCPHYS GCPhys1;
4069 uint64_t fFlags2;
4070 RTGCPHYS GCPhys2;
4071 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
4072 if (RT_SUCCESS(rc))
4073 {
4074 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
4075 if (RT_SUCCESS(rc))
4076 {
4077 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
4078 && (fFlags2 & X86_PTE_RW))
4079 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
4080 && CPUMGetGuestCPL(pVCpu) <= 2) )
4081 {
4082 void *pvDst;
4083 PGMPAGEMAPLOCK Lock;
4084 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
4085 switch (rc)
4086 {
4087 case VINF_SUCCESS:
4088 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
4089 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
4090 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
4091 PGMPhysReleasePageMappingLock(pVM, &Lock);
4092 break;
4093 case VERR_PGM_PHYS_PAGE_RESERVED:
4094 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
4095 /* bit bucket */
4096 break;
4097 default:
4098 AssertMsgFailed(("%Rrc\n", rc));
4099 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4100 return rc;
4101 }
4102
4103 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
4104 switch (rc)
4105 {
4106 case VINF_SUCCESS:
4107 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
4108 PGMPhysReleasePageMappingLock(pVM, &Lock);
4109 break;
4110 case VERR_PGM_PHYS_PAGE_RESERVED:
4111 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
4112 /* bit bucket */
4113 break;
4114 default:
4115 AssertMsgFailed(("%Rrc\n", rc));
4116 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4117 return rc;
4118 }
4119
4120 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
4121 {
4122 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
4123 AssertRC(rc);
4124 }
4125 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
4126 {
4127 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
4128 AssertRC(rc);
4129 }
4130 return VINF_SUCCESS;
4131 }
4132 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
4133 GCPtrDst += cb1; /* fault on the 2nd page. */
4134 rc = VERR_ACCESS_DENIED;
4135 }
4136 else
4137 GCPtrDst += cb1; /* fault on the 2nd page. */
4138 }
4139 }
4140
4141 /*
4142 * Raise a #PF if we're allowed to do that.
4143 */
4144 /* Calc the error bits. */
4145 uint32_t uErr;
4146 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
4147 switch (rc)
4148 {
4149 case VINF_SUCCESS:
4150 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
4151 rc = VERR_ACCESS_DENIED;
4152 break;
4153
4154 case VERR_ACCESS_DENIED:
4155 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
4156 break;
4157
4158 case VERR_PAGE_NOT_PRESENT:
4159 case VERR_PAGE_TABLE_NOT_PRESENT:
4160 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
4161 break;
4162
4163 default:
4164 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
4165 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4166 return rc;
4167 }
4168 if (fRaiseTrap)
4169 {
4170 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
4171 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
4172 }
4173 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
4174 return rc;
4175}
4176
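/* Usage sketch (illustrative only; mirrors the read variant above): with
 * fRaiseTrap=true a failed translation or write-protection violation queues
 * a guest #PF and an informational status is returned:
 *
 *      int rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, pRegFrame, GCPtrDst, &uValue, sizeof(uValue),
 *                                                 true /*fRaiseTrap*/);
 *      if (rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED)
 *          return rc;
 */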
4177
4178/**
4179 * Return the page type of the specified physical address.
4180 *
4181 * @returns The page type.
4182 * @param pVM Pointer to the VM.
4183 * @param GCPhys Guest physical address
4184 */
4185VMMDECL(PGMPAGETYPE) PGMPhysGetPageType(PVM pVM, RTGCPHYS GCPhys)
4186{
4187 pgmLock(pVM);
4188 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
4189 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
4190 pgmUnlock(pVM);
4191
4192 return enmPgType;
4193}
4194
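/* Usage sketch (illustrative only): cheap page classification, e.g. for a
 * hypothetical scanner loop that only cares about normal RAM:
 *
 *      PGMPAGETYPE enmType = PGMPhysGetPageType(pVM, GCPhys);
 *      if (enmType != PGMPAGETYPE_RAM) // PGMPAGETYPE_INVALID for unassigned addresses
 *          continue;
 */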
4195
4196
4197
4198/**
4199 * Converts a GC physical address to a HC ring-3 pointer, with some
4200 * additional checks.
4201 *
4202 * @returns VBox status code (no informational statuses).
4203 * @retval VINF_SUCCESS on success.
4204 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4205 * access handler of some kind.
4206 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4207 * accesses or is odd in any way.
4208 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4209 *
4210 * @param pVM Pointer to the cross context VM structure.
4211 * @param pVCpu Pointer to the cross context virtual CPU structure of
4212 * the calling EMT.
4213 * @param GCPhys The GC physical address to convert. This API masks the
4214 * A20 line when necessary.
4215 * @param fWritable Whether write access is required.
4216 * @param ppv Where to store the pointer corresponding to GCPhys on
4217 * success.
4218 * @param pLock Where to store the lock information needed by PGMPhysReleasePageMappingLock.
4219 *
4220 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
4221 * @thread EMT(pVCpu).
4222 */
4223VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
4224 void **ppv, PPGMPAGEMAPLOCK pLock)
4225{
4226 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
4227
4228 pgmLock(pVM);
4229
4230 PPGMRAMRANGE pRam;
4231 PPGMPAGE pPage;
4232 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4233 if (RT_SUCCESS(rc))
4234 {
4235 if (PGM_PAGE_IS_BALLOONED(pPage))
4236 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4237 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4238 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4239 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4240 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4241 rc = VINF_SUCCESS;
4242 else
4243 {
4244 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4245 {
4246 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4247 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4248 }
4249 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4250 {
4251 Assert(!fByPassHandlers);
4252 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4253 }
4254 }
4255 if (RT_SUCCESS(rc))
4256 {
4257 int rc2;
4258
4259 /* Make sure what we return is writable. */
4260 if (fWritable)
4261 switch (PGM_PAGE_GET_STATE(pPage))
4262 {
4263 case PGM_PAGE_STATE_ALLOCATED:
4264 break;
4265 case PGM_PAGE_STATE_BALLOONED:
4266 AssertFailed();
4267 case PGM_PAGE_STATE_ZERO:
4268 case PGM_PAGE_STATE_SHARED:
4269 case PGM_PAGE_STATE_WRITE_MONITORED:
4270 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
4271 AssertLogRelRCReturn(rc2, rc2);
4272 break;
4273 }
4274
4275#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
4276 void *pv;
4277 rc = pgmRZDynMapHCPageInlined(pVCpu,
4278 PGM_PAGE_GET_HCPHYS(pPage),
4279 &pv
4280 RTLOG_COMMA_SRC_POS);
4281 if (RT_FAILURE(rc))
4282 return rc;
4283 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4284 pLock->pvPage = pv;
4285 pLock->pVCpu = pVCpu;
4286
4287#else
4288 /* Get a ring-3 mapping of the address. */
4289 PPGMPAGER3MAPTLBE pTlbe;
4290 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4291 AssertLogRelRCReturn(rc2, rc2);
4292
4293 /* Lock it and calculate the address. */
4294 if (fWritable)
4295 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
4296 else
4297 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
4298 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4299#endif
4300
4301 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
4302 }
4303 else
4304 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
4305
4306 /* else: handler catching all access, no pointer returned. */
4307 }
4308 else
4309 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
4310
4311 pgmUnlock(pVM);
4312 return rc;
4313}
4314
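/* Usage sketch (illustrative only; resembles what a TLB-filling caller like
 * IEM might do, but the surrounding variables are hypothetical): try to get a
 * direct pointer and fall back to the handler-aware path on the TLB statuses:
 *
 *      void           *pv;
 *      PGMPAGEMAPLOCK  Lock;
 *      int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, true /*fWritable*/, false /*fByPassHandlers*/, &pv, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          memcpy(pv, pvSrc, cb); // direct access; no handlers apply here
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *      }
 *      else if (rc == VERR_PGM_PHYS_TLB_CATCH_WRITE || rc == VERR_PGM_PHYS_TLB_CATCH_ALL)
 *          rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin); // let the handlers run
 */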
4315
4316/**
4317 * Checks if the given GCPhys page requires special handling for the given access
4318 * because it's MMIO or otherwise monitored.
4319 *
4320 * @returns VBox status code (no informational statuses).
4321 * @retval VINF_SUCCESS on success.
4322 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4323 * access handler of some kind.
4324 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4325 * accesses or is odd in any way.
4326 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4327 *
4328 * @param pVM Pointer to the VM.
4329 * @param GCPhys The GC physical address to convert. Since this is only
4330 * used for filling the REM TLB, the A20 mask must be
4331 * applied before calling this API.
4332 * @param fWritable Whether write access is required.
4333 *
4334 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
4335 * a stop gap thing that should be removed once there is a better TLB
4336 * for virtual address accesses.
4337 */
4338VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVM pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
4339{
4340 pgmLock(pVM);
4341 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
4342
4343 PPGMRAMRANGE pRam;
4344 PPGMPAGE pPage;
4345 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4346 if (RT_SUCCESS(rc))
4347 {
4348 if (PGM_PAGE_IS_BALLOONED(pPage))
4349 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4350 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4351 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4352 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4353 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4354 rc = VINF_SUCCESS;
4355 else
4356 {
4357 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4358 {
4359 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4360 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4361 }
4362 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4363 {
4364 Assert(!fByPassHandlers);
4365 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4366 }
4367 }
4368 }
4369
4370 pgmUnlock(pVM);
4371 return rc;
4372}
4373
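/* Usage sketch (illustrative only): deciding whether a page can be accessed
 * directly or must go through PGMPhysRead/PGMPhysWrite; the A20 mask is
 * assumed to have been applied to GCPhys already, as required above:
 *
 *      int rc = PGMPhysIemQueryAccess(pVM, GCPhys, fWritable, false /*fByPassHandlers*/);
 *      bool const fCanAccessDirectly = rc == VINF_SUCCESS;
 */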