VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@43030

Last change on this file since 43030 was 42633, checked in by vboxsync, 12 years ago

IEM: Implemented CMPXCHG8B. Fixed PGMPhysIemGCPhys2Ptr so that it doesn't return informational status returns, only VINF_SUCCESS and errors.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 145.5 KB
1/* $Id: PGMAllPhys.cpp 42633 2012-08-06 17:22:56Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM_PHYS
22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/trpm.h>
24#include <VBox/vmm/vmm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/em.h>
27#ifdef VBOX_WITH_REM
28# include <VBox/vmm/rem.h>
29#endif
30#include "PGMInternal.h"
31#include <VBox/vmm/vm.h>
32#include "PGMInline.h"
33#include <VBox/param.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/asm-amd64-x86.h>
38#include <VBox/log.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
47/** Enable the physical TLB. */
48#define PGM_WITH_PHYS_TLB
49
50
51
52#ifndef IN_RING3
53
54/**
55 * \#PF Handler callback for physical memory accesses without a RC/R0 handler.
56 * This simply pushes everything to the HC handler.
57 *
58 * @returns VBox status code (appropriate for trap handling and GC return).
59 * @param pVM Pointer to the VM.
60 * @param uErrorCode CPU Error code.
61 * @param pRegFrame Trap register frame.
62 * @param pvFault The fault address (cr2).
63 * @param GCPhysFault The GC physical address corresponding to pvFault.
64 * @param pvUser User argument.
65 */
66VMMDECL(int) pgmPhysHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
67{
68 NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(GCPhysFault); NOREF(pvUser);
69 return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ;
70}
71
72
73/**
74 * \#PF Handler callback for Guest ROM range write access.
75 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
76 *
77 * @returns VBox status code (appropriate for trap handling and GC return).
78 * @param pVM Pointer to the VM.
79 * @param uErrorCode CPU Error code.
80 * @param pRegFrame Trap register frame.
81 * @param pvFault The fault address (cr2).
82 * @param GCPhysFault The GC physical address corresponding to pvFault.
83 * @param pvUser User argument. Pointer to the ROM range structure.
84 */
85VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
86{
87 int rc;
88 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
89 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
90 PVMCPU pVCpu = VMMGetCpu(pVM);
91 NOREF(uErrorCode); NOREF(pvFault);
92
93 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
94
95 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
96 switch (pRom->aPages[iPage].enmProt)
97 {
98 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
99 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
100 {
101 /*
102 * If it's a simple instruction which doesn't change the cpu state
103 * we will simply skip it. Otherwise we'll have to defer it to REM.
104 */
105 uint32_t cbOp;
106 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
107 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
108 if ( RT_SUCCESS(rc)
109 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
110 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
111 {
112 switch (pDis->bOpCode)
113 {
114 /** @todo Find other instructions we can safely skip, possibly
115 * adding this kind of detection to DIS or EM. */
116 case OP_MOV:
117 pRegFrame->rip += cbOp;
118 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteHandled);
119 return VINF_SUCCESS;
120 }
121 }
122 else if (RT_UNLIKELY(rc == VERR_EM_INTERNAL_DISAS_ERROR))
123 return rc;
124 break;
125 }
126
127 case PGMROMPROT_READ_RAM_WRITE_RAM:
128 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
129 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
130 AssertRC(rc);
131 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
132
133 case PGMROMPROT_READ_ROM_WRITE_RAM:
134 /* Handle it in ring-3 because it's *way* easier there. */
135 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
136 break;
137
138 default:
139 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
140 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
141 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
142 }
143
144 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
145 return VINF_EM_RAW_EMULATE_INSTR;
146}
147
148#endif /* IN_RING3 */
149
150/**
151 * Invalidates the RAM range TLBs.
152 *
153 * @param pVM Pointer to the VM.
154 */
155void pgmPhysInvalidRamRangeTlbs(PVM pVM)
156{
157 pgmLock(pVM);
158 for (uint32_t i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
159 {
160 pVM->pgm.s.apRamRangesTlbR3[i] = NIL_RTR3PTR;
161 pVM->pgm.s.apRamRangesTlbR0[i] = NIL_RTR0PTR;
162 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
163 }
164 pgmUnlock(pVM);
165}
166
167
168/**
169 * Tests if a value of type RTGCPHYS is negative if the type had been signed
170 * instead of unsigned.
171 *
172 * @returns @c true if negative, @c false if positive or zero.
173 * @param a_GCPhys The value to test.
174 * @todo Move me to iprt/types.h.
175 */
176#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
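/*
 * A minimal worked example of the test above (values are hypothetical and
 * assume a 64-bit RTGCPHYS): subtracting a range base that lies above the
 * address wraps around, so the most significant bit of the difference is set
 * and the AVL tree walks below descend to the left.
 *
 *     RTGCPHYS off = UINT64_C(0x1000) - UINT64_C(0x2000); // wraps to 0xfffffffffffff000
 *     Assert(RTGCPHYS_IS_NEGATIVE(off));                  // MSB set -> treated as negative
 */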
177
178
179/**
180 * Slow worker for pgmPhysGetRange.
181 *
182 * @copydoc pgmPhysGetRange
183 */
184PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
185{
186 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
187
188 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
189 while (pRam)
190 {
191 RTGCPHYS off = GCPhys - pRam->GCPhys;
192 if (off < pRam->cb)
193 {
194 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
195 return pRam;
196 }
197 if (RTGCPHYS_IS_NEGATIVE(off))
198 pRam = pRam->CTX_SUFF(pLeft);
199 else
200 pRam = pRam->CTX_SUFF(pRight);
201 }
202 return NULL;
203}
204
205
206/**
207 * Slow worker for pgmPhysGetRangeAtOrAbove.
208 *
209 * @copydoc pgmPhysGetRangeAtOrAbove
210 */
211PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
212{
213 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
214
215 PPGMRAMRANGE pLastLeft = NULL;
216 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
217 while (pRam)
218 {
219 RTGCPHYS off = GCPhys - pRam->GCPhys;
220 if (off < pRam->cb)
221 {
222 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
223 return pRam;
224 }
225 if (RTGCPHYS_IS_NEGATIVE(off))
226 {
227 pLastLeft = pRam;
228 pRam = pRam->CTX_SUFF(pLeft);
229 }
230 else
231 pRam = pRam->CTX_SUFF(pRight);
232 }
233 return pLastLeft;
234}
235
236
237/**
238 * Slow worker for pgmPhysGetPage.
239 *
240 * @copydoc pgmPhysGetPage
241 */
242PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
243{
244 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
245
246 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
247 while (pRam)
248 {
249 RTGCPHYS off = GCPhys - pRam->GCPhys;
250 if (off < pRam->cb)
251 {
252 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
253 return &pRam->aPages[off >> PAGE_SHIFT];
254 }
255
256 if (RTGCPHYS_IS_NEGATIVE(off))
257 pRam = pRam->CTX_SUFF(pLeft);
258 else
259 pRam = pRam->CTX_SUFF(pRight);
260 }
261 return NULL;
262}
263
264
265/**
266 * Slow worker for pgmPhysGetPageEx.
267 *
268 * @copydoc pgmPhysGetPageEx
269 */
270int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
271{
272 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
273
274 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
275 while (pRam)
276 {
277 RTGCPHYS off = GCPhys - pRam->GCPhys;
278 if (off < pRam->cb)
279 {
280 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
281 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
282 return VINF_SUCCESS;
283 }
284
285 if (RTGCPHYS_IS_NEGATIVE(off))
286 pRam = pRam->CTX_SUFF(pLeft);
287 else
288 pRam = pRam->CTX_SUFF(pRight);
289 }
290
291 *ppPage = NULL;
292 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
293}
294
295
296/**
297 * Slow worker for pgmPhysGetPageAndRangeEx.
298 *
299 * @copydoc pgmPhysGetPageAndRangeEx
300 */
301int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
302{
303 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
304
305 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
306 while (pRam)
307 {
308 RTGCPHYS off = GCPhys - pRam->GCPhys;
309 if (off < pRam->cb)
310 {
311 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
312 *ppRam = pRam;
313 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
314 return VINF_SUCCESS;
315 }
316
317 if (RTGCPHYS_IS_NEGATIVE(off))
318 pRam = pRam->CTX_SUFF(pLeft);
319 else
320 pRam = pRam->CTX_SUFF(pRight);
321 }
322
323 *ppRam = NULL;
324 *ppPage = NULL;
325 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
326}
327
328
329/**
330 * Checks if Address Gate 20 is enabled or not.
331 *
332 * @returns true if enabled.
333 * @returns false if disabled.
334 * @param pVCpu Pointer to the VMCPU.
335 */
336VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
337{
338 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
339 return pVCpu->pgm.s.fA20Enabled;
340}
341
342
343/**
344 * Validates a GC physical address.
345 *
346 * @returns true if valid.
347 * @returns false if invalid.
348 * @param pVM Pointer to the VM.
349 * @param GCPhys The physical address to validate.
350 */
351VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
352{
353 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
354 return pPage != NULL;
355}
356
357
358/**
359 * Checks if a GC physical address is a normal page,
360 * i.e. not ROM, MMIO or reserved.
361 *
362 * @returns true if normal.
363 * @returns false if invalid, ROM, MMIO or reserved page.
364 * @param pVM Pointer to the VM.
365 * @param GCPhys The physical address to check.
366 */
367VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
368{
369 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
370 return pPage
371 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
372}
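/*
 * A minimal usage sketch combining the two queries above (the caller context
 * and the probed address are hypothetical): first check that the guest
 * physical address is backed at all, then that it is plain RAM rather than
 * ROM, MMIO or reserved memory.
 *
 *     RTGCPHYS GCPhysProbe = 0x100000; // hypothetical address
 *     if (   PGMPhysIsGCPhysValid(pVM, GCPhysProbe)
 *         && PGMPhysIsGCPhysNormal(pVM, GCPhysProbe))
 *         Log(("%RGp is ordinary RAM\n", GCPhysProbe));
 */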
373
374
375/**
376 * Converts a GC physical address to a HC physical address.
377 *
378 * @returns VINF_SUCCESS on success.
379 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
380 * page but has no physical backing.
381 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
382 * GC physical address.
383 *
384 * @param pVM Pointer to the VM.
385 * @param GCPhys The GC physical address to convert.
386 * @param pHCPhys Where to store the HC physical address on success.
387 */
388VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
389{
390 pgmLock(pVM);
391 PPGMPAGE pPage;
392 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
393 if (RT_SUCCESS(rc))
394 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
395 pgmUnlock(pVM);
396 return rc;
397}
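/*
 * A minimal usage sketch (hypothetical caller context): translate a guest
 * physical address to its host physical backing and distinguish the two
 * documented failure cases.
 *
 *     RTHCPHYS HCPhys;
 *     int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
 *     if (RT_SUCCESS(rc))
 *         Log(("%RGp -> %RHp\n", GCPhys, HCPhys));
 *     else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
 *         Log(("%RGp is valid but has no physical backing\n", GCPhys));
 *     else
 *         Log(("%RGp is not a valid guest physical address\n", GCPhys));
 */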
398
399
400/**
401 * Invalidates all page mapping TLBs.
402 *
403 * @param pVM Pointer to the VM.
404 */
405void pgmPhysInvalidatePageMapTLB(PVM pVM)
406{
407 pgmLock(pVM);
408 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
409
410 /* Clear the shared R0/R3 TLB completely. */
411 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
412 {
413 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
414 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
415 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
416 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
417 }
418
419 /** @todo clear the RC TLB whenever we add it. */
420
421 pgmUnlock(pVM);
422}
423
424
425/**
426 * Invalidates a page mapping TLB entry
427 *
428 * @param pVM Pointer to the VM.
429 * @param GCPhys GCPhys entry to flush
430 */
431void pgmPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
432{
433 PGM_LOCK_ASSERT_OWNER(pVM);
434
435 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
436
437#ifdef IN_RC
438 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
439 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
440 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
441 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
442 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
443#else
444 /* Clear the shared R0/R3 TLB entry. */
445 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
446 pTlbe->GCPhys = NIL_RTGCPHYS;
447 pTlbe->pPage = 0;
448 pTlbe->pMap = 0;
449 pTlbe->pv = 0;
450#endif
451
452 /** @todo clear the RC TLB whenever we add it. */
453}
454
455/**
456 * Makes sure that there is at least one handy page ready for use.
457 *
458 * This will also take the appropriate actions when reaching water-marks.
459 *
460 * @returns VBox status code.
461 * @retval VINF_SUCCESS on success.
462 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
463 *
464 * @param pVM Pointer to the VM.
465 *
466 * @remarks Must be called from within the PGM critical section. It may
467 * nip back to ring-3/0 in some cases.
468 */
469static int pgmPhysEnsureHandyPage(PVM pVM)
470{
471 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
472
473 /*
474 * Do we need to do anything special?
475 */
476#ifdef IN_RING3
477 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
478#else
479 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
480#endif
481 {
482 /*
483 * Allocate pages only if we're out of them, or in ring-3, almost out.
484 */
485#ifdef IN_RING3
486 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
487#else
488 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
489#endif
490 {
491 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
492 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
493#ifdef IN_RING3
494 int rc = PGMR3PhysAllocateHandyPages(pVM);
495#else
496 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
497#endif
498 if (RT_UNLIKELY(rc != VINF_SUCCESS))
499 {
500 if (RT_FAILURE(rc))
501 return rc;
502 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
503 if (!pVM->pgm.s.cHandyPages)
504 {
505 LogRel(("PGM: no more handy pages!\n"));
506 return VERR_EM_NO_MEMORY;
507 }
508 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
509 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
510#ifdef IN_RING3
511# ifdef VBOX_WITH_REM
512 REMR3NotifyFF(pVM);
513# endif
514#else
515 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
516#endif
517 }
518 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
519 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
520 ("%u\n", pVM->pgm.s.cHandyPages),
521 VERR_PGM_HANDY_PAGE_IPE);
522 }
523 else
524 {
525 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
526 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
527#ifndef IN_RING3
528 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
529 {
530 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
531 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
532 }
533#endif
534 }
535 }
536
537 return VINF_SUCCESS;
538}
539
540
541/**
542 * Replace a zero or shared page with a new page that we can write to.
543 *
544 * @returns The following VBox status codes.
545 * @retval VINF_SUCCESS on success, pPage is modified.
546 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
547 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
548 *
549 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
550 *
551 * @param pVM Pointer to the VM.
552 * @param pPage The physical page tracking structure. This will
553 * be modified on success.
554 * @param GCPhys The address of the page.
555 *
556 * @remarks Must be called from within the PGM critical section. It may
557 * nip back to ring-3/0 in some cases.
558 *
559 * @remarks This function shouldn't really fail, however if it does
560 * it probably means we've screwed up the size of handy pages and/or
561 * the low-water mark. Or, that some device I/O is causing a lot of
562 * pages to be allocated while the host is in a low-memory
563 * condition. This latter should be handled elsewhere and in a more
564 * controlled manner, it's on the @bugref{3170} todo list...
565 */
566int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
567{
568 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
569
570 /*
571 * Prereqs.
572 */
573 PGM_LOCK_ASSERT_OWNER(pVM);
574 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
575 Assert(!PGM_PAGE_IS_MMIO(pPage));
576
577# ifdef PGM_WITH_LARGE_PAGES
578 /*
579 * Try allocate a large page if applicable.
580 */
581 if ( PGMIsUsingLargePages(pVM)
582 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
583 {
584 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
585 PPGMPAGE pBasePage;
586
587 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
588 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
589 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
590 {
591 rc = pgmPhysAllocLargePage(pVM, GCPhys);
592 if (rc == VINF_SUCCESS)
593 return rc;
594 }
595 /* Mark the base as type page table, so we don't check over and over again. */
596 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
597
598 /* fall back to 4KB pages. */
599 }
600# endif
601
602 /*
603 * Flush any shadow page table mappings of the page.
604 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
605 */
606 bool fFlushTLBs = false;
607 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
608 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
609
610 /*
611 * Ensure that we've got a page handy, take it and use it.
612 */
613 int rc2 = pgmPhysEnsureHandyPage(pVM);
614 if (RT_FAILURE(rc2))
615 {
616 if (fFlushTLBs)
617 PGM_INVL_ALL_VCPU_TLBS(pVM);
618 Assert(rc2 == VERR_EM_NO_MEMORY);
619 return rc2;
620 }
621 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
622 PGM_LOCK_ASSERT_OWNER(pVM);
623 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
624 Assert(!PGM_PAGE_IS_MMIO(pPage));
625
626 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
627 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
628 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
629 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
630 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
631 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
632
633 /*
634 * There are one or two actions to be taken the next time we allocate handy pages:
635 * - Tell the GMM (global memory manager) what the page is being used for.
636 * (Speeds up replacement operations - sharing and defragmenting.)
637 * - If the current backing is shared, it must be freed.
638 */
639 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
640 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
641
642 void const *pvSharedPage = NULL;
643 if (PGM_PAGE_IS_SHARED(pPage))
644 {
645 /* Mark this shared page for freeing/dereferencing. */
646 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
647 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
648
649 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
650 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
651 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageReplaceShared));
652 pVM->pgm.s.cSharedPages--;
653
654 /* Grab the address of the page so we can make a copy later on. (safe) */
655 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
656 AssertRC(rc);
657 }
658 else
659 {
660 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
661 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
662 pVM->pgm.s.cZeroPages--;
663 }
664
665 /*
666 * Do the PGMPAGE modifications.
667 */
668 pVM->pgm.s.cPrivatePages++;
669 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
670 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
671 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
672 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
673 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
674
675 /* Copy the shared page contents to the replacement page. */
676 if (pvSharedPage)
677 {
678 /* Get the virtual address of the new page. */
679 PGMPAGEMAPLOCK PgMpLck;
680 void *pvNewPage;
681 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
682 if (RT_SUCCESS(rc))
683 {
684 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo write ASMMemCopyPage */
685 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
686 }
687 }
688
689 if ( fFlushTLBs
690 && rc != VINF_PGM_GCPHYS_ALIASED)
691 PGM_INVL_ALL_VCPU_TLBS(pVM);
692 return rc;
693}
694
695#ifdef PGM_WITH_LARGE_PAGES
696
697/**
698 * Replace a 2 MB range of zero pages with new pages that we can write to.
699 *
700 * @returns The following VBox status codes.
701 * @retval VINF_SUCCESS on success, pPage is modified.
702 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
703 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
704 *
705 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
706 *
707 * @param pVM Pointer to the VM.
708 * @param GCPhys The address of the page.
709 *
710 * @remarks Must be called from within the PGM critical section. It may
711 * nip back to ring-3/0 in some cases.
712 */
713int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
714{
715 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
716 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
717
718 /*
719 * Prereqs.
720 */
721 PGM_LOCK_ASSERT_OWNER(pVM);
722 Assert(PGMIsUsingLargePages(pVM));
723
724 PPGMPAGE pFirstPage;
725 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
726 if ( RT_SUCCESS(rc)
727 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
728 {
729 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
730
731 /* Don't call this function for already allocated pages. */
732 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
733
734 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
735 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
736 {
737 /* Lazy approach: check all pages in the 2 MB range.
738 * The whole range must be ram and unallocated. */
739 GCPhys = GCPhysBase;
740 unsigned iPage;
741 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
742 {
743 PPGMPAGE pSubPage;
744 rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage);
745 if ( RT_FAILURE(rc)
746 || PGM_PAGE_GET_TYPE(pSubPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
747 || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
748 {
749 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
750 break;
751 }
752 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
753 GCPhys += PAGE_SIZE;
754 }
755 if (iPage != _2M/PAGE_SIZE)
756 {
757 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
758 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
759 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
760 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
761 }
762
763 /*
764 * Do the allocation.
765 */
766# ifdef IN_RING3
767 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
768# else
769 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
770# endif
771 if (RT_SUCCESS(rc))
772 {
773 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
774 pVM->pgm.s.cLargePages++;
775 return VINF_SUCCESS;
776 }
777
778 /* If we fail once, it most likely means the host's memory is too
779 fragmented; don't bother trying again. */
780 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
781 PGMSetLargePageUsage(pVM, false);
782 return rc;
783 }
784 }
785 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
786}
787
788
789/**
790 * Recheck the entire 2 MB range to see if we can use it again as a large page.
791 *
792 * @returns The following VBox status codes.
793 * @retval VINF_SUCCESS on success, the large page can be used again
794 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
795 *
796 * @param pVM Pointer to the VM.
797 * @param GCPhys The address of the page.
798 * @param pLargePage Page structure of the base page
799 */
800int pgmPhysRecheckLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
801{
802 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
803
804 GCPhys &= X86_PDE2M_PAE_PG_MASK;
805
806 /* Check the base page. */
807 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
808 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
809 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
810 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
811 {
812 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
813 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
814 }
815
816 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
817 /* Check all remaining pages in the 2 MB range. */
818 unsigned i;
819 GCPhys += PAGE_SIZE;
820 for (i = 1; i < _2M/PAGE_SIZE; i++)
821 {
822 PPGMPAGE pPage;
823 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
824 AssertRCBreak(rc);
825
826 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
827 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
828 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
829 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
830 {
831 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
832 break;
833 }
834
835 GCPhys += PAGE_SIZE;
836 }
837 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
838
839 if (i == _2M/PAGE_SIZE)
840 {
841 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
842 pVM->pgm.s.cLargePagesDisabled--;
843 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
844 return VINF_SUCCESS;
845 }
846
847 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
848}
849
850#endif /* PGM_WITH_LARGE_PAGES */
851
852/**
853 * Deal with a write monitored page.
854 *
855 * @returns VBox strict status code.
856 *
857 * @param pVM Pointer to the VM.
858 * @param pPage The physical page tracking structure.
859 *
860 * @remarks Called from within the PGM critical section.
861 */
862void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
863{
864 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
865 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
866 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
867 Assert(pVM->pgm.s.cMonitoredPages > 0);
868 pVM->pgm.s.cMonitoredPages--;
869 pVM->pgm.s.cWrittenToPages++;
870}
871
872
873/**
874 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
875 *
876 * @returns VBox strict status code.
877 * @retval VINF_SUCCESS on success.
878 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
879 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
880 *
881 * @param pVM Pointer to the VM.
882 * @param pPage The physical page tracking structure.
883 * @param GCPhys The address of the page.
884 *
885 * @remarks Called from within the PGM critical section.
886 */
887int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
888{
889 PGM_LOCK_ASSERT_OWNER(pVM);
890 switch (PGM_PAGE_GET_STATE(pPage))
891 {
892 case PGM_PAGE_STATE_WRITE_MONITORED:
893 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
894 /* fall thru */
895 default: /* to shut up GCC */
896 case PGM_PAGE_STATE_ALLOCATED:
897 return VINF_SUCCESS;
898
899 /*
900 * Zero pages can be dummy pages for MMIO or reserved memory,
901 * so we need to check the flags before joining cause with
902 * shared page replacement.
903 */
904 case PGM_PAGE_STATE_ZERO:
905 if (PGM_PAGE_IS_MMIO(pPage))
906 return VERR_PGM_PHYS_PAGE_RESERVED;
907 /* fall thru */
908 case PGM_PAGE_STATE_SHARED:
909 return pgmPhysAllocPage(pVM, pPage, GCPhys);
910
911 /* Not allowed to write to ballooned pages. */
912 case PGM_PAGE_STATE_BALLOONED:
913 return VERR_PGM_PHYS_PAGE_BALLOONED;
914 }
915}
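/*
 * Typical caller pattern (a sketch modelled on the internal mapping helpers
 * further down in this file; assumes the PGM lock is held): only pages in the
 * ALLOCATED state can be written to directly, so callers check the state and
 * let pgmPhysPageMakeWritable resolve zero, shared and write-monitored pages.
 *
 *     if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
 *     {
 *         int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
 *         if (RT_FAILURE(rc))
 *             return rc; // reserved, ballooned or out of memory
 *     }
 *     // pPage is now in the ALLOCATED state and can be mapped for writing.
 */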
916
917
918/**
919 * Internal usage: Map the page specified by its GMM ID.
920 *
921 * This is similar to pgmPhysPageMap
922 *
923 * @returns VBox status code.
924 *
925 * @param pVM Pointer to the VM.
926 * @param idPage The Page ID.
927 * @param HCPhys The physical address (for RC).
928 * @param ppv Where to store the mapping address.
929 *
930 * @remarks Called from within the PGM critical section. The mapping is only
931 * valid while you are inside this section.
932 */
933int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
934{
935 /*
936 * Validation.
937 */
938 PGM_LOCK_ASSERT_OWNER(pVM);
939 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
940 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
941 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
942
943#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
944 /*
945 * Map it by HCPhys.
946 */
947 return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
948
949#else
950 /*
951 * Find/make Chunk TLB entry for the mapping chunk.
952 */
953 PPGMCHUNKR3MAP pMap;
954 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
955 if (pTlbe->idChunk == idChunk)
956 {
957 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
958 pMap = pTlbe->pChunk;
959 }
960 else
961 {
962 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
963
964 /*
965 * Find the chunk, map it if necessary.
966 */
967 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
968 if (pMap)
969 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
970 else
971 {
972# ifdef IN_RING0
973 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
974 AssertRCReturn(rc, rc);
975 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
976 Assert(pMap);
977# else
978 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
979 if (RT_FAILURE(rc))
980 return rc;
981# endif
982 }
983
984 /*
985 * Enter it into the Chunk TLB.
986 */
987 pTlbe->idChunk = idChunk;
988 pTlbe->pChunk = pMap;
989 }
990
991 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
992 return VINF_SUCCESS;
993#endif
994}
995
996
997/**
998 * Maps a page into the current virtual address space so it can be accessed.
999 *
1000 * @returns VBox status code.
1001 * @retval VINF_SUCCESS on success.
1002 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1003 *
1004 * @param pVM Pointer to the VM.
1005 * @param pPage The physical page tracking structure.
1006 * @param GCPhys The address of the page.
1007 * @param ppMap Where to store the address of the mapping tracking structure.
1008 * @param ppv Where to store the mapping address of the page. The page
1009 * offset is masked off!
1010 *
1011 * @remarks Called from within the PGM critical section.
1012 */
1013static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1014{
1015 PGM_LOCK_ASSERT_OWNER(pVM);
1016
1017#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1018 /*
1019 * Just some sketchy GC/R0-darwin code.
1020 */
1021 *ppMap = NULL;
1022 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1023 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
1024 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1025 NOREF(GCPhys);
1026 return VINF_SUCCESS;
1027
1028#else /* IN_RING3 || IN_RING0 */
1029
1030
1031 /*
1032 * Special case: ZERO and MMIO2 pages.
1033 */
1034 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1035 if (idChunk == NIL_GMM_CHUNKID)
1036 {
1037 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1038 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
1039 {
1040 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
1041 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1042 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_PGM_PHYS_PAGE_MAP_IPE_2);
1043 *ppv = (void *)((uintptr_t)pRam->pvR3 + (uintptr_t)((GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK) - pRam->GCPhys));
1044 }
1045 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1046 {
1047 /** @todo deal with aliased MMIO2 pages somehow...
1048 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
1049 * them, that would also avoid this mess. It would actually be kind of
1050 * elegant... */
1051 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_PGM_MAP_MMIO2_ALIAS_MMIO);
1052 }
1053 else
1054 {
1055 /** @todo handle MMIO2 */
1056 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1057 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
1058 ("pPage=%R[pgmpage]\n", pPage),
1059 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1060 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1061 }
1062 *ppMap = NULL;
1063 return VINF_SUCCESS;
1064 }
1065
1066 /*
1067 * Find/make Chunk TLB entry for the mapping chunk.
1068 */
1069 PPGMCHUNKR3MAP pMap;
1070 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1071 if (pTlbe->idChunk == idChunk)
1072 {
1073 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1074 pMap = pTlbe->pChunk;
1075 AssertPtr(pMap->pv);
1076 }
1077 else
1078 {
1079 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1080
1081 /*
1082 * Find the chunk, map it if necessary.
1083 */
1084 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1085 if (pMap)
1086 {
1087 AssertPtr(pMap->pv);
1088 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1089 }
1090 else
1091 {
1092#ifdef IN_RING0
1093 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1094 AssertRCReturn(rc, rc);
1095 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1096 Assert(pMap);
1097#else
1098 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1099 if (RT_FAILURE(rc))
1100 return rc;
1101#endif
1102 AssertPtr(pMap->pv);
1103 }
1104
1105 /*
1106 * Enter it into the Chunk TLB.
1107 */
1108 pTlbe->idChunk = idChunk;
1109 pTlbe->pChunk = pMap;
1110 }
1111
1112 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
1113 *ppMap = pMap;
1114 return VINF_SUCCESS;
1115#endif /* IN_RING3 */
1116}
1117
1118
1119/**
1120 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1121 *
1122 * This is typically used in paths where we cannot use the TLB methods (like ROM
1123 * pages) or where there is no point in using them since we won't get many hits.
1124 *
1125 * @returns VBox strict status code.
1126 * @retval VINF_SUCCESS on success.
1127 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1128 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1129 *
1130 * @param pVM Pointer to the VM.
1131 * @param pPage The physical page tracking structure.
1132 * @param GCPhys The address of the page.
1133 * @param ppv Where to store the mapping address of the page. The page
1134 * offset is masked off!
1135 *
1136 * @remarks Called from within the PGM critical section. The mapping is only
1137 * valid while you are inside the section.
1138 */
1139int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1140{
1141 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1142 if (RT_SUCCESS(rc))
1143 {
1144 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1145 PPGMPAGEMAP pMapIgnore;
1146 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1147 if (RT_FAILURE(rc2)) /* preserve rc */
1148 rc = rc2;
1149 }
1150 return rc;
1151}
1152
1153
1154/**
1155 * Maps a page into the current virtual address space so it can be accessed for
1156 * both writing and reading.
1157 *
1158 * This is typically used in paths where we cannot use the TLB methods (like ROM
1159 * pages) or where there is no point in using them since we won't get many hits.
1160 *
1161 * @returns VBox status code.
1162 * @retval VINF_SUCCESS on success.
1163 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1164 *
1165 * @param pVM Pointer to the VM.
1166 * @param pPage The physical page tracking structure. Must be in the
1167 * allocated state.
1168 * @param GCPhys The address of the page.
1169 * @param ppv Where to store the mapping address of the page. The page
1170 * offset is masked off!
1171 *
1172 * @remarks Called from within the PGM critical section. The mapping is only
1173 * valid while you are inside the section.
1174 */
1175int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1176{
1177 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1178 PPGMPAGEMAP pMapIgnore;
1179 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1180}
1181
1182
1183/**
1184 * Maps a page into the current virtual address space so it can be accessed for
1185 * reading.
1186 *
1187 * This is typically used in paths where we cannot use the TLB methods (like ROM
1188 * pages) or where there is no point in using them since we won't get many hits.
1189 *
1190 * @returns VBox status code.
1191 * @retval VINF_SUCCESS on success.
1192 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1193 *
1194 * @param pVM Pointer to the VM.
1195 * @param pPage The physical page tracking structure.
1196 * @param GCPhys The address of the page.
1197 * @param ppv Where to store the mapping address of the page. The page
1198 * offset is masked off!
1199 *
1200 * @remarks Called from within the PGM critical section. The mapping is only
1201 * valid while you are inside this section.
1202 */
1203int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1204{
1205 PPGMPAGEMAP pMapIgnore;
1206 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1207}
1208
1209#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1210
1211/**
1212 * Load a guest page into the ring-3 physical TLB.
1213 *
1214 * @returns VBox status code.
1215 * @retval VINF_SUCCESS on success
1216 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1217 * @param pVM Pointer to the VM.
1218 * @param GCPhys The guest physical address in question.
1219 */
1220int pgmPhysPageLoadIntoTlb(PVM pVM, RTGCPHYS GCPhys)
1221{
1222 PGM_LOCK_ASSERT_OWNER(pVM);
1223
1224 /*
1225 * Find the ram range and page and hand it over to the with-page function.
1226 * 99.8% of requests are expected to be in the first range.
1227 */
1228 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1229 if (!pPage)
1230 {
1231 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1232 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1233 }
1234
1235 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1236}
1237
1238
1239/**
1240 * Load a guest page into the ring-3 physical TLB.
1241 *
1242 * @returns VBox status code.
1243 * @retval VINF_SUCCESS on success
1244 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1245 *
1246 * @param pVM Pointer to the VM.
1247 * @param pPage Pointer to the PGMPAGE structure corresponding to
1248 * GCPhys.
1249 * @param GCPhys The guest physical address in question.
1250 */
1251int pgmPhysPageLoadIntoTlbWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1252{
1253 PGM_LOCK_ASSERT_OWNER(pVM);
1254 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1255
1256 /*
1257 * Map the page.
1258 * Make a special case for the zero page as it is kind of special.
1259 */
1260 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1261 if ( !PGM_PAGE_IS_ZERO(pPage)
1262 && !PGM_PAGE_IS_BALLOONED(pPage))
1263 {
1264 void *pv;
1265 PPGMPAGEMAP pMap;
1266 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1267 if (RT_FAILURE(rc))
1268 return rc;
1269 pTlbe->pMap = pMap;
1270 pTlbe->pv = pv;
1271 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1272 }
1273 else
1274 {
1275 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1276 pTlbe->pMap = NULL;
1277 pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1278 }
1279#ifdef PGM_WITH_PHYS_TLB
1280 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1281 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1282 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1283 else
1284 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1285#else
1286 pTlbe->GCPhys = NIL_RTGCPHYS;
1287#endif
1288 pTlbe->pPage = pPage;
1289 return VINF_SUCCESS;
1290}
1291
1292#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1293
1294/**
1295 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1296 * own the PGM lock and therefore not need to lock the mapped page.
1297 *
1298 * @returns VBox status code.
1299 * @retval VINF_SUCCESS on success.
1300 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1301 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1302 *
1303 * @param pVM Pointer to the VM.
1304 * @param GCPhys The guest physical address of the page that should be mapped.
1305 * @param pPage Pointer to the PGMPAGE structure for the page.
1306 * @param ppv Where to store the address corresponding to GCPhys.
1307 *
1308 * @internal
1309 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1310 */
1311int pgmPhysGCPhys2CCPtrInternalDepr(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1312{
1313 int rc;
1314 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1315 PGM_LOCK_ASSERT_OWNER(pVM);
1316 pVM->pgm.s.cDeprecatedPageLocks++;
1317
1318 /*
1319 * Make sure the page is writable.
1320 */
1321 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1322 {
1323 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1324 if (RT_FAILURE(rc))
1325 return rc;
1326 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1327 }
1328 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1329
1330 /*
1331 * Get the mapping address.
1332 */
1333#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1334 void *pv;
1335 rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
1336 PGM_PAGE_GET_HCPHYS(pPage),
1337 &pv
1338 RTLOG_COMMA_SRC_POS);
1339 if (RT_FAILURE(rc))
1340 return rc;
1341 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1342#else
1343 PPGMPAGEMAPTLBE pTlbe;
1344 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1345 if (RT_FAILURE(rc))
1346 return rc;
1347 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1348#endif
1349 return VINF_SUCCESS;
1350}
1351
1352#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1353
1354/**
1355 * Locks a page mapping for writing.
1356 *
1357 * @param pVM Pointer to the VM.
1358 * @param pPage The page.
1359 * @param pTlbe The mapping TLB entry for the page.
1360 * @param pLock The lock structure (output).
1361 */
1362DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1363{
1364 PPGMPAGEMAP pMap = pTlbe->pMap;
1365 if (pMap)
1366 pMap->cRefs++;
1367
1368 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1369 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1370 {
1371 if (cLocks == 0)
1372 pVM->pgm.s.cWriteLockedPages++;
1373 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1374 }
1375 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1376 {
1377 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1378 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1379 if (pMap)
1380 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1381 }
1382
1383 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1384 pLock->pvMap = pMap;
1385}
1386
1387/**
1388 * Locks a page mapping for reading.
1389 *
1390 * @param pVM Pointer to the VM.
1391 * @param pPage The page.
1392 * @param pTlbe The mapping TLB entry for the page.
1393 * @param pLock The lock structure (output).
1394 */
1395DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1396{
1397 PPGMPAGEMAP pMap = pTlbe->pMap;
1398 if (pMap)
1399 pMap->cRefs++;
1400
1401 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1402 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1403 {
1404 if (cLocks == 0)
1405 pVM->pgm.s.cReadLockedPages++;
1406 PGM_PAGE_INC_READ_LOCKS(pPage);
1407 }
1408 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1409 {
1410 PGM_PAGE_INC_READ_LOCKS(pPage);
1411 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1412 if (pMap)
1413 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1414 }
1415
1416 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1417 pLock->pvMap = pMap;
1418}
1419
1420#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1421
1422
1423/**
1424 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1425 * own the PGM lock and have access to the page structure.
1426 *
1427 * @returns VBox status code.
1428 * @retval VINF_SUCCESS on success.
1429 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1430 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1431 *
1432 * @param pVM Pointer to the VM.
1433 * @param GCPhys The guest physical address of the page that should be mapped.
1434 * @param pPage Pointer to the PGMPAGE structure for the page.
1435 * @param ppv Where to store the address corresponding to GCPhys.
1436 * @param pLock Where to store the lock information that
1437 * pgmPhysReleaseInternalPageMappingLock needs.
1438 *
1439 * @internal
1440 */
1441int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1442{
1443 int rc;
1444 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1445 PGM_LOCK_ASSERT_OWNER(pVM);
1446
1447 /*
1448 * Make sure the page is writable.
1449 */
1450 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1451 {
1452 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1453 if (RT_FAILURE(rc))
1454 return rc;
1455 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1456 }
1457 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1458
1459 /*
1460 * Do the job.
1461 */
1462#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1463 void *pv;
1464 PVMCPU pVCpu = VMMGetCpu(pVM);
1465 rc = pgmRZDynMapHCPageInlined(pVCpu,
1466 PGM_PAGE_GET_HCPHYS(pPage),
1467 &pv
1468 RTLOG_COMMA_SRC_POS);
1469 if (RT_FAILURE(rc))
1470 return rc;
1471 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1472 pLock->pvPage = pv;
1473 pLock->pVCpu = pVCpu;
1474
1475#else
1476 PPGMPAGEMAPTLBE pTlbe;
1477 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1478 if (RT_FAILURE(rc))
1479 return rc;
1480 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1481 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1482#endif
1483 return VINF_SUCCESS;
1484}
1485
1486
1487/**
1488 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1489 * own the PGM lock and have access to the page structure.
1490 *
1491 * @returns VBox status code.
1492 * @retval VINF_SUCCESS on success.
1493 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1494 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1495 *
1496 * @param pVM Pointer to the VM.
1497 * @param GCPhys The guest physical address of the page that should be mapped.
1498 * @param pPage Pointer to the PGMPAGE structure for the page.
1499 * @param ppv Where to store the address corresponding to GCPhys.
1500 * @param pLock Where to store the lock information that
1501 * pgmPhysReleaseInternalPageMappingLock needs.
1502 *
1503 * @internal
1504 */
1505int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1506{
1507 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1508 PGM_LOCK_ASSERT_OWNER(pVM);
1509 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1510
1511 /*
1512 * Do the job.
1513 */
1514#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1515 void *pv;
1516 PVMCPU pVCpu = VMMGetCpu(pVM);
1517 int rc = pgmRZDynMapHCPageInlined(pVCpu,
1518 PGM_PAGE_GET_HCPHYS(pPage),
1519 &pv
1520 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1521 if (RT_FAILURE(rc))
1522 return rc;
1523 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1524 pLock->pvPage = pv;
1525 pLock->pVCpu = pVCpu;
1526
1527#else
1528 PPGMPAGEMAPTLBE pTlbe;
1529 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1530 if (RT_FAILURE(rc))
1531 return rc;
1532 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1533 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1534#endif
1535 return VINF_SUCCESS;
1536}
1537
1538
1539/**
1540 * Requests the mapping of a guest page into the current context.
1541 *
1542 * This API should only be used for a very short time, as it will consume scarce
1543 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1544 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1545 *
1546 * This API will assume your intention is to write to the page, and will
1547 * therefore replace shared and zero pages. If you do not intend to modify
1548 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1549 *
1550 * @returns VBox status code.
1551 * @retval VINF_SUCCESS on success.
1552 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1553 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1554 *
1555 * @param pVM Pointer to the VM.
1556 * @param GCPhys The guest physical address of the page that should be
1557 * mapped.
1558 * @param ppv Where to store the address corresponding to GCPhys.
1559 * @param pLock Where to store the lock information that
1560 * PGMPhysReleasePageMappingLock needs.
1561 *
1562 * @remarks The caller is responsible for dealing with access handlers.
1563 * @todo Add an informational return code for pages with access handlers?
1564 *
1565 * @remark Avoid calling this API from within critical sections (other than
1566 * the PGM one) because of the deadlock risk. External threads may
1567 * need to delegate jobs to the EMTs.
1568 * @remarks Only one page is mapped! Make no assumption about what's after or
1569 * before the returned page!
1570 * @thread Any thread.
1571 */
1572VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1573{
1574 int rc = pgmLock(pVM);
1575 AssertRCReturn(rc, rc);
1576
1577#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1578 /*
1579 * Find the page and make sure it's writable.
1580 */
1581 PPGMPAGE pPage;
1582 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1583 if (RT_SUCCESS(rc))
1584 {
1585 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1586 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1587 if (RT_SUCCESS(rc))
1588 {
1589 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1590
1591 PVMCPU pVCpu = VMMGetCpu(pVM);
1592 void *pv;
1593 rc = pgmRZDynMapHCPageInlined(pVCpu,
1594 PGM_PAGE_GET_HCPHYS(pPage),
1595 &pv
1596 RTLOG_COMMA_SRC_POS);
1597 if (RT_SUCCESS(rc))
1598 {
1599 AssertRCSuccess(rc);
1600
1601 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1602 *ppv = pv;
1603 pLock->pvPage = pv;
1604 pLock->pVCpu = pVCpu;
1605 }
1606 }
1607 }
1608
1609#else /* IN_RING3 || IN_RING0 */
1610 /*
1611 * Query the Physical TLB entry for the page (may fail).
1612 */
1613 PPGMPAGEMAPTLBE pTlbe;
1614 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1615 if (RT_SUCCESS(rc))
1616 {
1617 /*
1618 * If the page is shared, the zero page, or being write monitored
1619 * it must be converted to a page that's writable if possible.
1620 */
1621 PPGMPAGE pPage = pTlbe->pPage;
1622 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1623 {
1624 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1625 if (RT_SUCCESS(rc))
1626 {
1627 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1628 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1629 }
1630 }
1631 if (RT_SUCCESS(rc))
1632 {
1633 /*
1634 * Now, just perform the locking and calculate the return address.
1635 */
1636 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1637 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1638 }
1639 }
1640
1641#endif /* IN_RING3 || IN_RING0 */
1642 pgmUnlock(pVM);
1643 return rc;
1644}
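/*
 * A minimal usage sketch (hypothetical caller context): map a guest page for
 * writing, touch it, and release the mapping lock again as soon as possible,
 * as the remarks above require.
 *
 *     PGMPAGEMAPLOCK Lock;
 *     void          *pv;
 *     int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         memset(pv, 0, 16); // hypothetical write within the mapped page
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);
 *     }
 */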
1645
1646
1647/**
1648 * Requests the mapping of a guest page into the current context.
1649 *
1650 * This API should only be used for a very short time, as it will consume scarce
1651 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1652 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1653 *
1654 * @returns VBox status code.
1655 * @retval VINF_SUCCESS on success.
1656 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1657 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1658 *
1659 * @param pVM Pointer to the VM.
1660 * @param GCPhys The guest physical address of the page that should be
1661 * mapped.
1662 * @param ppv Where to store the address corresponding to GCPhys.
1663 * @param pLock Where to store the lock information that
1664 * PGMPhysReleasePageMappingLock needs.
1665 *
1666 * @remarks The caller is responsible for dealing with access handlers.
1667 * @todo Add an informational return code for pages with access handlers?
1668 *
1669 * @remarks Avoid calling this API from within critical sections (other than
1670 * the PGM one) because of the deadlock risk.
1671 * @remarks Only one page is mapped! Make no assumption about what's after or
1672 * before the returned page!
1673 * @thread Any thread.
1674 */
1675VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1676{
1677 int rc = pgmLock(pVM);
1678 AssertRCReturn(rc, rc);
1679
1680#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1681 /*
1682 * Find the page and make sure it's readable.
1683 */
1684 PPGMPAGE pPage;
1685 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1686 if (RT_SUCCESS(rc))
1687 {
1688 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1689 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1690 else
1691 {
1692 PVMCPU pVCpu = VMMGetCpu(pVM);
1693 void *pv;
1694 rc = pgmRZDynMapHCPageInlined(pVCpu,
1695 PGM_PAGE_GET_HCPHYS(pPage),
1696 &pv
1697 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1698 if (RT_SUCCESS(rc))
1699 {
1700 AssertRCSuccess(rc);
1701
1702 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1703 *ppv = pv;
1704 pLock->pvPage = pv;
1705 pLock->pVCpu = pVCpu;
1706 }
1707 }
1708 }
1709
1710#else /* IN_RING3 || IN_RING0 */
1711 /*
1712 * Query the Physical TLB entry for the page (may fail).
1713 */
1714 PPGMPAGEMAPTLBE pTlbe;
1715 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1716 if (RT_SUCCESS(rc))
1717 {
1718 /* MMIO pages doesn't have any readable backing. */
1719 PPGMPAGE pPage = pTlbe->pPage;
1720 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1721 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1722 else
1723 {
1724 /*
1725 * Now, just perform the locking and calculate the return address.
1726 */
1727 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1728 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1729 }
1730 }
1731
1732#endif /* IN_RING3 || IN_RING0 */
1733 pgmUnlock(pVM);
1734 return rc;
1735}
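
/*
 * Illustrative sketch: the read-only variant of the same map/release pattern.
 * The helper name is hypothetical; note the const pointer returned by
 * PGMPhysGCPhys2CCPtrReadOnly.
 */
#if 0 /* example only, never built */
static int pgmExamplePeekGuestByte(PVM pVM, RTGCPHYS GCPhys, uint8_t *pbValue)
{
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pbValue = *(uint8_t const *)pv;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif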
1736
1737
1738/**
1739 * Requests the mapping of a guest page given by virtual address into the current context.
1740 *
1741 * This API should only be used for very short periods, as it will consume
1742 * scarce resources (R0 and GC) in the mapping cache. When you're done
1743 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1744 *
1745 * This API will assume your intention is to write to the page, and will
1746 * therefore replace shared and zero pages. If you do not intend to modify
1747 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1748 *
1749 * @returns VBox status code.
1750 * @retval VINF_SUCCESS on success.
1751 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1752 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1753 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1754 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1755 *
1756 * @param pVCpu Pointer to the VMCPU.
1757 * @param GCPtr The guest virtual address of the page that should be mapped.
1758 * @param ppv Where to store the address corresponding to GCPtr.
1759 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1760 *
1761 * @remark Avoid calling this API from within critical sections (other than
1762 * the PGM one) because of the deadlock risk.
1763 * @thread EMT
1764 */
1765VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1766{
1767 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1768 RTGCPHYS GCPhys;
1769 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1770 if (RT_SUCCESS(rc))
1771 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1772 return rc;
1773}
1774
1775
1776/**
1777 * Requests the mapping of a guest page given by virtual address into the current context.
1778 *
1779 * This API should only be used for very short periods, as it will consume
1780 * scarce resources (R0 and GC) in the mapping cache. When you're done
1781 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1782 *
1783 * @returns VBox status code.
1784 * @retval VINF_SUCCESS on success.
1785 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1786 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1787 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1788 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1789 *
1790 * @param pVCpu Pointer to the VMCPU.
1791 * @param GCPtr The guest virtual address of the page that should be mapped.
1792 * @param ppv Where to store the address corresponding to GCPtr.
1793 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1794 *
1795 * @remark Avoid calling this API from within critical sections (other than
1796 * the PGM one) because of the deadlock risk.
1797 * @thread EMT
1798 */
1799VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1800{
1801 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1802 RTGCPHYS GCPhys;
1803 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1804 if (RT_SUCCESS(rc))
1805 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1806 return rc;
1807}
1808
1809
1810/**
1811 * Release the mapping of a guest page.
1812 *
1813 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1814 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1815 *
1816 * @param pVM Pointer to the VM.
1817 * @param pLock The lock structure initialized by the mapping function.
1818 */
1819VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1820{
1821#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1822 Assert(pLock->pvPage != NULL);
1823 Assert(pLock->pVCpu == VMMGetCpu(pVM));
1824 PGM_DYNMAP_UNUSED_HINT(pLock->pVCpu, pLock->pvPage);
1825 pLock->pVCpu = NULL;
1826 pLock->pvPage = NULL;
1827
1828#else
1829 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1830 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1831 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1832
1833 pLock->uPageAndType = 0;
1834 pLock->pvMap = NULL;
1835
1836 pgmLock(pVM);
1837 if (fWriteLock)
1838 {
1839 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1840 Assert(cLocks > 0);
1841 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1842 {
1843 if (cLocks == 1)
1844 {
1845 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1846 pVM->pgm.s.cWriteLockedPages--;
1847 }
1848 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1849 }
1850
1851 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1852 {
1853 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1854 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1855 Assert(pVM->pgm.s.cMonitoredPages > 0);
1856 pVM->pgm.s.cMonitoredPages--;
1857 pVM->pgm.s.cWrittenToPages++;
1858 }
1859 }
1860 else
1861 {
1862 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1863 Assert(cLocks > 0);
1864 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1865 {
1866 if (cLocks == 1)
1867 {
1868 Assert(pVM->pgm.s.cReadLockedPages > 0);
1869 pVM->pgm.s.cReadLockedPages--;
1870 }
1871 PGM_PAGE_DEC_READ_LOCKS(pPage);
1872 }
1873 }
1874
1875 if (pMap)
1876 {
1877 Assert(pMap->cRefs >= 1);
1878 pMap->cRefs--;
1879 }
1880 pgmUnlock(pVM);
1881#endif /* IN_RING3 || IN_RING0 */
1882}
1883
1884
1885/**
1886 * Release the internal mapping of a guest page.
1887 *
1888 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
1889 * pgmPhysGCPhys2CCPtrInternalReadOnly.
1890 *
1891 * @param pVM Pointer to the VM.
1892 * @param pLock The lock structure initialized by the mapping function.
1893 *
1894 * @remarks Caller must hold the PGM lock.
1895 */
1896void pgmPhysReleaseInternalPageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1897{
1898 PGM_LOCK_ASSERT_OWNER(pVM);
1899 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
1900}
1901
1902
1903/**
1904 * Converts a GC physical address to a HC ring-3 pointer.
1905 *
1906 * @returns VINF_SUCCESS on success.
1907 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1908 * page but has no physical backing.
1909 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1910 * GC physical address.
1911 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1912 * a dynamic ram chunk boundary.
1913 *
1914 * @param pVM Pointer to the VM.
1915 * @param GCPhys The GC physical address to convert.
1916 * @param pR3Ptr Where to store the R3 pointer on success.
1917 *
1918 * @deprecated Avoid when possible!
1919 */
1920int pgmPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1921{
1922/** @todo this is kind of hacky and needs some more work. */
1923#ifndef DEBUG_sandervl
1924 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1925#endif
1926
1927 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
1928#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1929 NOREF(pVM); NOREF(pR3Ptr);
1930 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1931#else
1932 pgmLock(pVM);
1933
1934 PPGMRAMRANGE pRam;
1935 PPGMPAGE pPage;
1936 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1937 if (RT_SUCCESS(rc))
1938 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
1939
1940 pgmUnlock(pVM);
1941 Assert(rc <= VINF_SUCCESS);
1942 return rc;
1943#endif
1944}
1945
1946#if 0 /*defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)*/
1947
1948/**
1949 * Maps and locks a guest CR3 or PD (PAE) page.
1950 *
1951 * @returns VINF_SUCCESS on success.
1952 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1953 * page but has no physical backing.
1954 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1955 * GC physical address.
1956 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1957 * a dynamic ram chunk boundary
1958 *
1959 * @param pVM Pointer to the VM.
1960 * @param GCPhys The GC physical address to convert.
1961 * @param pR3Ptr Where to store the R3 pointer on success. This may or
1962 * may not be valid in ring-0 depending on the
1963 * VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option.
1964 *
1965 * @remarks The caller must own the PGM lock.
1966 */
1967int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1968{
1969
1970 PPGMRAMRANGE pRam;
1971 PPGMPAGE pPage;
1972 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1973 if (RT_SUCCESS(rc))
1974 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
1975 Assert(rc <= VINF_SUCCESS);
1976 return rc;
1977}
1978
1979
1980int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1981{
1982
1983}
1984
1985#endif
1986
1987/**
1988 * Converts a guest pointer to a GC physical address.
1989 *
1990 * This uses the current CR3/CR0/CR4 of the guest.
1991 *
1992 * @returns VBox status code.
1993 * @param pVCpu Pointer to the VMCPU.
1994 * @param GCPtr The guest pointer to convert.
1995 * @param pGCPhys Where to store the GC physical address.
1996 */
1997VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1998{
1999 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
2000 if (pGCPhys && RT_SUCCESS(rc))
2001 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
2002 return rc;
2003}
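
/*
 * Illustrative sketch: translating a guest virtual address with
 * PGMPhysGCPtr2GCPhys.  The helper name is hypothetical; the page offset is
 * already merged into the returned physical address.
 */
#if 0 /* example only, never built */
static int pgmExampleTranslateGCPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
    if (RT_SUCCESS(rc))
        Log(("example: %RGv -> %RGp\n", GCPtr, GCPhys));
    return rc;
}
#endif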
2004
2005
2006/**
2007 * Converts a guest pointer to a HC physical address.
2008 *
2009 * This uses the current CR3/CR0/CR4 of the guest.
2010 *
2011 * @returns VBox status code.
2012 * @param pVCpu Pointer to the VMCPU.
2013 * @param GCPtr The guest pointer to convert.
2014 * @param pHCPhys Where to store the HC physical address.
2015 */
2016VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2017{
2018 PVM pVM = pVCpu->CTX_SUFF(pVM);
2019 RTGCPHYS GCPhys;
2020 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
2021 if (RT_SUCCESS(rc))
2022 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
2023 return rc;
2024}
2025
2026
2027
2028#undef LOG_GROUP
2029#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2030
2031
2032#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2033/**
2034 * Cache PGMPhys memory access
2035 *
2036 * @param pVM Pointer to the VM.
2037 * @param pCache Cache structure pointer
2038 * @param GCPhys GC physical address
2039 * @param pbR3 HC (ring-3) pointer corresponding to the physical page
2040 *
2041 * @thread EMT.
2042 */
2043static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2044{
2045 uint32_t iCacheIndex;
2046
2047 Assert(VM_IS_EMT(pVM));
2048
2049 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
2050 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
2051
2052 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2053
2054 ASMBitSet(&pCache->aEntries, iCacheIndex);
2055
2056 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2057 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2058}
2059#endif /* IN_RING3 */
2060
2061
2062/**
2063 * Deals with reading from a page with one or more ALL access handlers.
2064 *
2065 * @returns VBox status code. Can be ignored in ring-3.
2066 * @retval VINF_SUCCESS.
2067 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2068 *
2069 * @param pVM Pointer to the VM.
2070 * @param pPage The page descriptor.
2071 * @param GCPhys The physical address to start reading at.
2072 * @param pvBuf Where to put the bits we read.
2073 * @param cb How much to read - less or equal to a page.
2074 */
2075static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
2076{
2077 /*
2078 * The most frequent accesses here are MMIO and shadowed ROM.
2079 * The current code ASSUMES all these access handlers cover full pages!
2080 */
2081
2082 /*
2083 * Whatever we do we need the source page, map it first.
2084 */
2085 PGMPAGEMAPLOCK PgMpLck;
2086 const void *pvSrc = NULL;
2087 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2088 if (RT_FAILURE(rc))
2089 {
2090 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2091 GCPhys, pPage, rc));
2092 memset(pvBuf, 0xff, cb);
2093 return VINF_SUCCESS;
2094 }
2095 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2096
2097 /*
2098 * Deal with any physical handlers.
2099 */
2100#ifdef IN_RING3
2101 PPGMPHYSHANDLER pPhys = NULL;
2102#endif
2103 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
2104 {
2105#ifdef IN_RING3
2106 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2107 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2108 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
2109 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
2110 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2111 Assert(pPhys->CTX_SUFF(pfnHandler));
2112
2113 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2114 void *pvUser = pPhys->CTX_SUFF(pvUser);
2115
2116 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
2117 STAM_PROFILE_START(&pPhys->Stat, h);
2118 PGM_LOCK_ASSERT_OWNER(pVM);
2119 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2120 pgmUnlock(pVM);
2121 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
2122 pgmLock(pVM);
2123# ifdef VBOX_WITH_STATISTICS
2124 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2125 if (pPhys)
2126 STAM_PROFILE_STOP(&pPhys->Stat, h);
2127# else
2128 pPhys = NULL; /* might not be valid anymore. */
2129# endif
2130 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
2131#else
2132 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2133 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2134 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2135 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2136#endif
2137 }
2138
2139 /*
2140 * Deal with any virtual handlers.
2141 */
2142 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
2143 {
2144 unsigned iPage;
2145 PPGMVIRTHANDLER pVirt;
2146
2147 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
2148 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
2149 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
2150 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2151 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
2152
2153#ifdef IN_RING3
2154 if (pVirt->pfnHandlerR3)
2155 {
2156 if (!pPhys)
2157 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2158 else
2159 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
2160 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2161 + (iPage << PAGE_SHIFT)
2162 + (GCPhys & PAGE_OFFSET_MASK);
2163
2164 STAM_PROFILE_START(&pVirt->Stat, h);
2165 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
2166 STAM_PROFILE_STOP(&pVirt->Stat, h);
2167 if (rc2 == VINF_SUCCESS)
2168 rc = VINF_SUCCESS;
2169 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
2170 }
2171 else
2172 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2173#else
2174 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2175 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2176 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2177 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2178#endif
2179 }
2180
2181 /*
2182 * Take the default action.
2183 */
2184 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2185 memcpy(pvBuf, pvSrc, cb);
2186 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2187 return rc;
2188}
2189
2190
2191/**
2192 * Read physical memory.
2193 *
2194 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2195 * want to ignore those.
2196 *
2197 * @returns VBox status code. Can be ignored in ring-3.
2198 * @retval VINF_SUCCESS.
2199 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2200 *
2201 * @param pVM Pointer to the VM.
2202 * @param GCPhys Physical address start reading from.
2203 * @param pvBuf Where to put the read bits.
2204 * @param cbRead How many bytes to read.
2205 */
2206VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
2207{
2208 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2209 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2210
2211 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysRead));
2212 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2213
2214 pgmLock(pVM);
2215
2216 /*
2217 * Copy loop on ram ranges.
2218 */
2219 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2220 for (;;)
2221 {
2222 /* Inside range or not? */
2223 if (pRam && GCPhys >= pRam->GCPhys)
2224 {
2225 /*
2226 * Must work our way through this page by page.
2227 */
2228 RTGCPHYS off = GCPhys - pRam->GCPhys;
2229 while (off < pRam->cb)
2230 {
2231 unsigned iPage = off >> PAGE_SHIFT;
2232 PPGMPAGE pPage = &pRam->aPages[iPage];
2233 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2234 if (cb > cbRead)
2235 cb = cbRead;
2236
2237 /*
2238 * Any ALL access handlers?
2239 */
2240 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
2241 {
2242 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2243 if (RT_FAILURE(rc))
2244 {
2245 pgmUnlock(pVM);
2246 return rc;
2247 }
2248 }
2249 else
2250 {
2251 /*
2252 * Get the pointer to the page.
2253 */
2254 PGMPAGEMAPLOCK PgMpLck;
2255 const void *pvSrc;
2256 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2257 if (RT_SUCCESS(rc))
2258 {
2259 memcpy(pvBuf, pvSrc, cb);
2260 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2261 }
2262 else
2263 {
2264 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2265 pRam->GCPhys + off, pPage, rc));
2266 memset(pvBuf, 0xff, cb);
2267 }
2268 }
2269
2270 /* next page */
2271 if (cb >= cbRead)
2272 {
2273 pgmUnlock(pVM);
2274 return VINF_SUCCESS;
2275 }
2276 cbRead -= cb;
2277 off += cb;
2278 pvBuf = (char *)pvBuf + cb;
2279 } /* walk pages in ram range. */
2280
2281 GCPhys = pRam->GCPhysLast + 1;
2282 }
2283 else
2284 {
2285 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2286
2287 /*
2288 * Unassigned address space.
2289 */
2290 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2291 if (cb >= cbRead)
2292 {
2293 memset(pvBuf, 0xff, cbRead);
2294 break;
2295 }
2296 memset(pvBuf, 0xff, cb);
2297
2298 cbRead -= cb;
2299 pvBuf = (char *)pvBuf + cb;
2300 GCPhys += cb;
2301 }
2302
2303 /* Advance range if necessary. */
2304 while (pRam && GCPhys > pRam->GCPhysLast)
2305 pRam = pRam->CTX_SUFF(pNext);
2306 } /* Ram range walk */
2307
2308 pgmUnlock(pVM);
2309 return VINF_SUCCESS;
2310}
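
/*
 * Illustrative sketch: reading a small guest structure with PGMPhysRead.  The
 * read may cross page and RAM range boundaries and will invoke any ALL access
 * handlers; in R0/RC a VERR_PGM_PHYS_WR_HIT_HANDLER return means the access
 * has to be retried in ring-3.  Helper name and buffer handling are hypothetical.
 */
#if 0 /* example only, never built */
static int pgmExampleReadGuestBlob(PVM pVM, RTGCPHYS GCPhys, uint8_t *pabDst, size_t cbDst)
{
    int rc = PGMPhysRead(pVM, GCPhys, pabDst, cbDst);
    if (RT_FAILURE(rc))
        Log(("example: PGMPhysRead %RGp cb=%zu -> %Rrc\n", GCPhys, cbDst, rc));
    return rc;
}
#endif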
2311
2312
2313/**
2314 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2315 *
2316 * @returns VBox status code. Can be ignored in ring-3.
2317 * @retval VINF_SUCCESS.
2318 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2319 *
2320 * @param pVM Pointer to the VM.
2321 * @param pPage The page descriptor.
2322 * @param GCPhys The physical address to start writing at.
2323 * @param pvBuf What to write.
2324 * @param cbWrite How much to write - less or equal to a page.
2325 */
2326static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
2327{
2328 PGMPAGEMAPLOCK PgMpLck;
2329 void *pvDst = NULL;
2330 int rc;
2331
2332 /*
2333 * Give priority to physical handlers (like #PF does).
2334 *
2335 * Hope for a lonely physical handler first that covers the whole
2336 * write area. This should be a pretty frequent case with MMIO and
2337 * the heavy usage of full page handlers in the page pool.
2338 */
2339 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
2340 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
2341 {
2342 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2343 if (pCur)
2344 {
2345 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2346 Assert(pCur->CTX_SUFF(pfnHandler));
2347
2348 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2349 if (cbRange > cbWrite)
2350 cbRange = cbWrite;
2351
2352#ifndef IN_RING3
2353 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2354 NOREF(cbRange);
2355 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2356 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2357
2358#else /* IN_RING3 */
2359 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2360 if (!PGM_PAGE_IS_MMIO(pPage))
2361 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2362 else
2363 rc = VINF_SUCCESS;
2364 if (RT_SUCCESS(rc))
2365 {
2366 PFNPGMR3PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
2367 void *pvUser = pCur->CTX_SUFF(pvUser);
2368
2369 STAM_PROFILE_START(&pCur->Stat, h);
2370 PGM_LOCK_ASSERT_OWNER(pVM);
2371 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2372 pgmUnlock(pVM);
2373 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2374 pgmLock(pVM);
2375# ifdef VBOX_WITH_STATISTICS
2376 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2377 if (pCur)
2378 STAM_PROFILE_STOP(&pCur->Stat, h);
2379# else
2380 pCur = NULL; /* might not be valid anymore. */
2381# endif
2382 if (rc == VINF_PGM_HANDLER_DO_DEFAULT && pvDst)
2383 {
2384 if (pvDst)
2385 memcpy(pvDst, pvBuf, cbRange);
2386 }
2387 else
2388 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
2389 }
2390 else
2391 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2392 GCPhys, pPage, rc), rc);
2393 if (RT_LIKELY(cbRange == cbWrite))
2394 {
2395 if (pvDst)
2396 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2397 return VINF_SUCCESS;
2398 }
2399
2400 /* more fun to be had below */
2401 cbWrite -= cbRange;
2402 GCPhys += cbRange;
2403 pvBuf = (uint8_t *)pvBuf + cbRange;
2404 pvDst = (uint8_t *)pvDst + cbRange;
2405#endif /* IN_RING3 */
2406 }
2407 /* else: the handler is somewhere else in the page, deal with it below. */
2408 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2409 }
2410 /*
2411 * A virtual handler without any interfering physical handlers.
2412 * Hopefully it'll cover the whole write.
2413 */
2414 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2415 {
2416 unsigned iPage;
2417 PPGMVIRTHANDLER pCur;
2418 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
2419 if (RT_SUCCESS(rc))
2420 {
2421 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2422 if (cbRange > cbWrite)
2423 cbRange = cbWrite;
2424
2425#ifndef IN_RING3
2426 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2427 NOREF(cbRange);
2428 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2429 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2430
2431#else /* IN_RING3 */
2432
2433 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2434 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2435 if (RT_SUCCESS(rc))
2436 {
2437 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2438 if (pCur->pfnHandlerR3)
2439 {
2440 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
2441 + (iPage << PAGE_SHIFT)
2442 + (GCPhys & PAGE_OFFSET_MASK);
2443
2444 STAM_PROFILE_START(&pCur->Stat, h);
2445 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2446 STAM_PROFILE_STOP(&pCur->Stat, h);
2447 }
2448 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2449 memcpy(pvDst, pvBuf, cbRange);
2450 else
2451 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2452 }
2453 else
2454 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2455 GCPhys, pPage, rc), rc);
2456 if (RT_LIKELY(cbRange == cbWrite))
2457 {
2458 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2459 return VINF_SUCCESS;
2460 }
2461
2462 /* more fun to be had below */
2463 cbWrite -= cbRange;
2464 GCPhys += cbRange;
2465 pvBuf = (uint8_t *)pvBuf + cbRange;
2466 pvDst = (uint8_t *)pvDst + cbRange;
2467#endif
2468 }
2469 /* else: the handler is somewhere else in the page, deal with it below. */
2470 }
2471
2472 /*
2473 * Deal with all the odds and ends.
2474 */
2475
2476 /* We need a writable destination page. */
2477 if (!pvDst)
2478 {
2479 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2480 AssertLogRelMsgReturn(RT_SUCCESS(rc),
2481 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2482 GCPhys, pPage, rc), rc);
2483 }
2484
2485 /* The loop state (big + ugly). */
2486 unsigned iVirtPage = 0;
2487 PPGMVIRTHANDLER pVirt = NULL;
2488 uint32_t offVirt = PAGE_SIZE;
2489 uint32_t offVirtLast = PAGE_SIZE;
2490 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2491
2492 PPGMPHYSHANDLER pPhys = NULL;
2493 uint32_t offPhys = PAGE_SIZE;
2494 uint32_t offPhysLast = PAGE_SIZE;
2495 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2496
2497 /* The loop. */
2498 for (;;)
2499 {
2500 /*
2501 * Find the closest handler at or above GCPhys.
2502 */
2503 if (fMoreVirt && !pVirt)
2504 {
2505 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
2506 if (RT_SUCCESS(rc))
2507 {
2508 offVirt = 0;
2509 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2510 }
2511 else
2512 {
2513 PPGMPHYS2VIRTHANDLER pVirtPhys;
2514 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2515 GCPhys, true /* fAbove */);
2516 if ( pVirtPhys
2517 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2518 {
2519 /* ASSUME that pVirtPhys only covers one page. */
2520 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2521 Assert(pVirtPhys->Core.Key > GCPhys);
2522
2523 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2524 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2525 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2526 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2527 }
2528 else
2529 {
2530 pVirt = NULL;
2531 fMoreVirt = false;
2532 offVirt = offVirtLast = PAGE_SIZE;
2533 }
2534 }
2535 }
2536
2537 if (fMorePhys && !pPhys)
2538 {
2539 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2540 if (pPhys)
2541 {
2542 offPhys = 0;
2543 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2544 }
2545 else
2546 {
2547 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2548 GCPhys, true /* fAbove */);
2549 if ( pPhys
2550 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2551 {
2552 offPhys = pPhys->Core.Key - GCPhys;
2553 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2554 }
2555 else
2556 {
2557 pPhys = NULL;
2558 fMorePhys = false;
2559 offPhys = offPhysLast = PAGE_SIZE;
2560 }
2561 }
2562 }
2563
2564 /*
2565 * Handle access to space without handlers (that's easy).
2566 */
2567 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2568 uint32_t cbRange = (uint32_t)cbWrite;
2569 if (offPhys && offVirt)
2570 {
2571 if (cbRange > offPhys)
2572 cbRange = offPhys;
2573 if (cbRange > offVirt)
2574 cbRange = offVirt;
2575 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2576 }
2577 /*
2578 * Physical handler.
2579 */
2580 else if (!offPhys && offVirt)
2581 {
2582 if (cbRange > offPhysLast + 1)
2583 cbRange = offPhysLast + 1;
2584 if (cbRange > offVirt)
2585 cbRange = offVirt;
2586#ifdef IN_RING3
2587 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2588 void *pvUser = pPhys->CTX_SUFF(pvUser);
2589
2590 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2591 STAM_PROFILE_START(&pPhys->Stat, h);
2592 PGM_LOCK_ASSERT_OWNER(pVM);
2593 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2594 pgmUnlock(pVM);
2595 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2596 pgmLock(pVM);
2597# ifdef VBOX_WITH_STATISTICS
2598 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2599 if (pPhys)
2600 STAM_PROFILE_STOP(&pPhys->Stat, h);
2601# else
2602 pPhys = NULL; /* might not be valid anymore. */
2603# endif
2604 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2605#else
2606 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2607 NOREF(cbRange);
2608 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2609 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2610 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2611#endif
2612 }
2613 /*
2614 * Virtual handler.
2615 */
2616 else if (offPhys && !offVirt)
2617 {
2618 if (cbRange > offVirtLast + 1)
2619 cbRange = offVirtLast + 1;
2620 if (cbRange > offPhys)
2621 cbRange = offPhys;
2622#ifdef IN_RING3
2623 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2624 if (pVirt->pfnHandlerR3)
2625 {
2626 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2627 + (iVirtPage << PAGE_SHIFT)
2628 + (GCPhys & PAGE_OFFSET_MASK);
2629 STAM_PROFILE_START(&pVirt->Stat, h);
2630 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2631 STAM_PROFILE_STOP(&pVirt->Stat, h);
2632 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2633 }
2634 pVirt = NULL;
2635#else
2636 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2637 NOREF(cbRange);
2638 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2639 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2640 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2641#endif
2642 }
2643 /*
2644 * Both... give the physical one priority.
2645 */
2646 else
2647 {
2648 Assert(!offPhys && !offVirt);
2649 if (cbRange > offVirtLast + 1)
2650 cbRange = offVirtLast + 1;
2651 if (cbRange > offPhysLast + 1)
2652 cbRange = offPhysLast + 1;
2653
2654#ifdef IN_RING3
2655 if (pVirt->pfnHandlerR3)
2656 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2657 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2658
2659 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2660 void *pvUser = pPhys->CTX_SUFF(pvUser);
2661
2662 STAM_PROFILE_START(&pPhys->Stat, h);
2663 PGM_LOCK_ASSERT_OWNER(pVM);
2664 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2665 pgmUnlock(pVM);
2666 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2667 pgmLock(pVM);
2668# ifdef VBOX_WITH_STATISTICS
2669 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2670 if (pPhys)
2671 STAM_PROFILE_STOP(&pPhys->Stat, h);
2672# else
2673 pPhys = NULL; /* might not be valid anymore. */
2674# endif
2675 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2676 if (pVirt->pfnHandlerR3)
2677 {
2678
2679 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2680 + (iVirtPage << PAGE_SHIFT)
2681 + (GCPhys & PAGE_OFFSET_MASK);
2682 STAM_PROFILE_START(&pVirt->Stat, h2);
2683 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2684 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2685 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2686 rc = VINF_SUCCESS;
2687 else
2688 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2689 }
2690 pPhys = NULL;
2691 pVirt = NULL;
2692#else
2693 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2694 NOREF(cbRange);
2695 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2696 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2697 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2698#endif
2699 }
2700 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2701 memcpy(pvDst, pvBuf, cbRange);
2702
2703 /*
2704 * Advance if we've got more stuff to do.
2705 */
2706 if (cbRange >= cbWrite)
2707 {
2708 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2709 return VINF_SUCCESS;
2710 }
2711
2712 cbWrite -= cbRange;
2713 GCPhys += cbRange;
2714 pvBuf = (uint8_t *)pvBuf + cbRange;
2715 pvDst = (uint8_t *)pvDst + cbRange;
2716
2717 offPhys -= cbRange;
2718 offPhysLast -= cbRange;
2719 offVirt -= cbRange;
2720 offVirtLast -= cbRange;
2721 }
2722}
2723
2724
2725/**
2726 * Write to physical memory.
2727 *
2728 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2729 * want to ignore those.
2730 *
2731 * @returns VBox status code. Can be ignored in ring-3.
2732 * @retval VINF_SUCCESS.
2733 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2734 *
2735 * @param pVM Pointer to the VM.
2736 * @param GCPhys Physical address to write to.
2737 * @param pvBuf What to write.
2738 * @param cbWrite How many bytes to write.
2739 */
2740VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2741{
2742 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2743 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2744 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2745
2746 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWrite));
2747 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2748
2749 pgmLock(pVM);
2750
2751 /*
2752 * Copy loop on ram ranges.
2753 */
2754 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2755 for (;;)
2756 {
2757 /* Inside range or not? */
2758 if (pRam && GCPhys >= pRam->GCPhys)
2759 {
2760 /*
2761 * Must work our way through this page by page.
2762 */
2763 RTGCPTR off = GCPhys - pRam->GCPhys;
2764 while (off < pRam->cb)
2765 {
2766 RTGCPTR iPage = off >> PAGE_SHIFT;
2767 PPGMPAGE pPage = &pRam->aPages[iPage];
2768 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2769 if (cb > cbWrite)
2770 cb = cbWrite;
2771
2772 /*
2773 * Any active WRITE or ALL access handlers?
2774 */
2775 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2776 {
2777 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2778 if (RT_FAILURE(rc))
2779 {
2780 pgmUnlock(pVM);
2781 return rc;
2782 }
2783 }
2784 else
2785 {
2786 /*
2787 * Get the pointer to the page.
2788 */
2789 PGMPAGEMAPLOCK PgMpLck;
2790 void *pvDst;
2791 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
2792 if (RT_SUCCESS(rc))
2793 {
2794 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2795 memcpy(pvDst, pvBuf, cb);
2796 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2797 }
2798 /* Ignore writes to ballooned pages. */
2799 else if (!PGM_PAGE_IS_BALLOONED(pPage))
2800 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2801 pRam->GCPhys + off, pPage, rc));
2802 }
2803
2804 /* next page */
2805 if (cb >= cbWrite)
2806 {
2807 pgmUnlock(pVM);
2808 return VINF_SUCCESS;
2809 }
2810
2811 cbWrite -= cb;
2812 off += cb;
2813 pvBuf = (const char *)pvBuf + cb;
2814 } /* walk pages in ram range */
2815
2816 GCPhys = pRam->GCPhysLast + 1;
2817 }
2818 else
2819 {
2820 /*
2821 * Unassigned address space, skip it.
2822 */
2823 if (!pRam)
2824 break;
2825 size_t cb = pRam->GCPhys - GCPhys;
2826 if (cb >= cbWrite)
2827 break;
2828 cbWrite -= cb;
2829 pvBuf = (const char *)pvBuf + cb;
2830 GCPhys += cb;
2831 }
2832
2833 /* Advance range if necessary. */
2834 while (pRam && GCPhys > pRam->GCPhysLast)
2835 pRam = pRam->CTX_SUFF(pNext);
2836 } /* Ram range walk */
2837
2838 pgmUnlock(pVM);
2839 return VINF_SUCCESS;
2840}
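
/*
 * Illustrative sketch: writing a couple of words with PGMPhysWrite.  Like the
 * read path, this honours WRITE/ALL access handlers and may have to be retried
 * in ring-3 when running in R0/RC.  Helper name and data are hypothetical.
 */
#if 0 /* example only, never built */
static int pgmExampleWriteGuestWords(PVM pVM, RTGCPHYS GCPhys)
{
    static const uint32_t s_au32Data[2] = { UINT32_C(0x12345678), UINT32_C(0x9abcdef0) };
    return PGMPhysWrite(pVM, GCPhys, &s_au32Data[0], sizeof(s_au32Data));
}
#endif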
2841
2842
2843/**
2844 * Read from guest physical memory by GC physical address, bypassing
2845 * MMIO and access handlers.
2846 *
2847 * @returns VBox status.
2848 * @param pVM Pointer to the VM.
2849 * @param pvDst The destination address.
2850 * @param GCPhysSrc The source address (GC physical address).
2851 * @param cb The number of bytes to read.
2852 */
2853VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2854{
2855 /*
2856 * Treat the first page as a special case.
2857 */
2858 if (!cb)
2859 return VINF_SUCCESS;
2860
2861 /* map the 1st page */
2862 void const *pvSrc;
2863 PGMPAGEMAPLOCK Lock;
2864 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2865 if (RT_FAILURE(rc))
2866 return rc;
2867
2868 /* optimize for the case where access is completely within the first page. */
2869 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2870 if (RT_LIKELY(cb <= cbPage))
2871 {
2872 memcpy(pvDst, pvSrc, cb);
2873 PGMPhysReleasePageMappingLock(pVM, &Lock);
2874 return VINF_SUCCESS;
2875 }
2876
2877 /* copy to the end of the page. */
2878 memcpy(pvDst, pvSrc, cbPage);
2879 PGMPhysReleasePageMappingLock(pVM, &Lock);
2880 GCPhysSrc += cbPage;
2881 pvDst = (uint8_t *)pvDst + cbPage;
2882 cb -= cbPage;
2883
2884 /*
2885 * Page by page.
2886 */
2887 for (;;)
2888 {
2889 /* map the page */
2890 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2891 if (RT_FAILURE(rc))
2892 return rc;
2893
2894 /* last page? */
2895 if (cb <= PAGE_SIZE)
2896 {
2897 memcpy(pvDst, pvSrc, cb);
2898 PGMPhysReleasePageMappingLock(pVM, &Lock);
2899 return VINF_SUCCESS;
2900 }
2901
2902 /* copy the entire page and advance */
2903 memcpy(pvDst, pvSrc, PAGE_SIZE);
2904 PGMPhysReleasePageMappingLock(pVM, &Lock);
2905 GCPhysSrc += PAGE_SIZE;
2906 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2907 cb -= PAGE_SIZE;
2908 }
2909 /* won't ever get here. */
2910}
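
/*
 * Illustrative sketch: unlike PGMPhysRead, PGMPhysSimpleReadGCPhys bypasses
 * MMIO and access handlers entirely, so it is only suitable when the raw
 * RAM/ROM contents are wanted.  Helper name is hypothetical.
 */
#if 0 /* example only, never built */
static int pgmExamplePeekRawGuestRam(PVM pVM, RTGCPHYS GCPhys, void *pvDst, size_t cbDst)
{
    /* No handler callbacks are triggered by this read. */
    return PGMPhysSimpleReadGCPhys(pVM, pvDst, GCPhys, cbDst);
}
#endif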
2911
2912
2913/**
2914 * Write to guest physical memory by GC physical address, bypassing
2915 * MMIO and access handlers.
2918 *
2919 * @returns VBox status.
2920 * @param pVM Pointer to the VM.
2921 * @param GCPhysDst The GC physical address of the destination.
2922 * @param pvSrc The source buffer.
2923 * @param cb The number of bytes to write.
2924 */
2925VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2926{
2927 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2928
2929 /*
2930 * Treat the first page as a special case.
2931 */
2932 if (!cb)
2933 return VINF_SUCCESS;
2934
2935 /* map the 1st page */
2936 void *pvDst;
2937 PGMPAGEMAPLOCK Lock;
2938 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2939 if (RT_FAILURE(rc))
2940 return rc;
2941
2942 /* optimize for the case where access is completely within the first page. */
2943 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2944 if (RT_LIKELY(cb <= cbPage))
2945 {
2946 memcpy(pvDst, pvSrc, cb);
2947 PGMPhysReleasePageMappingLock(pVM, &Lock);
2948 return VINF_SUCCESS;
2949 }
2950
2951 /* copy to the end of the page. */
2952 memcpy(pvDst, pvSrc, cbPage);
2953 PGMPhysReleasePageMappingLock(pVM, &Lock);
2954 GCPhysDst += cbPage;
2955 pvSrc = (const uint8_t *)pvSrc + cbPage;
2956 cb -= cbPage;
2957
2958 /*
2959 * Page by page.
2960 */
2961 for (;;)
2962 {
2963 /* map the page */
2964 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2965 if (RT_FAILURE(rc))
2966 return rc;
2967
2968 /* last page? */
2969 if (cb <= PAGE_SIZE)
2970 {
2971 memcpy(pvDst, pvSrc, cb);
2972 PGMPhysReleasePageMappingLock(pVM, &Lock);
2973 return VINF_SUCCESS;
2974 }
2975
2976 /* copy the entire page and advance */
2977 memcpy(pvDst, pvSrc, PAGE_SIZE);
2978 PGMPhysReleasePageMappingLock(pVM, &Lock);
2979 GCPhysDst += PAGE_SIZE;
2980 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2981 cb -= PAGE_SIZE;
2982 }
2983 /* won't ever get here. */
2984}
2985
2986
2987/**
2988 * Read from guest physical memory referenced by GC pointer.
2989 *
2990 * This function uses the current CR3/CR0/CR4 of the guest and will
2991 * bypass access handlers and not set any accessed bits.
2992 *
2993 * @returns VBox status.
2994 * @param pVCpu Handle to the current virtual CPU.
2995 * @param pvDst The destination address.
2996 * @param GCPtrSrc The source address (GC pointer).
2997 * @param cb The number of bytes to read.
2998 */
2999VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3000{
3001 PVM pVM = pVCpu->CTX_SUFF(pVM);
3002/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3003
3004 /*
3005 * Treat the first page as a special case.
3006 */
3007 if (!cb)
3008 return VINF_SUCCESS;
3009
3010 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleRead));
3011 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3012
3013 /* Take the PGM lock here once, because the functions called below would otherwise each take it
3014 * for a very short period, which is counter-productive when many VCPUs are fighting for the lock.
3015 */
3016 pgmLock(pVM);
3017
3018 /* map the 1st page */
3019 void const *pvSrc;
3020 PGMPAGEMAPLOCK Lock;
3021 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3022 if (RT_FAILURE(rc))
3023 {
3024 pgmUnlock(pVM);
3025 return rc;
3026 }
3027
3028 /* optimize for the case where access is completely within the first page. */
3029 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3030 if (RT_LIKELY(cb <= cbPage))
3031 {
3032 memcpy(pvDst, pvSrc, cb);
3033 PGMPhysReleasePageMappingLock(pVM, &Lock);
3034 pgmUnlock(pVM);
3035 return VINF_SUCCESS;
3036 }
3037
3038 /* copy to the end of the page. */
3039 memcpy(pvDst, pvSrc, cbPage);
3040 PGMPhysReleasePageMappingLock(pVM, &Lock);
3041 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3042 pvDst = (uint8_t *)pvDst + cbPage;
3043 cb -= cbPage;
3044
3045 /*
3046 * Page by page.
3047 */
3048 for (;;)
3049 {
3050 /* map the page */
3051 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3052 if (RT_FAILURE(rc))
3053 {
3054 pgmUnlock(pVM);
3055 return rc;
3056 }
3057
3058 /* last page? */
3059 if (cb <= PAGE_SIZE)
3060 {
3061 memcpy(pvDst, pvSrc, cb);
3062 PGMPhysReleasePageMappingLock(pVM, &Lock);
3063 pgmUnlock(pVM);
3064 return VINF_SUCCESS;
3065 }
3066
3067 /* copy the entire page and advance */
3068 memcpy(pvDst, pvSrc, PAGE_SIZE);
3069 PGMPhysReleasePageMappingLock(pVM, &Lock);
3070 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
3071 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3072 cb -= PAGE_SIZE;
3073 }
3074 /* won't ever get here. */
3075}
3076
3077
3078/**
3079 * Write to guest physical memory referenced by GC pointer.
3080 *
3081 * This function uses the current CR3/CR0/CR4 of the guest and will
3082 * bypass access handlers and not set dirty or accessed bits.
3083 *
3084 * @returns VBox status.
3085 * @param pVCpu Handle to the current virtual CPU.
3086 * @param GCPtrDst The destination address (GC pointer).
3087 * @param pvSrc The source address.
3088 * @param cb The number of bytes to write.
3089 */
3090VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3091{
3092 PVM pVM = pVCpu->CTX_SUFF(pVM);
3093 VMCPU_ASSERT_EMT(pVCpu);
3094
3095 /*
3096 * Treat the first page as a special case.
3097 */
3098 if (!cb)
3099 return VINF_SUCCESS;
3100
3101 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWrite));
3102 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3103
3104 /* map the 1st page */
3105 void *pvDst;
3106 PGMPAGEMAPLOCK Lock;
3107 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3108 if (RT_FAILURE(rc))
3109 return rc;
3110
3111 /* optimize for the case where access is completely within the first page. */
3112 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3113 if (RT_LIKELY(cb <= cbPage))
3114 {
3115 memcpy(pvDst, pvSrc, cb);
3116 PGMPhysReleasePageMappingLock(pVM, &Lock);
3117 return VINF_SUCCESS;
3118 }
3119
3120 /* copy to the end of the page. */
3121 memcpy(pvDst, pvSrc, cbPage);
3122 PGMPhysReleasePageMappingLock(pVM, &Lock);
3123 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3124 pvSrc = (const uint8_t *)pvSrc + cbPage;
3125 cb -= cbPage;
3126
3127 /*
3128 * Page by page.
3129 */
3130 for (;;)
3131 {
3132 /* map the page */
3133 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3134 if (RT_FAILURE(rc))
3135 return rc;
3136
3137 /* last page? */
3138 if (cb <= PAGE_SIZE)
3139 {
3140 memcpy(pvDst, pvSrc, cb);
3141 PGMPhysReleasePageMappingLock(pVM, &Lock);
3142 return VINF_SUCCESS;
3143 }
3144
3145 /* copy the entire page and advance */
3146 memcpy(pvDst, pvSrc, PAGE_SIZE);
3147 PGMPhysReleasePageMappingLock(pVM, &Lock);
3148 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3149 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3150 cb -= PAGE_SIZE;
3151 }
3152 /* won't ever get here. */
3153}
3154
3155
3156/**
3157 * Write to guest physical memory referenced by GC pointer and update the PTE.
3158 *
3159 * This function uses the current CR3/CR0/CR4 of the guest and will
3160 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3161 *
3162 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3163 *
3164 * @returns VBox status.
3165 * @param pVCpu Handle to the current virtual CPU.
3166 * @param GCPtrDst The destination address (GC pointer).
3167 * @param pvSrc The source address.
3168 * @param cb The number of bytes to write.
3169 */
3170VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3171{
3172 PVM pVM = pVCpu->CTX_SUFF(pVM);
3173 VMCPU_ASSERT_EMT(pVCpu);
3174
3175 /*
3176 * Treat the first page as a special case.
3177 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage calls.
3178 */
3179 if (!cb)
3180 return VINF_SUCCESS;
3181
3182 /* map the 1st page */
3183 void *pvDst;
3184 PGMPAGEMAPLOCK Lock;
3185 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3186 if (RT_FAILURE(rc))
3187 return rc;
3188
3189 /* optimize for the case where access is completely within the first page. */
3190 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3191 if (RT_LIKELY(cb <= cbPage))
3192 {
3193 memcpy(pvDst, pvSrc, cb);
3194 PGMPhysReleasePageMappingLock(pVM, &Lock);
3195 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3196 return VINF_SUCCESS;
3197 }
3198
3199 /* copy to the end of the page. */
3200 memcpy(pvDst, pvSrc, cbPage);
3201 PGMPhysReleasePageMappingLock(pVM, &Lock);
3202 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3203 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3204 pvSrc = (const uint8_t *)pvSrc + cbPage;
3205 cb -= cbPage;
3206
3207 /*
3208 * Page by page.
3209 */
3210 for (;;)
3211 {
3212 /* map the page */
3213 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3214 if (RT_FAILURE(rc))
3215 return rc;
3216
3217 /* last page? */
3218 if (cb <= PAGE_SIZE)
3219 {
3220 memcpy(pvDst, pvSrc, cb);
3221 PGMPhysReleasePageMappingLock(pVM, &Lock);
3222 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3223 return VINF_SUCCESS;
3224 }
3225
3226 /* copy the entire page and advance */
3227 memcpy(pvDst, pvSrc, PAGE_SIZE);
3228 PGMPhysReleasePageMappingLock(pVM, &Lock);
3229 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3230 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3231 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3232 cb -= PAGE_SIZE;
3233 }
3234 /* won't ever get here. */
3235}
3236
3237
3238/**
3239 * Read from guest physical memory referenced by GC pointer.
3240 *
3241 * This function uses the current CR3/CR0/CR4 of the guest and will
3242 * respect access handlers and set accessed bits.
3243 *
3244 * @returns VBox status.
3245 * @param pVCpu Handle to the current virtual CPU.
3246 * @param pvDst The destination address.
3247 * @param GCPtrSrc The source address (GC pointer).
3248 * @param cb The number of bytes to read.
3249 * @thread The vCPU EMT.
3250 */
3251VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3252{
3253 RTGCPHYS GCPhys;
3254 uint64_t fFlags;
3255 int rc;
3256 PVM pVM = pVCpu->CTX_SUFF(pVM);
3257 VMCPU_ASSERT_EMT(pVCpu);
3258
3259 /*
3260 * Anything to do?
3261 */
3262 if (!cb)
3263 return VINF_SUCCESS;
3264
3265 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3266
3267 /*
3268 * Optimize reads within a single page.
3269 */
3270 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3271 {
3272 /* Convert virtual to physical address + flags */
3273 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3274 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3275 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3276
3277 /* mark the guest page as accessed. */
3278 if (!(fFlags & X86_PTE_A))
3279 {
3280 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3281 AssertRC(rc);
3282 }
3283
3284 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
3285 }
3286
3287 /*
3288 * Page by page.
3289 */
3290 for (;;)
3291 {
3292 /* Convert virtual to physical address + flags */
3293 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3294 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3295 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3296
3297 /* mark the guest page as accessed. */
3298 if (!(fFlags & X86_PTE_A))
3299 {
3300 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3301 AssertRC(rc);
3302 }
3303
3304 /* copy */
3305 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3306 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
3307 if (cbRead >= cb || RT_FAILURE(rc))
3308 return rc;
3309
3310 /* next */
3311 cb -= cbRead;
3312 pvDst = (uint8_t *)pvDst + cbRead;
3313 GCPtrSrc += cbRead;
3314 }
3315}
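
/*
 * Illustrative sketch: PGMPhysReadGCPtr resolves the guest virtual address
 * through the current guest paging mode, marks the pages accessed and honours
 * access handlers; compare PGMPhysSimpleReadGCPtr, which bypasses the handlers
 * and leaves the accessed bits alone.  Helper name is hypothetical.
 */
#if 0 /* example only, never built */
static int pgmExampleReadGuestVirt(PVMCPU pVCpu, RTGCPTR GCPtrSrc, void *pvDst, size_t cbDst)
{
    return PGMPhysReadGCPtr(pVCpu, pvDst, GCPtrSrc, cbDst);
}
#endif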
3316
3317
3318/**
3319 * Write to guest physical memory referenced by GC pointer.
3320 *
3321 * This function uses the current CR3/CR0/CR4 of the guest and will
3322 * respect access handlers and set dirty and accessed bits.
3323 *
3324 * @returns VBox status.
3325 * @retval VINF_SUCCESS.
3326 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
3327 *
3328 * @param pVCpu Handle to the current virtual CPU.
3329 * @param GCPtrDst The destination address (GC pointer).
3330 * @param pvSrc The source address.
3331 * @param cb The number of bytes to write.
3332 */
3333VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3334{
3335 RTGCPHYS GCPhys;
3336 uint64_t fFlags;
3337 int rc;
3338 PVM pVM = pVCpu->CTX_SUFF(pVM);
3339 VMCPU_ASSERT_EMT(pVCpu);
3340
3341 /*
3342 * Anything to do?
3343 */
3344 if (!cb)
3345 return VINF_SUCCESS;
3346
3347 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3348
3349 /*
3350 * Optimize writes within a single page.
3351 */
3352 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3353 {
3354 /* Convert virtual to physical address + flags */
3355 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3356 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3357 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3358
3359 /* Mention when we ignore X86_PTE_RW... */
3360 if (!(fFlags & X86_PTE_RW))
3361 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3362
3363 /* Mark the guest page as accessed and dirty if necessary. */
3364 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3365 {
3366 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3367 AssertRC(rc);
3368 }
3369
3370 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
3371 }
3372
3373 /*
3374 * Page by page.
3375 */
3376 for (;;)
3377 {
3378 /* Convert virtual to physical address + flags */
3379 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3380 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3381 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3382
3383 /* Mention when we ignore X86_PTE_RW... */
3384 if (!(fFlags & X86_PTE_RW))
3385 Log(("PGMPhysGCPtr2GCPhys: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3386
3387 /* Mark the guest page as accessed and dirty if necessary. */
3388 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3389 {
3390 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3391 AssertRC(rc);
3392 }
3393
3394 /* copy */
3395 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3396 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
3397 if (cbWrite >= cb || RT_FAILURE(rc))
3398 return rc;
3399
3400 /* next */
3401 cb -= cbWrite;
3402 pvSrc = (uint8_t *)pvSrc + cbWrite;
3403 GCPtrDst += cbWrite;
3404 }
3405}
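
/*
 * Illustrative usage sketch (not part of the original source): writing a
 * 32-bit value through the current guest paging context.  pVCpu and GCPtrDst
 * are assumed to be supplied by the caller.
 *
 * @code
 *      uint32_t u32Value = UINT32_C(0x12345678);
 *      int rc = PGMPhysWriteGCPtr(pVCpu, GCPtrDst, &u32Value, sizeof(u32Value));
 *      AssertRCReturn(rc, rc);  // accessed/dirty bits are updated on success
 * @endcode
 */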
3406
3407
3408/**
3409 * Performs a read of guest virtual memory for instruction emulation.
3410 *
3411 * This will check permissions, raise exceptions and update the access bits.
3412 *
3413 * The current implementation will bypass all access handlers. It may later be
3414 * changed to at least respect MMIO.
3415 *
3416 *
3417 * @returns VBox status code suitable for scheduling.
3418 * @retval VINF_SUCCESS if the read was performed successfully.
3419 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3420 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3421 *
3422 * @param pVCpu Handle to the current virtual CPU.
3423 * @param pCtxCore The context core.
3424 * @param pvDst Where to put the bytes we've read.
3425 * @param GCPtrSrc The source address.
3426 * @param cb The number of bytes to read. Not more than a page.
3427 *
3428 * @remark This function will dynamically map physical pages in GC. This may unmap
3429 * mappings done by the caller. Be careful!
3430 */
3431VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3432{
3433 PVM pVM = pVCpu->CTX_SUFF(pVM);
3434 Assert(cb <= PAGE_SIZE);
3435 VMCPU_ASSERT_EMT(pVCpu);
3436
3437/** @todo r=bird: This isn't perfect!
3438 * -# It's not checking for reserved bits being 1.
3439 * -# It's not correctly dealing with the access bit.
3440 * -# It's not respecting MMIO memory or any other access handlers.
3441 */
3442 /*
3443 * 1. Translate virtual to physical. This may fault.
3444 * 2. Map the physical address.
3445 * 3. Do the read operation.
3446 * 4. Set access bits if required.
3447 */
3448 int rc;
3449 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3450 if (cb <= cb1)
3451 {
3452 /*
3453 * Not crossing pages.
3454 */
3455 RTGCPHYS GCPhys;
3456 uint64_t fFlags;
3457 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3458 if (RT_SUCCESS(rc))
3459 {
3460 /** @todo we should check reserved bits ... */
3461 PGMPAGEMAPLOCK PgMpLck;
3462 void const *pvSrc;
3463 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
3464 switch (rc)
3465 {
3466 case VINF_SUCCESS:
3467 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3468 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3469 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3470 break;
3471 case VERR_PGM_PHYS_PAGE_RESERVED:
3472 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3473 memset(pvDst, 0xff, cb);
3474 break;
3475 default:
3476 Assert(RT_FAILURE_NP(rc));
3477 return rc;
3478 }
3479
3480 /** @todo access bit emulation isn't 100% correct. */
3481 if (!(fFlags & X86_PTE_A))
3482 {
3483 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3484 AssertRC(rc);
3485 }
3486 return VINF_SUCCESS;
3487 }
3488 }
3489 else
3490 {
3491 /*
3492 * Crosses pages.
3493 */
3494 size_t cb2 = cb - cb1;
3495 uint64_t fFlags1;
3496 RTGCPHYS GCPhys1;
3497 uint64_t fFlags2;
3498 RTGCPHYS GCPhys2;
3499 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3500 if (RT_SUCCESS(rc))
3501 {
3502 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3503 if (RT_SUCCESS(rc))
3504 {
3505 /** @todo we should check reserved bits ... */
3506 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3507 PGMPAGEMAPLOCK PgMpLck;
3508 void const *pvSrc1;
3509 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
3510 switch (rc)
3511 {
3512 case VINF_SUCCESS:
3513 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3514 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3515 break;
3516 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3517 memset(pvDst, 0xff, cb1);
3518 break;
3519 default:
3520 Assert(RT_FAILURE_NP(rc));
3521 return rc;
3522 }
3523
3524 void const *pvSrc2;
3525 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
3526 switch (rc)
3527 {
3528 case VINF_SUCCESS:
3529 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3530 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3531 break;
3532 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3533 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3534 break;
3535 default:
3536 Assert(RT_FAILURE_NP(rc));
3537 return rc;
3538 }
3539
3540 if (!(fFlags1 & X86_PTE_A))
3541 {
3542 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3543 AssertRC(rc);
3544 }
3545 if (!(fFlags2 & X86_PTE_A))
3546 {
3547 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3548 AssertRC(rc);
3549 }
3550 return VINF_SUCCESS;
3551 }
3552 }
3553 }
3554
3555 /*
3556 * Raise a #PF.
3557 */
3558 uint32_t uErr;
3559
3560 /* Get the current privilege level. */
3561 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3562 switch (rc)
3563 {
3564 case VINF_SUCCESS:
3565 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3566 break;
3567
3568 case VERR_PAGE_NOT_PRESENT:
3569 case VERR_PAGE_TABLE_NOT_PRESENT:
3570 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3571 break;
3572
3573 default:
3574 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3575 return rc;
3576 }
3577 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3578 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3579}
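
/*
 * Illustrative sketch of how an instruction emulator might use the helper
 * above (not from the original source; pVCpu, pRegFrame and GCPtrOperand are
 * assumed to be provided by the emulation context).
 *
 * @code
 *      uint16_t u16Operand;
 *      int rc = PGMPhysInterpretedRead(pVCpu, pRegFrame, &u16Operand, GCPtrOperand, sizeof(u16Operand));
 *      if (rc != VINF_SUCCESS)
 *          return rc;  // VINF_EM_RAW_GUEST_TRAP, VINF_TRPM_XCPT_DISPATCHED or a VERR_* status
 *      // u16Operand is valid here and the accessed bit(s) have been set.
 * @endcode
 */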
3580
3581
3582/**
3583 * Performs a read of guest virtual memory for instruction emulation.
3584 *
3585 * This will check permissions, raise exceptions and update the access bits.
3586 *
3587 * The current implementation will bypass all access handlers. It may later be
3588 * changed to at least respect MMIO.
3589 *
3590 *
3591 * @returns VBox status code suitable for scheduling.
3592 * @retval VINF_SUCCESS if the read was performed successfully.
3593 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3594 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3595 *
3596 * @param pVCpu Handle to the current virtual CPU.
3597 * @param pCtxCore The context core.
3598 * @param pvDst Where to put the bytes we've read.
3599 * @param GCPtrSrc The source address.
3600 * @param cb The number of bytes to read. Not more than a page.
3601 * @param fRaiseTrap If set, the trap will be raised as per the spec; if clear,
3602 * an appropriate error status will be returned instead (never
3603 * an informational status).
3604 *
3605 *
3606 * @remarks Takes the PGM lock.
3607 * @remarks A page fault on the 2nd page of the access will be raised without
3608 * writing the bits on the first page since we're ASSUMING that the
3609 * caller is emulating an instruction access.
3610 * @remarks This function will dynamically map physical pages in GC. This may
3611 * unmap mappings done by the caller. Be careful!
3612 */
3613VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,
3614 bool fRaiseTrap)
3615{
3616 PVM pVM = pVCpu->CTX_SUFF(pVM);
3617 Assert(cb <= PAGE_SIZE);
3618 VMCPU_ASSERT_EMT(pVCpu);
3619
3620 /*
3621 * 1. Translate virtual to physical. This may fault.
3622 * 2. Map the physical address.
3623 * 3. Do the read operation.
3624 * 4. Set access bits if required.
3625 */
3626 int rc;
3627 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3628 if (cb <= cb1)
3629 {
3630 /*
3631 * Not crossing pages.
3632 */
3633 RTGCPHYS GCPhys;
3634 uint64_t fFlags;
3635 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3636 if (RT_SUCCESS(rc))
3637 {
3638 if (1) /** @todo we should check reserved bits ... */
3639 {
3640 const void *pvSrc;
3641 PGMPAGEMAPLOCK Lock;
3642 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3643 switch (rc)
3644 {
3645 case VINF_SUCCESS:
3646 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3647 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3648 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3649 PGMPhysReleasePageMappingLock(pVM, &Lock);
3650 break;
3651 case VERR_PGM_PHYS_PAGE_RESERVED:
3652 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3653 memset(pvDst, 0xff, cb);
3654 break;
3655 default:
3656 AssertMsgFailed(("%Rrc\n", rc));
3657 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3658 return rc;
3659 }
3660
3661 if (!(fFlags & X86_PTE_A))
3662 {
3663 /** @todo access bit emulation isn't 100% correct. */
3664 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3665 AssertRC(rc);
3666 }
3667 return VINF_SUCCESS;
3668 }
3669 }
3670 }
3671 else
3672 {
3673 /*
3674 * Crosses pages.
3675 */
3676 size_t cb2 = cb - cb1;
3677 uint64_t fFlags1;
3678 RTGCPHYS GCPhys1;
3679 uint64_t fFlags2;
3680 RTGCPHYS GCPhys2;
3681 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3682 if (RT_SUCCESS(rc))
3683 {
3684 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3685 if (RT_SUCCESS(rc))
3686 {
3687 if (1) /** @todo we should check reserved bits ... */
3688 {
3689 const void *pvSrc;
3690 PGMPAGEMAPLOCK Lock;
3691 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3692 switch (rc)
3693 {
3694 case VINF_SUCCESS:
3695 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3696 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3697 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3698 PGMPhysReleasePageMappingLock(pVM, &Lock);
3699 break;
3700 case VERR_PGM_PHYS_PAGE_RESERVED:
3701 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3702 memset(pvDst, 0xff, cb1);
3703 break;
3704 default:
3705 AssertMsgFailed(("%Rrc\n", rc));
3706 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3707 return rc;
3708 }
3709
3710 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3711 switch (rc)
3712 {
3713 case VINF_SUCCESS:
3714 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3715 PGMPhysReleasePageMappingLock(pVM, &Lock);
3716 break;
3717 case VERR_PGM_PHYS_PAGE_RESERVED:
3718 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3719 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3720 break;
3721 default:
3722 AssertMsgFailed(("%Rrc\n", rc));
3723 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3724 return rc;
3725 }
3726
3727 if (!(fFlags1 & X86_PTE_A))
3728 {
3729 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3730 AssertRC(rc);
3731 }
3732 if (!(fFlags2 & X86_PTE_A))
3733 {
3734 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3735 AssertRC(rc);
3736 }
3737 return VINF_SUCCESS;
3738 }
3739 /* sort out which page */
3740 }
3741 else
3742 GCPtrSrc += cb1; /* fault on 2nd page */
3743 }
3744 }
3745
3746 /*
3747 * Raise a #PF if we're allowed to do that.
3748 */
3749 /* Calc the error bits. */
3750 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3751 uint32_t uErr;
3752 switch (rc)
3753 {
3754 case VINF_SUCCESS:
3755 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3756 rc = VERR_ACCESS_DENIED;
3757 break;
3758
3759 case VERR_PAGE_NOT_PRESENT:
3760 case VERR_PAGE_TABLE_NOT_PRESENT:
3761 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3762 break;
3763
3764 default:
3765 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3766 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3767 return rc;
3768 }
3769 if (fRaiseTrap)
3770 {
3771 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3772 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3773 }
3774 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3775 return rc;
3776}
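
/*
 * Illustrative sketch (not from the original source) of probing guest memory
 * without raising the #PF by passing fRaiseTrap=false.  pVCpu, pRegFrame and
 * GCPtrSrc are assumed caller-provided, and VERR_EM_INTERPRETER is just one
 * possible fallback status a caller might choose.
 *
 * @code
 *      uint64_t u64Value;
 *      int rc = PGMPhysInterpretedReadNoHandlers(pVCpu, pRegFrame, &u64Value, GCPtrSrc,
 *                                                sizeof(u64Value), false);  // fRaiseTrap=false
 *      if (RT_FAILURE(rc))
 *          return VERR_EM_INTERPRETER;  // no trap was queued; let the caller decide what to do
 *      // u64Value holds the guest data.
 * @endcode
 */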
3777
3778
3779/**
3780 * Performs a write to guest virtual memory for instruction emulation.
3781 *
3782 * This will check permissions, raise exceptions and update the dirty and access
3783 * bits.
3784 *
3785 * @returns VBox status code suitable for scheduling.
3786 * @retval VINF_SUCCESS if the write was performed successfully.
3787 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3788 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3789 *
3790 * @param pVCpu Handle to the current virtual CPU.
3791 * @param pCtxCore The context core.
3792 * @param GCPtrDst The destination address.
3793 * @param pvSrc What to write.
3794 * @param cb The number of bytes to write. Not more than a page.
3795 * @param fRaiseTrap If set, the trap will be raised as per the spec; if clear,
3796 * an appropriate error status will be returned instead (never
3797 * an informational status).
3798 *
3799 * @remarks Takes the PGM lock.
3800 * @remarks A page fault on the 2nd page of the access will be raised without
3801 * writing the bits on the first page since we're ASSUMING that the
3802 * caller is emulating an instruction access.
3803 * @remarks This function will dynamically map physical pages in GC. This may
3804 * unmap mappings done by the caller. Be careful!
3805 */
3806VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,
3807 size_t cb, bool fRaiseTrap)
3808{
3809 Assert(cb <= PAGE_SIZE);
3810 PVM pVM = pVCpu->CTX_SUFF(pVM);
3811 VMCPU_ASSERT_EMT(pVCpu);
3812
3813 /*
3814 * 1. Translate virtual to physical. This may fault.
3815 * 2. Map the physical address.
3816 * 3. Do the write operation.
3817 * 4. Set access bits if required.
3818 */
3819 /** @todo Since this method is frequently used by EMInterpret or IOM
3820 * upon a write fault to a write-access monitored page, we can
3821 * reuse the guest page table walking from the \#PF code. */
3822 int rc;
3823 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3824 if (cb <= cb1)
3825 {
3826 /*
3827 * Not crossing pages.
3828 */
3829 RTGCPHYS GCPhys;
3830 uint64_t fFlags;
3831 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3832 if (RT_SUCCESS(rc))
3833 {
3834 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3835 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3836 && CPUMGetGuestCPL(pVCpu) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3837 {
3838 void *pvDst;
3839 PGMPAGEMAPLOCK Lock;
3840 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3841 switch (rc)
3842 {
3843 case VINF_SUCCESS:
3844 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3845 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3846 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3847 PGMPhysReleasePageMappingLock(pVM, &Lock);
3848 break;
3849 case VERR_PGM_PHYS_PAGE_RESERVED:
3850 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3851 /* bit bucket */
3852 break;
3853 default:
3854 AssertMsgFailed(("%Rrc\n", rc));
3855 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3856 return rc;
3857 }
3858
3859 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3860 {
3861 /** @todo dirty & access bit emulation isn't 100% correct. */
3862 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3863 AssertRC(rc);
3864 }
3865 return VINF_SUCCESS;
3866 }
3867 rc = VERR_ACCESS_DENIED;
3868 }
3869 }
3870 else
3871 {
3872 /*
3873 * Crosses pages.
3874 */
3875 size_t cb2 = cb - cb1;
3876 uint64_t fFlags1;
3877 RTGCPHYS GCPhys1;
3878 uint64_t fFlags2;
3879 RTGCPHYS GCPhys2;
3880 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3881 if (RT_SUCCESS(rc))
3882 {
3883 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3884 if (RT_SUCCESS(rc))
3885 {
3886 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3887 && (fFlags2 & X86_PTE_RW))
3888 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3889 && CPUMGetGuestCPL(pVCpu) <= 2) )
3890 {
3891 void *pvDst;
3892 PGMPAGEMAPLOCK Lock;
3893 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3894 switch (rc)
3895 {
3896 case VINF_SUCCESS:
3897 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3898 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3899 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3900 PGMPhysReleasePageMappingLock(pVM, &Lock);
3901 break;
3902 case VERR_PGM_PHYS_PAGE_RESERVED:
3903 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3904 /* bit bucket */
3905 break;
3906 default:
3907 AssertMsgFailed(("%Rrc\n", rc));
3908 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3909 return rc;
3910 }
3911
3912 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3913 switch (rc)
3914 {
3915 case VINF_SUCCESS:
3916 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3917 PGMPhysReleasePageMappingLock(pVM, &Lock);
3918 break;
3919 case VERR_PGM_PHYS_PAGE_RESERVED:
3920 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3921 /* bit bucket */
3922 break;
3923 default:
3924 AssertMsgFailed(("%Rrc\n", rc));
3925 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3926 return rc;
3927 }
3928
3929 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3930 {
3931 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3932 AssertRC(rc);
3933 }
3934 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3935 {
3936 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3937 AssertRC(rc);
3938 }
3939 return VINF_SUCCESS;
3940 }
3941 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3942 GCPtrDst += cb1; /* fault on the 2nd page. */
3943 rc = VERR_ACCESS_DENIED;
3944 }
3945 else
3946 GCPtrDst += cb1; /* fault on the 2nd page. */
3947 }
3948 }
3949
3950 /*
3951 * Raise a #PF if we're allowed to do that.
3952 */
3953 /* Calc the error bits. */
3954 uint32_t uErr;
3955 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3956 switch (rc)
3957 {
3958 case VINF_SUCCESS:
3959 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3960 rc = VERR_ACCESS_DENIED;
3961 break;
3962
3963 case VERR_ACCESS_DENIED:
3964 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3965 break;
3966
3967 case VERR_PAGE_NOT_PRESENT:
3968 case VERR_PAGE_TABLE_NOT_PRESENT:
3969 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3970 break;
3971
3972 default:
3973 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3974 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3975 return rc;
3976 }
3977 if (fRaiseTrap)
3978 {
3979 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3980 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3981 }
3982 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3983 return rc;
3984}
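
/*
 * Illustrative sketch (not from the original source) of an emulated store that
 * lets the helper above raise the #PF itself.  pVCpu, pRegFrame and GCPtrDst
 * are assumed caller-provided.
 *
 * @code
 *      uint32_t const u32Value = 0;
 *      int rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, pRegFrame, GCPtrDst, &u32Value,
 *                                                 sizeof(u32Value), true);  // fRaiseTrap=true
 *      if (rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED)
 *          return rc;  // a #PF was queued/dispatched instead of performing the write
 *      AssertRCReturn(rc, rc);
 * @endcode
 */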
3985
3986
3987/**
3988 * Return the page type of the specified physical address.
3989 *
3990 * @returns The page type.
3991 * @param pVM Pointer to the VM.
3992 * @param GCPhys Guest physical address
3993 */
3994VMMDECL(PGMPAGETYPE) PGMPhysGetPageType(PVM pVM, RTGCPHYS GCPhys)
3995{
3996 pgmLock(pVM);
3997 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3998 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
3999 pgmUnlock(pVM);
4000
4001 return enmPgType;
4002}
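
/*
 * Illustrative usage sketch (not from the original source): rejecting physical
 * addresses that are not backed by anything before doing further work.
 *
 * @code
 *      if (PGMPhysGetPageType(pVM, GCPhys) == PGMPAGETYPE_INVALID)
 *          return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;  // nothing registered at GCPhys
 * @endcode
 */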
4003
4004
4005
4006
4007/**
4008 * Converts a GC physical address to a HC ring-3 pointer, with some
4009 * additional checks.
4010 *
4011 * @returns VBox status code (no informational statuses).
4012 * @retval VINF_SUCCESS on success.
4013 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4014 * access handler of some kind.
4015 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4016 * accesses or is odd in any way.
4017 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4018 *
4019 * @param pVM Pointer to the VM.
4020 * @param GCPhys The GC physical address to convert. Since this is only
4021 * used for filling the REM TLB, the A20 mask must be
4022 * applied before calling this API.
4023 * @param fWritable Whether write access is required.
4024 * @param ppv Where to store the pointer corresponding to GCPhys on
4025 * success.
4026 * @param fByPassHandlers Whether to bypass access handlers; MMIO pages are
 * never bypassed.
 * @param pLock Where to store the lock information needed to release the
 * mapping later with PGMPhysReleasePageMappingLock.
4027 *
4028 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
4029 */
4030VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
4031 void **ppv, PPGMPAGEMAPLOCK pLock)
4032{
4033 pgmLock(pVM);
4034 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
4035
4036 PPGMRAMRANGE pRam;
4037 PPGMPAGE pPage;
4038 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4039 if (RT_SUCCESS(rc))
4040 {
4041 if (PGM_PAGE_IS_BALLOONED(pPage))
4042 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4043 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4044 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4045 rc = VINF_SUCCESS;
4046 else
4047 {
4048 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4049 {
4050 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4051 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4052 }
4053 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4054 {
4055 Assert(!fByPassHandlers);
4056 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4057 }
4058 }
4059 if (RT_SUCCESS(rc))
4060 {
4061 int rc2;
4062
4063 /* Make sure what we return is writable. */
4064 if (fWritable)
4065 switch (PGM_PAGE_GET_STATE(pPage))
4066 {
4067 case PGM_PAGE_STATE_ALLOCATED:
4068 break;
4069 case PGM_PAGE_STATE_BALLOONED:
4070 AssertFailed();
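 /* Not expected here (ballooned pages were rejected above); fall through
 and make the page writable like the other non-allocated states. */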
4071 case PGM_PAGE_STATE_ZERO:
4072 case PGM_PAGE_STATE_SHARED:
4073 case PGM_PAGE_STATE_WRITE_MONITORED:
4074 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
4075 AssertLogRelRCReturn(rc2, rc2);
4076 break;
4077 }
4078
4079#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
4080 PVMCPU pVCpu = VMMGetCpu(pVM);
4081 void *pv;
4082 rc = pgmRZDynMapHCPageInlined(pVCpu,
4083 PGM_PAGE_GET_HCPHYS(pPage),
4084 &pv
4085 RTLOG_COMMA_SRC_POS);
4086 if (RT_FAILURE(rc))
4087 return rc;
4088 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4089 pLock->pvPage = pv;
4090 pLock->pVCpu = pVCpu;
4091
4092#else
4093 /* Get a ring-3 mapping of the address. */
4094 PPGMPAGER3MAPTLBE pTlbe;
4095 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4096 AssertLogRelRCReturn(rc2, rc2);
4097
4098 /* Lock it and calculate the address. */
4099 if (fWritable)
4100 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
4101 else
4102 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
4103 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4104#endif
4105
4106 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
4107 }
4108 else
4109 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
4110
4111 /* else: handler catching all access, no pointer returned. */
4112 }
4113 else
4114 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
4115
4116 pgmUnlock(pVM);
4117 return rc;
4118}
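
/*
 * Illustrative sketch (not from the original source) of the map / access /
 * unlock pattern an IEM-style caller might use with the function above.
 * GCPhys is assumed to already have the A20 mask applied, and pvSrc/cbToWrite
 * are hypothetical caller data; the handler fallback is one possible choice.
 *
 * @code
 *      void           *pv;
 *      PGMPAGEMAPLOCK  Lock;
 *      int rc = PGMPhysIemGCPhys2Ptr(pVM, GCPhys, true, false, &pv, &Lock);  // fWritable, !fByPassHandlers
 *      if (RT_SUCCESS(rc))
 *      {
 *          memcpy(pv, pvSrc, cbToWrite);              // direct access to the guest page
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *      }
 *      else if (rc == VERR_PGM_PHYS_TLB_CATCH_WRITE || rc == VERR_PGM_PHYS_TLB_CATCH_ALL)
 *          rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbToWrite);  // let the access handlers see it
 * @endcode
 */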
4119