VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 29888

Last change on this file since 29888 was 29250, checked in by vboxsync, 15 years ago

iprt/asm*.h: split out asm-math.h, don't include asm-*.h from asm.h, don't include asm.h from sup.h. Fixed a couple file headers.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 128.5 KB
1/* $Id: PGMAllPhys.cpp 29250 2010-05-09 17:53:58Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM_PHYS
22#include <VBox/pgm.h>
23#include <VBox/trpm.h>
24#include <VBox/vmm.h>
25#include <VBox/iom.h>
26#include <VBox/em.h>
27#include <VBox/rem.h>
28#include "../PGMInternal.h"
29#include <VBox/vm.h>
30#include "../PGMInline.h"
31#include <VBox/param.h>
32#include <VBox/err.h>
33#include <iprt/assert.h>
34#include <iprt/string.h>
35#include <iprt/asm-amd64-x86.h>
36#include <VBox/log.h>
37#ifdef IN_RING3
38# include <iprt/thread.h>
39#endif
40
41
42/*******************************************************************************
43* Defined Constants And Macros *
44*******************************************************************************/
45/** Enable the physical TLB. */
46#define PGM_WITH_PHYS_TLB
47
48
49
50#ifndef IN_RING3
51
52/**
53 * \#PF Handler callback for physical memory accesses without a RC/R0 handler.
54 * This simply pushes everything to the HC handler.
55 *
56 * @returns VBox status code (appropriate for trap handling and GC return).
57 * @param pVM VM Handle.
58 * @param uErrorCode CPU Error code.
59 * @param pRegFrame Trap register frame.
60 * @param pvFault The fault address (cr2).
61 * @param GCPhysFault The GC physical address corresponding to pvFault.
62 * @param pvUser User argument.
63 */
64VMMDECL(int) pgmPhysHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
65{
66 return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
67}
68
69
70/**
71 * \#PF Handler callback for Guest ROM range write access.
72 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
73 *
74 * @returns VBox status code (appropriate for trap handling and GC return).
75 * @param pVM VM Handle.
76 * @param uErrorCode CPU Error code.
77 * @param pRegFrame Trap register frame.
78 * @param pvFault The fault address (cr2).
79 * @param GCPhysFault The GC physical address corresponding to pvFault.
80 * @param pvUser User argument. Pointer to the ROM range structure.
81 */
82VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
83{
84 int rc;
85 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
86 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
87 PVMCPU pVCpu = VMMGetCpu(pVM);
88
89 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
90 switch (pRom->aPages[iPage].enmProt)
91 {
92 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
93 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
94 {
95 /*
96 * If it's a simple instruction which doesn't change the cpu state
97 * we will simply skip it. Otherwise we'll have to defer it to REM.
98 */
99 uint32_t cbOp;
100 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
101 rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, &cbOp);
102 if ( RT_SUCCESS(rc)
103 && pDis->mode == CPUMODE_32BIT /** @todo why does this matter? */
104 && !(pDis->prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
105 {
106 switch (pDis->opcode)
107 {
108 /** @todo Find other instructions we can safely skip, possibly
109 * adding this kind of detection to DIS or EM. */
110 case OP_MOV:
111 pRegFrame->rip += cbOp;
112 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteHandled);
113 return VINF_SUCCESS;
114 }
115 }
116 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
117 return rc;
118 break;
119 }
120
121 case PGMROMPROT_READ_RAM_WRITE_RAM:
122 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
123 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
124 AssertRC(rc);
125 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
126
127 case PGMROMPROT_READ_ROM_WRITE_RAM:
128 /* Handle it in ring-3 because it's *way* easier there. */
129 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
130 break;
131
132 default:
133 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
134 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
135 VERR_INTERNAL_ERROR);
136 }
137
138 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteUnhandled);
139 return VINF_EM_RAW_EMULATE_INSTR;
140}
141
142#endif /* !IN_RING3 */
143
144/**
145 * Checks if Address Gate 20 is enabled or not.
146 *
147 * @returns true if enabled.
148 * @returns false if disabled.
149 * @param pVCpu VMCPU handle.
150 */
151VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
152{
153 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
154 return pVCpu->pgm.s.fA20Enabled;
155}
156
157
158/**
159 * Validates a GC physical address.
160 *
161 * @returns true if valid.
162 * @returns false if invalid.
163 * @param pVM The VM handle.
164 * @param GCPhys The physical address to validate.
165 */
166VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
167{
168 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
169 return pPage != NULL;
170}
171
172
173/**
174 * Checks if a GC physical address is a normal page,
175 * i.e. not ROM, MMIO or reserved.
176 *
177 * @returns true if normal.
178 * @returns false if invalid, ROM, MMIO or reserved page.
179 * @param pVM The VM handle.
180 * @param GCPhys The physical address to check.
181 */
182VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
183{
184 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
185 return pPage
186 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
187}
188
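/*
 * Editor's note: a minimal, hypothetical usage sketch for the two queries above;
 * it is not part of the original file and relies only on the headers already
 * included at the top. The helper name is invented for illustration.
 */
#if 0 /* illustration only */
static bool exampleIsPlainGuestRam(PVM pVM, RTGCPHYS GCPhys)
{
    /* PGMPhysIsGCPhysNormal already returns false for unknown addresses, so the
       explicit validity check is redundant; it is shown here only to demonstrate
       both calls. */
    if (!PGMPhysIsGCPhysValid(pVM, GCPhys))
        return false;
    return PGMPhysIsGCPhysNormal(pVM, GCPhys);
}
#endif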
189
190/**
191 * Converts a GC physical address to a HC physical address.
192 *
193 * @returns VINF_SUCCESS on success.
194 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
195 * page but has no physical backing.
196 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
197 * GC physical address.
198 *
199 * @param pVM The VM handle.
200 * @param GCPhys The GC physical address to convert.
201 * @param pHCPhys Where to store the HC physical address on success.
202 */
203VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
204{
205 pgmLock(pVM);
206 PPGMPAGE pPage;
207 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
208 if (RT_SUCCESS(rc))
209 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
210 pgmUnlock(pVM);
211 return rc;
212}
213
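/*
 * Editor's note: a hypothetical caller sketch for PGMPhysGCPhys2HCPhys, not part
 * of the original file. The status-code handling mirrors the @returns text above;
 * the function and variable names are invented.
 */
#if 0 /* illustration only */
static int exampleQueryHCPhys(PVM pVM, RTGCPHYS GCPhys)
{
    RTHCPHYS HCPhys;
    int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
    if (RT_SUCCESS(rc))
        Log(("GCPhys %RGp is backed by HCPhys %RHp\n", GCPhys, HCPhys));
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        Log(("GCPhys %RGp is valid but has no physical backing\n", GCPhys));
    else
        Log(("GCPhys %RGp is not a valid guest physical address (%Rrc)\n", GCPhys, rc));
    return rc;
}
#endif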
214
215/**
216 * Invalidates all page mapping TLBs.
217 *
218 * @param pVM The VM handle.
219 */
220VMMDECL(void) PGMPhysInvalidatePageMapTLB(PVM pVM)
221{
222 pgmLock(pVM);
223 STAM_COUNTER_INC(&pVM->pgm.s.StatPageMapTlbFlushes);
224 /* Clear the shared R0/R3 TLB completely. */
225 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
226 {
227 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
228 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
229 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
230 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
231 }
232 /* @todo clear the RC TLB whenever we add it. */
233 pgmUnlock(pVM);
234}
235
236/**
237 * Invalidates a page mapping TLB entry.
238 *
239 * @param pVM The VM handle.
240 * @param GCPhys The GC physical address of the entry to flush.
241 */
242VMMDECL(void) PGMPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
243{
244 Assert(PGMIsLocked(pVM));
245
246 STAM_COUNTER_INC(&pVM->pgm.s.StatPageMapTlbFlushEntry);
247 /* Clear the shared R0/R3 TLB entry. */
248#ifdef IN_RC
249 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
250 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
251 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
252 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
253 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
254#else
255 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
256 pTlbe->GCPhys = NIL_RTGCPHYS;
257 pTlbe->pPage = 0;
258 pTlbe->pMap = 0;
259 pTlbe->pv = 0;
260#endif
261 /* @todo clear the RC TLB whenever we add it. */
262}
263
264/**
265 * Makes sure that there is at least one handy page ready for use.
266 *
267 * This will also take the appropriate actions when reaching water-marks.
268 *
269 * @returns VBox status code.
270 * @retval VINF_SUCCESS on success.
271 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
272 *
273 * @param pVM The VM handle.
274 *
275 * @remarks Must be called from within the PGM critical section. It may
276 * nip back to ring-3/0 in some cases.
277 */
278static int pgmPhysEnsureHandyPage(PVM pVM)
279{
280 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
281
282 /*
283 * Do we need to do anything special?
284 */
285#ifdef IN_RING3
286 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
287#else
288 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
289#endif
290 {
291 /*
292 * Allocate pages only if we're out of them, or in ring-3, almost out.
293 */
294#ifdef IN_RING3
295 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
296#else
297 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
298#endif
299 {
300 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
301 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
302#ifdef IN_RING3
303 int rc = PGMR3PhysAllocateHandyPages(pVM);
304#else
305 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
306#endif
307 if (RT_UNLIKELY(rc != VINF_SUCCESS))
308 {
309 if (RT_FAILURE(rc))
310 return rc;
311 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
312 if (!pVM->pgm.s.cHandyPages)
313 {
314 LogRel(("PGM: no more handy pages!\n"));
315 return VERR_EM_NO_MEMORY;
316 }
317 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
318 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
319#ifdef IN_RING3
320 REMR3NotifyFF(pVM);
321#else
322 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
323#endif
324 }
325 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
326 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
327 ("%u\n", pVM->pgm.s.cHandyPages),
328 VERR_INTERNAL_ERROR);
329 }
330 else
331 {
332 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
333 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
334#ifndef IN_RING3
335 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
336 {
337 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
338 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
339 }
340#endif
341 }
342 }
343
344 return VINF_SUCCESS;
345}
346
347
348/**
349 * Replace a zero or shared page with a new page that we can write to.
350 *
351 * @returns The following VBox status codes.
352 * @retval VINF_SUCCESS on success, pPage is modified.
353 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
354 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
355 *
356 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
357 *
358 * @param pVM The VM address.
359 * @param pPage The physical page tracking structure. This will
360 * be modified on success.
361 * @param GCPhys The address of the page.
362 *
363 * @remarks Must be called from within the PGM critical section. It may
364 * nip back to ring-3/0 in some cases.
365 *
366 * @remarks This function shouldn't really fail, however if it does
367 * it probably means we've screwed up the size of handy pages and/or
368 * the low-water mark. Or, that some device I/O is causing a lot of
369 * pages to be allocated while the host is in a low-memory
370 * condition. This latter should be handled elsewhere and in a more
371 * controlled manner, it's on the @bugref{3170} todo list...
372 */
373int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
374{
375 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
376
377 /*
378 * Prereqs.
379 */
380 Assert(PGMIsLocked(pVM));
381 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
382 Assert(!PGM_PAGE_IS_MMIO(pPage));
383
384# ifdef PGM_WITH_LARGE_PAGES
385 if ( PGMIsUsingLargePages(pVM)
386 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
387 {
388 int rc = pgmPhysAllocLargePage(pVM, GCPhys);
389 if (rc == VINF_SUCCESS)
390 return rc;
391
392 /* fall back to 4kb pages. */
393 }
394# endif
395
396 /*
397 * Flush any shadow page table mappings of the page.
398 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
399 */
400 bool fFlushTLBs = false;
401 int rc = pgmPoolTrackFlushGCPhys(pVM, GCPhys, pPage, &fFlushTLBs);
402 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
403
404 /*
405 * Ensure that we've got a page handy, take it and use it.
406 */
407 int rc2 = pgmPhysEnsureHandyPage(pVM);
408 if (RT_FAILURE(rc2))
409 {
410 if (fFlushTLBs)
411 PGM_INVL_ALL_VCPU_TLBS(pVM);
412 Assert(rc2 == VERR_EM_NO_MEMORY);
413 return rc2;
414 }
415 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
416 Assert(PGMIsLocked(pVM));
417 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
418 Assert(!PGM_PAGE_IS_MMIO(pPage));
419
420 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
421 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
422 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
423 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
424 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
425 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
426
427 /*
428 * There are one or two actions to be taken the next time we allocate handy pages:
429 * - Tell the GMM (global memory manager) what the page is being used for.
430 * (Speeds up replacement operations - sharing and defragmenting.)
431 * - If the current backing is shared, it must be freed.
432 */
433 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
434 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
435
436 const void *pvSharedPage = NULL;
437
438 if (PGM_PAGE_IS_SHARED(pPage))
439 {
440 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
441 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
442 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
443
444 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
445 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
446 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageReplaceShared));
447 pVM->pgm.s.cSharedPages--;
448
449 /* Grab the address of the page so we can make a copy later on. */
450 rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
451 AssertRC(rc);
452 }
453 else
454 {
455 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
456 STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
457 pVM->pgm.s.cZeroPages--;
458 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
459 }
460
461 /*
462 * Do the PGMPAGE modifications.
463 */
464 pVM->pgm.s.cPrivatePages++;
465 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
466 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
467 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
468 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
469 PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
470
471 /* Copy the shared page contents to the replacement page. */
472 if (pvSharedPage)
473 {
474 void *pvNewPage;
475
476 /* Get the virtual address of the new page. */
477 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage);
478 AssertRC(rc);
479 if (rc == VINF_SUCCESS)
480 {
481 /** @todo write ASMMemCopy */
482 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE);
483 }
484 }
485
486 if ( fFlushTLBs
487 && rc != VINF_PGM_GCPHYS_ALIASED)
488 PGM_INVL_ALL_VCPU_TLBS(pVM);
489 return rc;
490}
491
492#ifdef PGM_WITH_LARGE_PAGES
493/**
494 * Replace a 2 MB range of zero pages with new pages that we can write to.
495 *
496 * @returns The following VBox status codes.
497 * @retval VINF_SUCCESS on success.
498 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
499 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
500 *
501 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
502 *
503 * @param pVM The VM address.
504 * @param GCPhys The address of the page.
505 *
506 * @remarks Must be called from within the PGM critical section. It may
507 * nip back to ring-3/0 in some cases.
508 */
509int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
510{
511 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
512 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
513
514 /*
515 * Prereqs.
516 */
517 Assert(PGMIsLocked(pVM));
518 Assert(PGMIsUsingLargePages(pVM));
519
520 PPGMPAGE pPage;
521 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
522 if ( RT_SUCCESS(rc)
523 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
524 {
525 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pPage);
526
527 /* Don't call this function for already allocated pages. */
528 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
529
530 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
531 && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO)
532 {
533 unsigned iPage;
534
535 GCPhys = GCPhysBase;
536
537 /* Lazy approach: check all pages in the 2 MB range.
538 * The whole range must be ram and unallocated
539 */
540 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
541 {
542 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
543 if ( RT_FAILURE(rc)
544 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
545 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ZERO) /* allocated, monitored or shared means we can't use a large page here */
546 {
547 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_STATE(pPage), rc));
548 break;
549 }
550 Assert(PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
551 GCPhys += PAGE_SIZE;
552 }
553 /* Fetch the start page of the 2 MB range again. */
554 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
555 AssertRC(rc); /* can't fail */
556
557 if (iPage != _2M/PAGE_SIZE)
558 {
559 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
560 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
561 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
562 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
563 }
564 else
565 {
566# ifdef IN_RING3
567 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
568# else
569 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
570# endif
571 if (RT_SUCCESS(rc))
572 {
573 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
574 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageAlloc);
575 return VINF_SUCCESS;
576 }
577 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
578
579 /* If we fail once, it most likely means the host's memory is too fragmented; don't bother trying again. */
580 PGMSetLargePageUsage(pVM, false);
581 return rc;
582 }
583 }
584 }
585 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
586}
587
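/*
 * Editor's note: a small worked sketch of the 2 MB alignment arithmetic used by
 * pgmPhysAllocLargePage above; not part of the original file, names invented.
 * X86_PDE2M_PAE_PG_MASK clears the low 21 bits, and a 2 MB large page covers
 * _2M / PAGE_SIZE == 512 normal 4 KB pages.
 */
#if 0 /* illustration only */
static void exampleLargePageGeometry(RTGCPHYS GCPhys)
{
    RTGCPHYS const GCPhysBase  = GCPhys & X86_PDE2M_PAE_PG_MASK; /* 2 MB aligned base */
    uint32_t const cSmallPages = _2M / PAGE_SIZE;                /* 512 small pages */
    Log(("GCPhys %RGp lies in the large page starting at %RGp (%u small pages)\n",
         GCPhys, GCPhysBase, cSmallPages));
}
#endif
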
588/**
589 * Recheck the entire 2 MB range to see if we can use it again as a large page.
590 *
591 * @returns The following VBox status codes.
592 * @retval VINF_SUCCESS on success, the large page can be used again
593 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
594 *
595 * @param pVM The VM address.
596 * @param GCPhys The address of the page.
597 * @param pLargePage Page structure of the base page
598 */
599int pgmPhysIsValidLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
600{
601 unsigned i;
602
603 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
604
605 GCPhys &= X86_PDE2M_PAE_PG_MASK;
606
607 /* Check the base page. */
608 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
609 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
610 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
611 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
612 {
613 LogFlow(("pgmPhysIsValidLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
614 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
615 }
616
617 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,IsValidLargePage), a);
618 /* Check all remaining pages in the 2 MB range. */
619 GCPhys += PAGE_SIZE;
620 for (i = 1; i < _2M/PAGE_SIZE; i++)
621 {
622 PPGMPAGE pPage;
623 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
624 AssertRCBreak(rc);
625
626 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
627 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
628 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
629 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
630 {
631 LogFlow(("pgmPhysIsValidLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
632 break;
633 }
634
635 GCPhys += PAGE_SIZE;
636 }
637 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,IsValidLargePage), a);
638
639 if (i == _2M/PAGE_SIZE)
640 {
641 PGM_PAGE_SET_PDE_TYPE(pLargePage, PGM_PAGE_PDE_TYPE_PDE);
642 Log(("pgmPhysIsValidLargePage: page %RGp can be reused!\n", GCPhys - _2M));
643 return VINF_SUCCESS;
644 }
645
646 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
647}
648
649#endif /* PGM_WITH_LARGE_PAGES */
650
651/**
652 * Deal with a write monitored page.
653 *
654 * The page is marked written-to and switched to the allocated state.
655 *
656 * @param pVM The VM address.
657 * @param pPage The physical page tracking structure.
658 *
659 * @remarks Called from within the PGM critical section.
660 */
661void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
662{
663 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
664 PGM_PAGE_SET_WRITTEN_TO(pPage);
665 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
666 Assert(pVM->pgm.s.cMonitoredPages > 0);
667 pVM->pgm.s.cMonitoredPages--;
668 pVM->pgm.s.cWrittenToPages++;
669}
670
671
672/**
673 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
674 *
675 * @returns VBox strict status code.
676 * @retval VINF_SUCCESS on success.
677 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
678 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
679 *
680 * @param pVM The VM address.
681 * @param pPage The physical page tracking structure.
682 * @param GCPhys The address of the page.
683 *
684 * @remarks Called from within the PGM critical section.
685 */
686int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
687{
688 Assert(PGMIsLockOwner(pVM));
689 switch (PGM_PAGE_GET_STATE(pPage))
690 {
691 case PGM_PAGE_STATE_WRITE_MONITORED:
692 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
693 /* fall thru */
694 default: /* to shut up GCC */
695 case PGM_PAGE_STATE_ALLOCATED:
696 return VINF_SUCCESS;
697
698 /*
699 * Zero pages can be dummy pages for MMIO or reserved memory,
700 * so we need to check the flags before joining cause with
701 * shared page replacement.
702 */
703 case PGM_PAGE_STATE_ZERO:
704 if (PGM_PAGE_IS_MMIO(pPage))
705 return VERR_PGM_PHYS_PAGE_RESERVED;
706 /* fall thru */
707 case PGM_PAGE_STATE_SHARED:
708 return pgmPhysAllocPage(pVM, pPage, GCPhys);
709
710 /* Not allowed to write to ballooned pages. */
711 case PGM_PAGE_STATE_BALLOONED:
712 return VERR_PGM_PHYS_PAGE_BALLOONED;
713 }
714}
715
716
717/**
718 * Internal usage: Map the page specified by its GMM ID.
719 *
720 * This is similar to pgmPhysPageMap, but takes the GMM page ID instead of a PGMPAGE pointer.
721 *
722 * @returns VBox status code.
723 *
724 * @param pVM The VM handle.
725 * @param idPage The Page ID.
726 * @param HCPhys The physical address (for RC).
727 * @param ppv Where to store the mapping address.
728 *
729 * @remarks Called from within the PGM critical section. The mapping is only
730 * valid while you're inside this section.
731 */
732int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
733{
734 /*
735 * Validation.
736 */
737 Assert(PGMIsLocked(pVM));
738 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
739 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
740 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
741
742#ifdef IN_RC
743 /*
744 * Map it by HCPhys.
745 */
746 return PGMDynMapHCPage(pVM, HCPhys, ppv);
747
748#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
749 /*
750 * Map it by HCPhys.
751 */
752 return pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
753
754#else
755 /*
756 * Find/make Chunk TLB entry for the mapping chunk.
757 */
758 PPGMCHUNKR3MAP pMap;
759 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
760 if (pTlbe->idChunk == idChunk)
761 {
762 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
763 pMap = pTlbe->pChunk;
764 }
765 else
766 {
767 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
768
769 /*
770 * Find the chunk, map it if necessary.
771 */
772 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
773 if (!pMap)
774 {
775# ifdef IN_RING0
776 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
777 AssertRCReturn(rc, rc);
778 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
779 Assert(pMap);
780# else
781 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
782 if (RT_FAILURE(rc))
783 return rc;
784# endif
785 }
786
787 /*
788 * Enter it into the Chunk TLB.
789 */
790 pTlbe->idChunk = idChunk;
791 pTlbe->pChunk = pMap;
792 pMap->iAge = 0;
793 }
794
795 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
796 return VINF_SUCCESS;
797#endif
798}
799
800
801/**
802 * Maps a page into the current virtual address space so it can be accessed.
803 *
804 * @returns VBox status code.
805 * @retval VINF_SUCCESS on success.
806 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
807 *
808 * @param pVM The VM address.
809 * @param pPage The physical page tracking structure.
810 * @param GCPhys The address of the page.
811 * @param ppMap Where to store the address of the mapping tracking structure.
812 * @param ppv Where to store the mapping address of the page. The page
813 * offset is masked off!
814 *
815 * @remarks Called from within the PGM critical section.
816 */
817static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
818{
819 Assert(PGMIsLocked(pVM));
820
821#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
822 /*
823 * Just some sketchy GC/R0-darwin code.
824 */
825 *ppMap = NULL;
826 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
827 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
828# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
829 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
830# else
831 PGMDynMapHCPage(pVM, HCPhys, ppv);
832# endif
833 return VINF_SUCCESS;
834
835#else /* IN_RING3 || IN_RING0 */
836
837
838 /*
839 * Special case: ZERO and MMIO2 pages.
840 */
841 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
842 if (idChunk == NIL_GMM_CHUNKID)
843 {
844 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
845 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
846 {
847 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
848 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
849 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
850 *ppv = (void *)((uintptr_t)pRam->pvR3 + (uintptr_t)((GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK) - pRam->GCPhys));
851 }
852 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
853 {
854 /** @todo deal with aliased MMIO2 pages somehow...
855 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
856 * them, that would also avoid this mess. It would actually be kind of
857 * elegant... */
858 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
859 }
860 else
861 {
862 /** @todo handle MMIO2 */
863 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
864 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
865 ("pPage=%R[pgmpage]\n", pPage),
866 VERR_INTERNAL_ERROR_2);
867 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
868 }
869 *ppMap = NULL;
870 return VINF_SUCCESS;
871 }
872
873 /*
874 * Find/make Chunk TLB entry for the mapping chunk.
875 */
876 PPGMCHUNKR3MAP pMap;
877 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
878 if (pTlbe->idChunk == idChunk)
879 {
880 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
881 pMap = pTlbe->pChunk;
882 }
883 else
884 {
885 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
886
887 /*
888 * Find the chunk, map it if necessary.
889 */
890 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
891 if (!pMap)
892 {
893#ifdef IN_RING0
894 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
895 AssertRCReturn(rc, rc);
896 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
897 Assert(pMap);
898#else
899 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
900 if (RT_FAILURE(rc))
901 return rc;
902#endif
903 }
904
905 /*
906 * Enter it into the Chunk TLB.
907 */
908 pTlbe->idChunk = idChunk;
909 pTlbe->pChunk = pMap;
910 pMap->iAge = 0;
911 }
912
913 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
914 *ppMap = pMap;
915 return VINF_SUCCESS;
916#endif /* IN_RING3 */
917}
918
919
920/**
921 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
922 *
923 * This is typically used in paths where we cannot use the TLB methods (like ROM
924 * pages) or where there is no point in using them since we won't get many hits.
925 *
926 * @returns VBox strict status code.
927 * @retval VINF_SUCCESS on success.
928 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
929 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
930 *
931 * @param pVM The VM address.
932 * @param pPage The physical page tracking structure.
933 * @param GCPhys The address of the page.
934 * @param ppv Where to store the mapping address of the page. The page
935 * offset is masked off!
936 *
937 * @remarks Called from within the PGM critical section. The mapping is only
938 * valid while you're inside this section.
939 */
940int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
941{
942 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
943 if (RT_SUCCESS(rc))
944 {
945 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
946 PPGMPAGEMAP pMapIgnore;
947 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
948 if (RT_FAILURE(rc2)) /* preserve rc */
949 rc = rc2;
950 }
951 return rc;
952}
953
954
955/**
956 * Maps a page into the current virtual address space so it can be accessed for
957 * both writing and reading.
958 *
959 * This is typically used in paths where we cannot use the TLB methods (like ROM
960 * pages) or where there is no point in using them since we won't get many hits.
961 *
962 * @returns VBox status code.
963 * @retval VINF_SUCCESS on success.
964 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
965 *
966 * @param pVM The VM address.
967 * @param pPage The physical page tracking structure. Must be in the
968 * allocated state.
969 * @param GCPhys The address of the page.
970 * @param ppv Where to store the mapping address of the page. The page
971 * offset is masked off!
972 *
973 * @remarks Called from within the PGM critical section. The mapping is only
974 * valid while you're inside this section.
975 */
976int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
977{
978 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
979 PPGMPAGEMAP pMapIgnore;
980 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
981}
982
983
984/**
985 * Maps a page into the current virtual address space so it can be accessed for
986 * reading.
987 *
988 * This is typically used in paths where we cannot use the TLB methods (like ROM
989 * pages) or where there is no point in using them since we won't get many hits.
990 *
991 * @returns VBox status code.
992 * @retval VINF_SUCCESS on success.
993 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
994 *
995 * @param pVM The VM address.
996 * @param pPage The physical page tracking structure.
997 * @param GCPhys The address of the page.
998 * @param ppv Where to store the mapping address of the page. The page
999 * offset is masked off!
1000 *
1001 * @remarks Called from within the PGM critical section. The mapping is only
1002 * valid while you're inside this section.
1003 */
1004int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1005{
1006 PPGMPAGEMAP pMapIgnore;
1007 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1008}
1009
1010
1011#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1012/**
1013 * Load a guest page into the ring-3 physical TLB.
1014 *
1015 * @returns VBox status code.
1016 * @retval VINF_SUCCESS on success
1017 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1018 * @param pPGM The PGM instance pointer.
1019 * @param GCPhys The guest physical address in question.
1020 */
1021int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
1022{
1023 Assert(PGMIsLocked(PGM2VM(pPGM)));
1024 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
1025
1026 /*
1027 * Find the ram range.
1028 * 99.8% of requests are expected to be in the first range.
1029 */
1030 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
1031 RTGCPHYS off = GCPhys - pRam->GCPhys;
1032 if (RT_UNLIKELY(off >= pRam->cb))
1033 {
1034 do
1035 {
1036 pRam = pRam->CTX_SUFF(pNext);
1037 if (!pRam)
1038 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1039 off = GCPhys - pRam->GCPhys;
1040 } while (off >= pRam->cb);
1041 }
1042
1043 /*
1044 * Map the page.
1045 * Make a special case for the zero page as it is kind of special.
1046 */
1047 PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
1048 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1049 if ( !PGM_PAGE_IS_ZERO(pPage)
1050 && !PGM_PAGE_IS_BALLOONED(pPage))
1051 {
1052 void *pv;
1053 PPGMPAGEMAP pMap;
1054 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
1055 if (RT_FAILURE(rc))
1056 return rc;
1057 pTlbe->pMap = pMap;
1058 pTlbe->pv = pv;
1059 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1060 }
1061 else
1062 {
1063 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
1064 pTlbe->pMap = NULL;
1065 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
1066 }
1067#ifdef PGM_WITH_PHYS_TLB
1068 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1069#else
1070 pTlbe->GCPhys = NIL_RTGCPHYS;
1071#endif
1072 pTlbe->pPage = pPage;
1073 return VINF_SUCCESS;
1074}
1075
1076
1077/**
1078 * Load a guest page into the ring-3 physical TLB.
1079 *
1080 * @returns VBox status code.
1081 * @retval VINF_SUCCESS on success
1082 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1083 *
1084 * @param pPGM The PGM instance pointer.
1085 * @param pPage Pointer to the PGMPAGE structure corresponding to
1086 * GCPhys.
1087 * @param GCPhys The guest physical address in question.
1088 */
1089int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1090{
1091 Assert(PGMIsLocked(PGM2VM(pPGM)));
1092 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
1093
1094 /*
1095 * Map the page.
1096 * Make a special case for the zero page as it is kind of special.
1097 */
1098 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1099 if ( !PGM_PAGE_IS_ZERO(pPage)
1100 && !PGM_PAGE_IS_BALLOONED(pPage))
1101 {
1102 void *pv;
1103 PPGMPAGEMAP pMap;
1104 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
1105 if (RT_FAILURE(rc))
1106 return rc;
1107 pTlbe->pMap = pMap;
1108 pTlbe->pv = pv;
1109 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1110 }
1111 else
1112 {
1113 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
1114 pTlbe->pMap = NULL;
1115 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
1116 }
1117#ifdef PGM_WITH_PHYS_TLB
1118 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1119#else
1120 pTlbe->GCPhys = NIL_RTGCPHYS;
1121#endif
1122 pTlbe->pPage = pPage;
1123 return VINF_SUCCESS;
1124}
1125#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1126
1127
1128/**
1129 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1130 * own the PGM lock and therefore not need to lock the mapped page.
1131 *
1132 * @returns VBox status code.
1133 * @retval VINF_SUCCESS on success.
1134 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1135 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1136 *
1137 * @param pVM The VM handle.
1138 * @param GCPhys The guest physical address of the page that should be mapped.
1139 * @param pPage Pointer to the PGMPAGE structure for the page.
1140 * @param ppv Where to store the address corresponding to GCPhys.
1141 *
1142 * @internal
1143 */
1144int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1145{
1146 int rc;
1147 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1148 Assert(PGMIsLocked(pVM));
1149
1150 /*
1151 * Make sure the page is writable.
1152 */
1153 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1154 {
1155 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1156 if (RT_FAILURE(rc))
1157 return rc;
1158 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1159 }
1160 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1161
1162 /*
1163 * Get the mapping address.
1164 */
1165#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1166 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
1167#else
1168 PPGMPAGEMAPTLBE pTlbe;
1169 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1170 if (RT_FAILURE(rc))
1171 return rc;
1172 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1173#endif
1174 return VINF_SUCCESS;
1175}
1176
1177
1178/**
1179 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1180 * own the PGM lock and therefore not need to lock the mapped page.
1181 *
1182 * @returns VBox status code.
1183 * @retval VINF_SUCCESS on success.
1184 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1185 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1186 *
1187 * @param pVM The VM handle.
1188 * @param GCPhys The guest physical address of the page that should be mapped.
1189 * @param pPage Pointer to the PGMPAGE structure for the page.
1190 * @param ppv Where to store the address corresponding to GCPhys.
1191 *
1192 * @internal
1193 */
1194int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
1195{
1196 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1197 Assert(PGMIsLocked(pVM));
1198 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1199
1200 /*
1201 * Get the mapping address.
1202 */
1203#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1204 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1205#else
1206 PPGMPAGEMAPTLBE pTlbe;
1207 int rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1208 if (RT_FAILURE(rc))
1209 return rc;
1210 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1211#endif
1212 return VINF_SUCCESS;
1213}
1214
1215
1216/**
1217 * Requests the mapping of a guest page into the current context.
1218 *
1219 * This API should only be used for very short periods, as it will consume
1220 * scarce resources (R0 and GC) in the mapping cache. When you're done
1221 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1222 *
1223 * This API will assume your intention is to write to the page, and will
1224 * therefore replace shared and zero pages. If you do not intend to modify
1225 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1226 *
1227 * @returns VBox status code.
1228 * @retval VINF_SUCCESS on success.
1229 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1230 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1231 *
1232 * @param pVM The VM handle.
1233 * @param GCPhys The guest physical address of the page that should be mapped.
1234 * @param ppv Where to store the address corresponding to GCPhys.
1235 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1236 *
1237 * @remarks The caller is responsible for dealing with access handlers.
1238 * @todo Add an informational return code for pages with access handlers?
1239 *
1240 * @remark Avoid calling this API from within critical sections (other than the
1241 * PGM one) because of the deadlock risk. External threads may need to
1242 * delegate jobs to the EMTs.
1243 * @thread Any thread.
1244 */
1245VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1246{
1247#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1248
1249 /*
1250 * Find the page and make sure it's writable.
1251 */
1252 PPGMPAGE pPage;
1253 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1254 if (RT_SUCCESS(rc))
1255 {
1256 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1257 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1258 if (RT_SUCCESS(rc))
1259 {
1260 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1261# if 0
1262 pLock->pvMap = 0;
1263 pLock->pvPage = pPage;
1264# else
1265 pLock->u32Dummy = UINT32_MAX;
1266# endif
1267 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1268 rc = VINF_SUCCESS;
1269 }
1270 }
1271
1272#else /* IN_RING3 || IN_RING0 */
1273 int rc = pgmLock(pVM);
1274 AssertRCReturn(rc, rc);
1275
1276 /*
1277 * Query the Physical TLB entry for the page (may fail).
1278 */
1279 PPGMPAGEMAPTLBE pTlbe;
1280 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1281 if (RT_SUCCESS(rc))
1282 {
1283 /*
1284 * If the page is shared, the zero page, or being write monitored
1285 * it must be converted to a page that's writable if possible.
1286 */
1287 PPGMPAGE pPage = pTlbe->pPage;
1288 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1289 {
1290 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1291 if (RT_SUCCESS(rc))
1292 {
1293 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1294 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1295 }
1296 }
1297 if (RT_SUCCESS(rc))
1298 {
1299 /*
1300 * Now, just perform the locking and calculate the return address.
1301 */
1302 PPGMPAGEMAP pMap = pTlbe->pMap;
1303 if (pMap)
1304 pMap->cRefs++;
1305
1306 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1307 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1308 {
1309 if (cLocks == 0)
1310 pVM->pgm.s.cWriteLockedPages++;
1311 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1312 }
1313 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
1314 {
1315 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1316 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
1317 if (pMap)
1318 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1319 }
1320
1321 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1322 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1323 pLock->pvMap = pMap;
1324 }
1325 }
1326
1327 pgmUnlock(pVM);
1328#endif /* IN_RING3 || IN_RING0 */
1329 return rc;
1330}
1331
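/*
 * Editor's note: a hypothetical usage sketch for the write-mapping API above
 * together with PGMPhysReleasePageMappingLock; not part of the original file.
 * Per the @remark above it assumes the caller holds no other critical section,
 * and the helper/variable names are invented.
 */
#if 0 /* illustration only */
static int exampleWriteByteToGuestPage(PVM pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;                   /* keep the access short-lived */
        PGMPhysReleasePageMappingLock(pVM, &Lock); /* release ASAP, as documented */
    }
    return rc;
}
#endif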
1332
1333/**
1334 * Requests the mapping of a guest page into the current context.
1335 *
1336 * This API should only be used for very short periods, as it will consume
1337 * scarce resources (R0 and GC) in the mapping cache. When you're done
1338 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1339 *
1340 * @returns VBox status code.
1341 * @retval VINF_SUCCESS on success.
1342 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1343 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1344 *
1345 * @param pVM The VM handle.
1346 * @param GCPhys The guest physical address of the page that should be mapped.
1347 * @param ppv Where to store the address corresponding to GCPhys.
1348 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1349 *
1350 * @remarks The caller is responsible for dealing with access handlers.
1351 * @todo Add an informational return code for pages with access handlers?
1352 *
1353 * @remark Avoid calling this API from within critical sections (other than
1354 * the PGM one) because of the deadlock risk.
1355 * @thread Any thread.
1356 */
1357VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1358{
1359#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1360
1361 /*
1362 * Find the page and make sure it's readable.
1363 */
1364 PPGMPAGE pPage;
1365 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1366 if (RT_SUCCESS(rc))
1367 {
1368 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1369 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1370 else
1371 {
1372 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1373# if 0
1374 pLock->pvMap = 0;
1375 pLock->pvPage = pPage;
1376# else
1377 pLock->u32Dummy = UINT32_MAX;
1378# endif
1379 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1380 rc = VINF_SUCCESS;
1381 }
1382 }
1383
1384#else /* IN_RING3 || IN_RING0 */
1385 int rc = pgmLock(pVM);
1386 AssertRCReturn(rc, rc);
1387
1388 /*
1389 * Query the Physical TLB entry for the page (may fail).
1390 */
1391 PPGMPAGEMAPTLBE pTlbe;
1392 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1393 if (RT_SUCCESS(rc))
1394 {
1395 /* MMIO pages don't have any readable backing. */
1396 PPGMPAGE pPage = pTlbe->pPage;
1397 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1398 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1399 else
1400 {
1401 /*
1402 * Now, just perform the locking and calculate the return address.
1403 */
1404 PPGMPAGEMAP pMap = pTlbe->pMap;
1405 if (pMap)
1406 pMap->cRefs++;
1407
1408 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1409 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1410 {
1411 if (cLocks == 0)
1412 pVM->pgm.s.cReadLockedPages++;
1413 PGM_PAGE_INC_READ_LOCKS(pPage);
1414 }
1415 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
1416 {
1417 PGM_PAGE_INC_READ_LOCKS(pPage);
1418 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
1419 if (pMap)
1420 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1421 }
1422
1423 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1424 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1425 pLock->pvMap = pMap;
1426 }
1427 }
1428
1429 pgmUnlock(pVM);
1430#endif /* IN_RING3 || IN_RING0 */
1431 return rc;
1432}
1433
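/*
 * Editor's note: the read-only counterpart of the sketch above, again not part of
 * the original file and with invented names.
 */
#if 0 /* illustration only */
static int exampleReadByteFromGuestPage(PVM pVM, RTGCPHYS GCPhys, uint8_t *pbValue)
{
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pbValue = *(uint8_t const *)pv;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif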
1434
1435/**
1436 * Requests the mapping of a guest page given by virtual address into the current context.
1437 *
1438 * This API should only be used for very short periods, as it will consume
1439 * scarce resources (R0 and GC) in the mapping cache. When you're done
1440 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1441 *
1442 * This API will assume your intention is to write to the page, and will
1443 * therefore replace shared and zero pages. If you do not intend to modify
1444 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1445 *
1446 * @returns VBox status code.
1447 * @retval VINF_SUCCESS on success.
1448 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1449 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1450 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1451 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1452 *
1453 * @param pVCpu VMCPU handle.
1454 * @param GCPtr The guest virtual address of the page that should be mapped.
1455 * @param ppv Where to store the address corresponding to GCPtr.
1456 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1457 *
1458 * @remark Avoid calling this API from within critical sections (other than
1459 * the PGM one) because of the deadlock risk.
1460 * @thread EMT
1461 */
1462VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1463{
1464 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1465 RTGCPHYS GCPhys;
1466 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1467 if (RT_SUCCESS(rc))
1468 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1469 return rc;
1470}
1471
1472
1473/**
1474 * Requests the mapping of a guest page given by virtual address into the current context.
1475 *
1476 * This API should only be used for very short periods, as it will consume
1477 * scarce resources (R0 and GC) in the mapping cache. When you're done
1478 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1479 *
1480 * @returns VBox status code.
1481 * @retval VINF_SUCCESS on success.
1482 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1483 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1484 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1485 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1486 *
1487 * @param pVCpu VMCPU handle.
1488 * @param GCPtr The guest virtual address of the page that should be mapped.
1489 * @param ppv Where to store the address corresponding to GCPtr.
1490 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1491 *
1492 * @remark Avoid calling this API from within critical sections (other than
1493 * the PGM one) because of the deadlock risk.
1494 * @thread EMT
1495 */
1496VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1497{
1498 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1499 RTGCPHYS GCPhys;
1500 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1501 if (RT_SUCCESS(rc))
1502 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1503 return rc;
1504}
1505
1506
1507/**
1508 * Release the mapping of a guest page.
1509 *
1510 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1511 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1512 *
1513 * @param pVM The VM handle.
1514 * @param pLock The lock structure initialized by the mapping function.
1515 */
1516VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1517{
1518#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1519 /* currently nothing to do here. */
1520 Assert(pLock->u32Dummy == UINT32_MAX);
1521 pLock->u32Dummy = 0;
1522
1523#else /* IN_RING3 || IN_RING0 */
1524 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1525 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1526 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1527
1528 pLock->uPageAndType = 0;
1529 pLock->pvMap = NULL;
1530
1531 pgmLock(pVM);
1532 if (fWriteLock)
1533 {
1534 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1535 Assert(cLocks > 0);
1536 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1537 {
1538 if (cLocks == 1)
1539 {
1540 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1541 pVM->pgm.s.cWriteLockedPages--;
1542 }
1543 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1544 }
1545
1546 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1547 {
1548 PGM_PAGE_SET_WRITTEN_TO(pPage);
1549 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1550 Assert(pVM->pgm.s.cMonitoredPages > 0);
1551 pVM->pgm.s.cMonitoredPages--;
1552 pVM->pgm.s.cWrittenToPages++;
1553 }
1554 }
1555 else
1556 {
1557 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1558 Assert(cLocks > 0);
1559 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1560 {
1561 if (cLocks == 1)
1562 {
1563 Assert(pVM->pgm.s.cReadLockedPages > 0);
1564 pVM->pgm.s.cReadLockedPages--;
1565 }
1566 PGM_PAGE_DEC_READ_LOCKS(pPage);
1567 }
1568 }
1569
1570 if (pMap)
1571 {
1572 Assert(pMap->cRefs >= 1);
1573 pMap->cRefs--;
1574 pMap->iAge = 0;
1575 }
1576 pgmUnlock(pVM);
1577#endif /* IN_RING3 || IN_RING0 */
1578}
1579
1580
1581/**
1582 * Converts a GC physical address to a HC ring-3 pointer.
1583 *
1584 * @returns VINF_SUCCESS on success.
1585 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1586 * page but has no physical backing.
1587 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1588 * GC physical address.
1589 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1590 * a dynamic ram chunk boundary
1591 *
1592 * @param pVM The VM handle.
1593 * @param GCPhys The GC physical address to convert.
1594 * @param cbRange Physical range
1595 * @param pR3Ptr Where to store the R3 pointer on success.
1596 *
1597 * @deprecated Avoid when possible!
1598 */
1599VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
1600{
1601/** @todo this is kind of hacky and needs some more work. */
1602#ifndef DEBUG_sandervl
1603 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1604#endif
1605
1606 Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): dont use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
1607#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1608 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1609#else
1610 pgmLock(pVM);
1611
1612 PPGMRAMRANGE pRam;
1613 PPGMPAGE pPage;
1614 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
1615 if (RT_SUCCESS(rc))
1616 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
1617
1618 pgmUnlock(pVM);
1619 Assert(rc <= VINF_SUCCESS);
1620 return rc;
1621#endif
1622}
1623
1624
1625#ifdef VBOX_STRICT
1626/**
1627 * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
1628 *
1629 * @returns The R3Ptr, NIL_RTR3PTR on failure.
1630 * @param pVM The VM handle.
1631 * @param GCPhys The GC physical address.
1632 * @param cbRange Physical range.
1633 *
1634 * @deprecated Avoid when possible.
1635 */
1636VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
1637{
1638 RTR3PTR R3Ptr;
1639 int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
1640 if (RT_SUCCESS(rc))
1641 return R3Ptr;
1642 return NIL_RTR3PTR;
1643}
1644#endif /* VBOX_STRICT */
1645
1646
1647/**
1648 * Converts a guest pointer to a GC physical address.
1649 *
1650 * This uses the current CR3/CR0/CR4 of the guest.
1651 *
1652 * @returns VBox status code.
1653 * @param pVCpu The VMCPU handle.
1654 * @param GCPtr The guest pointer to convert.
1655 * @param pGCPhys Where to store the GC physical address.
1656 */
1657VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1658{
1659 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1660 if (pGCPhys && RT_SUCCESS(rc))
1661 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1662 return rc;
1663}
1664
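/*
 * Usage sketch (illustrative; GCPtrGuest is a hypothetical guest linear
 * address): translating it with the current guest paging mode.  Note that the
 * page offset of the pointer is preserved in the result.
 *
 *     RTGCPHYS GCPhys;
 *     int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtrGuest, &GCPhys);
 *     if (RT_SUCCESS(rc))
 *     {
 *         ... GCPhys == translated page address | (GCPtrGuest & PAGE_OFFSET_MASK) ...
 *     }
 */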
1665
1666/**
1667 * Converts a guest pointer to a HC physical address.
1668 *
1669 * This uses the current CR3/CR0/CR4 of the guest.
1670 *
1671 * @returns VBox status code.
1672 * @param pVCpu The VMCPU handle.
1673 * @param GCPtr The guest pointer to convert.
1674 * @param pHCPhys Where to store the HC physical address.
1675 */
1676VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
1677{
1678 PVM pVM = pVCpu->CTX_SUFF(pVM);
1679 RTGCPHYS GCPhys;
1680 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1681 if (RT_SUCCESS(rc))
1682 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
1683 return rc;
1684}
1685
1686
1687/**
1688 * Converts a guest pointer to a R3 pointer.
1689 *
1690 * This uses the current CR3/CR0/CR4 of the guest.
1691 *
1692 * @returns VBox status code.
1693 * @param pVCpu The VMCPU handle.
1694 * @param GCPtr The guest pointer to convert.
1695 * @param pR3Ptr Where to store the R3 virtual address.
1696 *
1697 * @deprecated Don't use this.
1698 */
1699VMMDECL(int) PGMPhysGCPtr2R3Ptr(PVMCPU pVCpu, RTGCPTR GCPtr, PRTR3PTR pR3Ptr)
1700{
1701 PVM pVM = pVCpu->CTX_SUFF(pVM);
1702 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1703 RTGCPHYS GCPhys;
1704 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1705 if (RT_SUCCESS(rc))
1706 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pR3Ptr);
1707 return rc;
1708}
1709
1710
1711
1712#undef LOG_GROUP
1713#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
1714
1715
1716#ifdef IN_RING3
1717/**
1718 * Cache PGMPhys memory access
1719 *
1720 * @param pVM VM Handle.
1721 * @param pCache Cache structure pointer
1722 * @param GCPhys GC physical address
1723 * @param pbR3 R3 pointer corresponding to the physical page
1724 *
1725 * @thread EMT.
1726 */
1727static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
1728{
1729 uint32_t iCacheIndex;
1730
1731 Assert(VM_IS_EMT(pVM));
1732
1733 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
1734 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
1735
1736 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
1737
1738 ASMBitSet(&pCache->aEntries, iCacheIndex);
1739
1740 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
1741 pCache->Entry[iCacheIndex].pbR3 = pbR3;
1742}
1743#endif /* IN_RING3 */
1744
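/*
 * Worked example (illustrative, assuming the usual PAGE_SHIFT of 12): a
 * hypothetical GCPhys of 0x00123456 belongs to page frame 0x123, so
 * pgmPhysCacheAdd stores it in slot (0x123 & PGM_MAX_PHYSCACHE_ENTRIES_MASK),
 * recording the page aligned GCPhys 0x00123000 together with the page aligned
 * ring-3 pointer.
 */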
1745
1746/**
1747 * Deals with reading from a page with one or more ALL access handlers.
1748 *
1749 * @returns VBox status code. Can be ignored in ring-3.
1750 * @retval VINF_SUCCESS.
1751 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1752 *
1753 * @param pVM The VM handle.
1754 * @param pPage The page descriptor.
1755 * @param GCPhys The physical address to start reading at.
1756 * @param pvBuf Where to put the bits we read.
1757 * @param cb How much to read - less or equal to a page.
1758 */
1759static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
1760{
1761 /*
1762 * The most frequent accesses here are MMIO and shadowed ROM.
1763 * The current code ASSUMES that all these access handlers cover full pages!
1764 */
1765
1766 /*
1767 * Whatever we do we need the source page, map it first.
1768 */
1769 const void *pvSrc = NULL;
1770 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
1771 if (RT_FAILURE(rc))
1772 {
1773 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1774 GCPhys, pPage, rc));
1775 memset(pvBuf, 0xff, cb);
1776 return VINF_SUCCESS;
1777 }
1778 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1779
1780 /*
1781 * Deal with any physical handlers.
1782 */
1783 PPGMPHYSHANDLER pPhys = NULL;
1784 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
1785 {
1786#ifdef IN_RING3
1787 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1788 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1789 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
1790 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
1791 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1792 Assert(pPhys->CTX_SUFF(pfnHandler));
1793
1794 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
1795 void *pvUser = pPhys->CTX_SUFF(pvUser);
1796
1797 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
1798 STAM_PROFILE_START(&pPhys->Stat, h);
1799 Assert(PGMIsLockOwner(pVM));
1800 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1801 pgmUnlock(pVM);
1802 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
1803 pgmLock(pVM);
1804# ifdef VBOX_WITH_STATISTICS
1805 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1806 if (pPhys)
1807 STAM_PROFILE_STOP(&pPhys->Stat, h);
1808# else
1809 pPhys = NULL; /* might not be valid anymore. */
1810# endif
1811 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
1812#else
1813 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1814 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1815 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1816#endif
1817 }
1818
1819 /*
1820 * Deal with any virtual handlers.
1821 */
1822 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
1823 {
1824 unsigned iPage;
1825 PPGMVIRTHANDLER pVirt;
1826
1827 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
1828 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
1829 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
1830 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1831 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
1832
1833#ifdef IN_RING3
1834 if (pVirt->pfnHandlerR3)
1835 {
1836 if (!pPhys)
1837 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1838 else
1839 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
1840 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1841 + (iPage << PAGE_SHIFT)
1842 + (GCPhys & PAGE_OFFSET_MASK);
1843
1844 STAM_PROFILE_START(&pVirt->Stat, h);
1845 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
1846 STAM_PROFILE_STOP(&pVirt->Stat, h);
1847 if (rc2 == VINF_SUCCESS)
1848 rc = VINF_SUCCESS;
1849 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
1850 }
1851 else
1852 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1853#else
1854 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1855 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1856 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1857#endif
1858 }
1859
1860 /*
1861 * Take the default action.
1862 */
1863 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1864 memcpy(pvBuf, pvSrc, cb);
1865 return rc;
1866}
1867
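/*
 * Handler sketch (illustrative; the handler name and its internals are
 * hypothetical, the parameters mirror the pfnHandler invocations in this
 * file): a ring-3 physical access handler returns VINF_PGM_HANDLER_DO_DEFAULT
 * to ask the caller for the normal memcpy, or VINF_SUCCESS when it has dealt
 * with the access itself.
 *
 *     static DECLCALLBACK(int) exampleAccessHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys,
 *                                                   void *pvBuf, size_t cbBuf,
 *                                                   PGMACCESSTYPE enmAccessType, void *pvUser)
 *     {
 *         if (enmAccessType == PGMACCESSTYPE_READ)
 *             return VINF_PGM_HANDLER_DO_DEFAULT;    ... let the caller copy from pvPhys ...
 *         ... intercept the write here, e.g. ignore it for shadowed ROM ...
 *         return VINF_SUCCESS;                       ... handled, no default copy wanted ...
 *     }
 */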
1868
1869/**
1870 * Read physical memory.
1871 *
1872 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
1873 * want to ignore those.
1874 *
1875 * @returns VBox status code. Can be ignored in ring-3.
1876 * @retval VINF_SUCCESS.
1877 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1878 *
1879 * @param pVM VM Handle.
1880 * @param GCPhys Physical address start reading from.
1881 * @param pvBuf Where to put the read bits.
1882 * @param cbRead How many bytes to read.
1883 */
1884VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1885{
1886 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
1887 LogFlow(("PGMPhysRead: %RGp %zu\n", GCPhys, cbRead));
1888
1889 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysRead));
1890 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
1891
1892 pgmLock(pVM);
1893
1894 /*
1895 * Copy loop on ram ranges.
1896 */
1897 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1898 for (;;)
1899 {
1900 /* Find range. */
1901 while (pRam && GCPhys > pRam->GCPhysLast)
1902 pRam = pRam->CTX_SUFF(pNext);
1903 /* Inside range or not? */
1904 if (pRam && GCPhys >= pRam->GCPhys)
1905 {
1906 /*
1907 * Must work our way thru this page by page.
1908 */
1909 RTGCPHYS off = GCPhys - pRam->GCPhys;
1910 while (off < pRam->cb)
1911 {
1912 unsigned iPage = off >> PAGE_SHIFT;
1913 PPGMPAGE pPage = &pRam->aPages[iPage];
1914 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1915 if (cb > cbRead)
1916 cb = cbRead;
1917
1918 /*
1919 * Any ALL access handlers?
1920 */
1921 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
1922 {
1923 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
1924 if (RT_FAILURE(rc))
1925 {
1926 pgmUnlock(pVM);
1927 return rc;
1928 }
1929 }
1930 else
1931 {
1932 /*
1933 * Get the pointer to the page.
1934 */
1935 const void *pvSrc;
1936 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
1937 if (RT_SUCCESS(rc))
1938 memcpy(pvBuf, pvSrc, cb);
1939 else
1940 {
1941 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1942 pRam->GCPhys + off, pPage, rc));
1943 memset(pvBuf, 0xff, cb);
1944 }
1945 }
1946
1947 /* next page */
1948 if (cb >= cbRead)
1949 {
1950 pgmUnlock(pVM);
1951 return VINF_SUCCESS;
1952 }
1953 cbRead -= cb;
1954 off += cb;
1955 pvBuf = (char *)pvBuf + cb;
1956 } /* walk pages in ram range. */
1957
1958 GCPhys = pRam->GCPhysLast + 1;
1959 }
1960 else
1961 {
1962 LogFlow(("PGMPhysRead: Unassigned %RGp size=%zu\n", GCPhys, cbRead));
1963
1964 /*
1965 * Unassigned address space.
1966 */
1967 if (!pRam)
1968 break;
1969 size_t cb = pRam->GCPhys - GCPhys;
1970 if (cb >= cbRead)
1971 {
1972 memset(pvBuf, 0xff, cbRead);
1973 break;
1974 }
1975 memset(pvBuf, 0xff, cb);
1976
1977 cbRead -= cb;
1978 pvBuf = (char *)pvBuf + cb;
1979 GCPhys += cb;
1980 }
1981 } /* Ram range walk */
1982
1983 pgmUnlock(pVM);
1984 return VINF_SUCCESS;
1985}
1986
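/*
 * Usage sketch (illustrative; GCPhys and the buffer are hypothetical): reading
 * a small guest structure with full handler/MMIO semantics.  In ring-3 the
 * status can be ignored; in R0/RC a failure status is typically propagated so
 * the access can be redone in ring-3.
 *
 *     uint8_t abDescriptor[8];
 *     int rc = PGMPhysRead(pVM, GCPhys, abDescriptor, sizeof(abDescriptor));
 *     if (RT_FAILURE(rc))
 *         return rc;    ... only possible in R0/RC (VERR_PGM_PHYS_WR_HIT_HANDLER) ...
 */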
1987
1988/**
1989 * Deals with writing to a page with one or more WRITE or ALL access handlers.
1990 *
1991 * @returns VBox status code. Can be ignored in ring-3.
1992 * @retval VINF_SUCCESS.
1993 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1994 *
1995 * @param pVM The VM handle.
1996 * @param pPage The page descriptor.
1997 * @param GCPhys The physical address to start writing at.
1998 * @param pvBuf What to write.
1999 * @param cbWrite How much to write - less or equal to a page.
2000 */
2001static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
2002{
2003 void *pvDst = NULL;
2004 int rc;
2005
2006 /*
2007 * Give priority to physical handlers (like #PF does).
2008 *
2009 * Hope for a lonely physical handler first that covers the whole
2010 * write area. This should be a pretty frequent case with MMIO and
2011 * the heavy usage of full page handlers in the page pool.
2012 */
2013 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
2014 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
2015 {
2016 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2017 if (pCur)
2018 {
2019 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2020 Assert(pCur->CTX_SUFF(pfnHandler));
2021
2022 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2023 if (cbRange > cbWrite)
2024 cbRange = cbWrite;
2025
2026#ifndef IN_RING3
2027 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2028 NOREF(cbRange);
2029 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2030 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2031
2032#else /* IN_RING3 */
2033 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2034 if (!PGM_PAGE_IS_MMIO(pPage))
2035 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2036 else
2037 rc = VINF_SUCCESS;
2038 if (RT_SUCCESS(rc))
2039 {
2040 PFNPGMR3PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
2041 void *pvUser = pCur->CTX_SUFF(pvUser);
2042
2043 STAM_PROFILE_START(&pCur->Stat, h);
2044 Assert(PGMIsLockOwner(pVM));
2045 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2046 pgmUnlock(pVM);
2047 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2048 pgmLock(pVM);
2049# ifdef VBOX_WITH_STATISTICS
2050 pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2051 if (pCur)
2052 STAM_PROFILE_STOP(&pCur->Stat, h);
2053# else
2054 pCur = NULL; /* might not be valid anymore. */
2055# endif
2056 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2057 memcpy(pvDst, pvBuf, cbRange);
2058 else
2059 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
2060 }
2061 else
2062 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2063 GCPhys, pPage, rc), rc);
2064 if (RT_LIKELY(cbRange == cbWrite))
2065 return VINF_SUCCESS;
2066
2067 /* more fun to be had below */
2068 cbWrite -= cbRange;
2069 GCPhys += cbRange;
2070 pvBuf = (uint8_t *)pvBuf + cbRange;
2071 pvDst = (uint8_t *)pvDst + cbRange;
2072#endif /* IN_RING3 */
2073 }
2074 /* else: the handler is somewhere else in the page, deal with it below. */
2075 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2076 }
2077 /*
2078 * A virtual handler without any interfering physical handlers.
2079 * Hopefully it'll cover the whole write.
2080 */
2081 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2082 {
2083 unsigned iPage;
2084 PPGMVIRTHANDLER pCur;
2085 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
2086 if (RT_SUCCESS(rc))
2087 {
2088 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2089 if (cbRange > cbWrite)
2090 cbRange = cbWrite;
2091
2092#ifndef IN_RING3
2093 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2094 NOREF(cbRange);
2095 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2096 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2097
2098#else /* IN_RING3 */
2099
2100 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2101 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2102 if (RT_SUCCESS(rc))
2103 {
2104 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2105 if (pCur->pfnHandlerR3)
2106 {
2107 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
2108 + (iPage << PAGE_SHIFT)
2109 + (GCPhys & PAGE_OFFSET_MASK);
2110
2111 STAM_PROFILE_START(&pCur->Stat, h);
2112 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2113 STAM_PROFILE_STOP(&pCur->Stat, h);
2114 }
2115 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2116 memcpy(pvDst, pvBuf, cbRange);
2117 else
2118 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2119 }
2120 else
2121 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2122 GCPhys, pPage, rc), rc);
2123 if (RT_LIKELY(cbRange == cbWrite))
2124 return VINF_SUCCESS;
2125
2126 /* more fun to be had below */
2127 cbWrite -= cbRange;
2128 GCPhys += cbRange;
2129 pvBuf = (uint8_t *)pvBuf + cbRange;
2130 pvDst = (uint8_t *)pvDst + cbRange;
2131#endif
2132 }
2133 /* else: the handler is somewhere else in the page, deal with it below. */
2134 }
2135
2136 /*
2137 * Deal with all the odd ends.
2138 */
2139
2140 /* We need a writable destination page. */
2141 if (!pvDst)
2142 {
2143 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2144 AssertLogRelMsgReturn(RT_SUCCESS(rc),
2145 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2146 GCPhys, pPage, rc), rc);
2147 }
2148
2149 /* The loop state (big + ugly). */
2150 unsigned iVirtPage = 0;
2151 PPGMVIRTHANDLER pVirt = NULL;
2152 uint32_t offVirt = PAGE_SIZE;
2153 uint32_t offVirtLast = PAGE_SIZE;
2154 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2155
2156 PPGMPHYSHANDLER pPhys = NULL;
2157 uint32_t offPhys = PAGE_SIZE;
2158 uint32_t offPhysLast = PAGE_SIZE;
2159 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2160
2161 /* The loop. */
2162 for (;;)
2163 {
2164 /*
2165 * Find the closest handler at or above GCPhys.
2166 */
2167 if (fMoreVirt && !pVirt)
2168 {
2169 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
2170 if (RT_SUCCESS(rc))
2171 {
2172 offVirt = 0;
2173 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2174 }
2175 else
2176 {
2177 PPGMPHYS2VIRTHANDLER pVirtPhys;
2178 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2179 GCPhys, true /* fAbove */);
2180 if ( pVirtPhys
2181 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2182 {
2183 /* ASSUME that pVirtPhys only covers one page. */
2184 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2185 Assert(pVirtPhys->Core.Key > GCPhys);
2186
2187 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2188 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2189 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2190 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2191 }
2192 else
2193 {
2194 pVirt = NULL;
2195 fMoreVirt = false;
2196 offVirt = offVirtLast = PAGE_SIZE;
2197 }
2198 }
2199 }
2200
2201 if (fMorePhys && !pPhys)
2202 {
2203 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2204 if (pPhys)
2205 {
2206 offPhys = 0;
2207 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2208 }
2209 else
2210 {
2211 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2212 GCPhys, true /* fAbove */);
2213 if ( pPhys
2214 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2215 {
2216 offPhys = pPhys->Core.Key - GCPhys;
2217 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2218 }
2219 else
2220 {
2221 pPhys = NULL;
2222 fMorePhys = false;
2223 offPhys = offPhysLast = PAGE_SIZE;
2224 }
2225 }
2226 }
2227
2228 /*
2229 * Handle access to space without handlers (that's easy).
2230 */
2231 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2232 uint32_t cbRange = (uint32_t)cbWrite;
2233 if (offPhys && offVirt)
2234 {
2235 if (cbRange > offPhys)
2236 cbRange = offPhys;
2237 if (cbRange > offVirt)
2238 cbRange = offVirt;
2239 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2240 }
2241 /*
2242 * Physical handler.
2243 */
2244 else if (!offPhys && offVirt)
2245 {
2246 if (cbRange > offPhysLast + 1)
2247 cbRange = offPhysLast + 1;
2248 if (cbRange > offVirt)
2249 cbRange = offVirt;
2250#ifdef IN_RING3
2251 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2252 void *pvUser = pPhys->CTX_SUFF(pvUser);
2253
2254 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2255 STAM_PROFILE_START(&pPhys->Stat, h);
2256 Assert(PGMIsLockOwner(pVM));
2257 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2258 pgmUnlock(pVM);
2259 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2260 pgmLock(pVM);
2261# ifdef VBOX_WITH_STATISTICS
2262 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2263 if (pPhys)
2264 STAM_PROFILE_STOP(&pPhys->Stat, h);
2265# else
2266 pPhys = NULL; /* might not be valid anymore. */
2267# endif
2268 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2269#else
2270 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2271 NOREF(cbRange);
2272 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2273 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2274#endif
2275 }
2276 /*
2277 * Virtual handler.
2278 */
2279 else if (offPhys && !offVirt)
2280 {
2281 if (cbRange > offVirtLast + 1)
2282 cbRange = offVirtLast + 1;
2283 if (cbRange > offPhys)
2284 cbRange = offPhys;
2285#ifdef IN_RING3
2286 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2287 if (pVirt->pfnHandlerR3)
2288 {
2289 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2290 + (iVirtPage << PAGE_SHIFT)
2291 + (GCPhys & PAGE_OFFSET_MASK);
2292 STAM_PROFILE_START(&pVirt->Stat, h);
2293 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2294 STAM_PROFILE_STOP(&pVirt->Stat, h);
2295 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2296 }
2297 pVirt = NULL;
2298#else
2299 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2300 NOREF(cbRange);
2301 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2302 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2303#endif
2304 }
2305 /*
2306 * Both... give the physical one priority.
2307 */
2308 else
2309 {
2310 Assert(!offPhys && !offVirt);
2311 if (cbRange > offVirtLast + 1)
2312 cbRange = offVirtLast + 1;
2313 if (cbRange > offPhysLast + 1)
2314 cbRange = offPhysLast + 1;
2315
2316#ifdef IN_RING3
2317 if (pVirt->pfnHandlerR3)
2318 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2319 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2320
2321 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2322 void *pvUser = pPhys->CTX_SUFF(pvUser);
2323
2324 STAM_PROFILE_START(&pPhys->Stat, h);
2325 Assert(PGMIsLockOwner(pVM));
2326 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2327 pgmUnlock(pVM);
2328 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2329 pgmLock(pVM);
2330# ifdef VBOX_WITH_STATISTICS
2331 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2332 if (pPhys)
2333 STAM_PROFILE_STOP(&pPhys->Stat, h);
2334# else
2335 pPhys = NULL; /* might not be valid anymore. */
2336# endif
2337 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2338 if (pVirt->pfnHandlerR3)
2339 {
2340
2341 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2342 + (iVirtPage << PAGE_SHIFT)
2343 + (GCPhys & PAGE_OFFSET_MASK);
2344 STAM_PROFILE_START(&pVirt->Stat, h2);
2345 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2346 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2347 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2348 rc = VINF_SUCCESS;
2349 else
2350 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2351 }
2352 pPhys = NULL;
2353 pVirt = NULL;
2354#else
2355 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2356 NOREF(cbRange);
2357 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2358 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2359#endif
2360 }
2361 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2362 memcpy(pvDst, pvBuf, cbRange);
2363
2364 /*
2365 * Advance if we've got more stuff to do.
2366 */
2367 if (cbRange >= cbWrite)
2368 return VINF_SUCCESS;
2369
2370 cbWrite -= cbRange;
2371 GCPhys += cbRange;
2372 pvBuf = (uint8_t *)pvBuf + cbRange;
2373 pvDst = (uint8_t *)pvDst + cbRange;
2374
2375 offPhys -= cbRange;
2376 offPhysLast -= cbRange;
2377 offVirt -= cbRange;
2378 offVirtLast -= cbRange;
2379 }
2380}
2381
2382
2383/**
2384 * Write to physical memory.
2385 *
2386 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2387 * want to ignore those.
2388 *
2389 * @returns VBox status code. Can be ignored in ring-3.
2390 * @retval VINF_SUCCESS.
2391 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2392 *
2393 * @param pVM VM Handle.
2394 * @param GCPhys Physical address to write to.
2395 * @param pvBuf What to write.
2396 * @param cbWrite How many bytes to write.
2397 */
2398VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2399{
2400 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2401 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2402 LogFlow(("PGMPhysWrite: %RGp %zu\n", GCPhys, cbWrite));
2403
2404 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWrite));
2405 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2406
2407 pgmLock(pVM);
2408
2409 /*
2410 * Copy loop on ram ranges.
2411 */
2412 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2413 for (;;)
2414 {
2415 /* Find range. */
2416 while (pRam && GCPhys > pRam->GCPhysLast)
2417 pRam = pRam->CTX_SUFF(pNext);
2418 /* Inside range or not? */
2419 if (pRam && GCPhys >= pRam->GCPhys)
2420 {
2421 /*
2422 * Must work our way thru this page by page.
2423 */
2424 RTGCPTR off = GCPhys - pRam->GCPhys;
2425 while (off < pRam->cb)
2426 {
2427 RTGCPTR iPage = off >> PAGE_SHIFT;
2428 PPGMPAGE pPage = &pRam->aPages[iPage];
2429 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2430 if (cb > cbWrite)
2431 cb = cbWrite;
2432
2433 /*
2434 * Any active WRITE or ALL access handlers?
2435 */
2436 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2437 {
2438 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2439 if (RT_FAILURE(rc))
2440 {
2441 pgmUnlock(pVM);
2442 return rc;
2443 }
2444 }
2445 else
2446 {
2447 /*
2448 * Get the pointer to the page.
2449 */
2450 void *pvDst;
2451 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
2452 if (RT_SUCCESS(rc))
2453 {
2454 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2455 memcpy(pvDst, pvBuf, cb);
2456 }
2457 else
2458 /* Ignore writes to ballooned pages. */
2459 if (!PGM_PAGE_IS_BALLOONED(pPage))
2460 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2461 pRam->GCPhys + off, pPage, rc));
2462 }
2463
2464 /* next page */
2465 if (cb >= cbWrite)
2466 {
2467 pgmUnlock(pVM);
2468 return VINF_SUCCESS;
2469 }
2470
2471 cbWrite -= cb;
2472 off += cb;
2473 pvBuf = (const char *)pvBuf + cb;
2474 } /* walk pages in ram range */
2475
2476 GCPhys = pRam->GCPhysLast + 1;
2477 }
2478 else
2479 {
2480 /*
2481 * Unassigned address space, skip it.
2482 */
2483 if (!pRam)
2484 break;
2485 size_t cb = pRam->GCPhys - GCPhys;
2486 if (cb >= cbWrite)
2487 break;
2488 cbWrite -= cb;
2489 pvBuf = (const char *)pvBuf + cb;
2490 GCPhys += cb;
2491 }
2492 } /* Ram range walk */
2493
2494 pgmUnlock(pVM);
2495 return VINF_SUCCESS;
2496}
2497
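/*
 * Usage sketch (illustrative; GCPhys and u32Value are hypothetical): writing
 * guest memory with handler and MMIO semantics.  As with PGMPhysRead, the only
 * failure is VERR_PGM_PHYS_WR_HIT_HANDLER in R0/RC, which is typically passed
 * up so the write can be retried in ring-3.
 *
 *     uint32_t u32Value = UINT32_C(0xdeadbeef);
 *     int rc = PGMPhysWrite(pVM, GCPhys, &u32Value, sizeof(u32Value));
 *     if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)
 *         return rc;
 */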
2498
2499/**
2500 * Read from guest physical memory by GC physical address, bypassing
2501 * MMIO and access handlers.
2502 *
2503 * @returns VBox status.
2504 * @param pVM VM handle.
2505 * @param pvDst The destination address.
2506 * @param GCPhysSrc The source address (GC physical address).
2507 * @param cb The number of bytes to read.
2508 */
2509VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2510{
2511 /*
2512 * Treat the first page as a special case.
2513 */
2514 if (!cb)
2515 return VINF_SUCCESS;
2516
2517 /* map the 1st page */
2518 void const *pvSrc;
2519 PGMPAGEMAPLOCK Lock;
2520 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2521 if (RT_FAILURE(rc))
2522 return rc;
2523
2524 /* optimize for the case where access is completely within the first page. */
2525 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2526 if (RT_LIKELY(cb <= cbPage))
2527 {
2528 memcpy(pvDst, pvSrc, cb);
2529 PGMPhysReleasePageMappingLock(pVM, &Lock);
2530 return VINF_SUCCESS;
2531 }
2532
2533 /* copy to the end of the page. */
2534 memcpy(pvDst, pvSrc, cbPage);
2535 PGMPhysReleasePageMappingLock(pVM, &Lock);
2536 GCPhysSrc += cbPage;
2537 pvDst = (uint8_t *)pvDst + cbPage;
2538 cb -= cbPage;
2539
2540 /*
2541 * Page by page.
2542 */
2543 for (;;)
2544 {
2545 /* map the page */
2546 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2547 if (RT_FAILURE(rc))
2548 return rc;
2549
2550 /* last page? */
2551 if (cb <= PAGE_SIZE)
2552 {
2553 memcpy(pvDst, pvSrc, cb);
2554 PGMPhysReleasePageMappingLock(pVM, &Lock);
2555 return VINF_SUCCESS;
2556 }
2557
2558 /* copy the entire page and advance */
2559 memcpy(pvDst, pvSrc, PAGE_SIZE);
2560 PGMPhysReleasePageMappingLock(pVM, &Lock);
2561 GCPhysSrc += PAGE_SIZE;
2562 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2563 cb -= PAGE_SIZE;
2564 }
2565 /* won't ever get here. */
2566}
2567
2568
2569/**
2570 * Write to guest physical memory by GC physical address.
2572 *
2573 * This will bypass MMIO and access handlers.
2574 *
2575 * @returns VBox status.
2576 * @param pVM VM handle.
2577 * @param GCPhysDst The GC physical address of the destination.
2578 * @param pvSrc The source buffer.
2579 * @param cb The number of bytes to write.
2580 */
2581VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2582{
2583 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2584
2585 /*
2586 * Treat the first page as a special case.
2587 */
2588 if (!cb)
2589 return VINF_SUCCESS;
2590
2591 /* map the 1st page */
2592 void *pvDst;
2593 PGMPAGEMAPLOCK Lock;
2594 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2595 if (RT_FAILURE(rc))
2596 return rc;
2597
2598 /* optimize for the case where access is completely within the first page. */
2599 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2600 if (RT_LIKELY(cb <= cbPage))
2601 {
2602 memcpy(pvDst, pvSrc, cb);
2603 PGMPhysReleasePageMappingLock(pVM, &Lock);
2604 return VINF_SUCCESS;
2605 }
2606
2607 /* copy to the end of the page. */
2608 memcpy(pvDst, pvSrc, cbPage);
2609 PGMPhysReleasePageMappingLock(pVM, &Lock);
2610 GCPhysDst += cbPage;
2611 pvSrc = (const uint8_t *)pvSrc + cbPage;
2612 cb -= cbPage;
2613
2614 /*
2615 * Page by page.
2616 */
2617 for (;;)
2618 {
2619 /* map the page */
2620 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2621 if (RT_FAILURE(rc))
2622 return rc;
2623
2624 /* last page? */
2625 if (cb <= PAGE_SIZE)
2626 {
2627 memcpy(pvDst, pvSrc, cb);
2628 PGMPhysReleasePageMappingLock(pVM, &Lock);
2629 return VINF_SUCCESS;
2630 }
2631
2632 /* copy the entire page and advance */
2633 memcpy(pvDst, pvSrc, PAGE_SIZE);
2634 PGMPhysReleasePageMappingLock(pVM, &Lock);
2635 GCPhysDst += PAGE_SIZE;
2636 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2637 cb -= PAGE_SIZE;
2638 }
2639 /* won't ever get here. */
2640}
2641
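/*
 * Usage sketch (illustrative; GCPhys and abData are hypothetical): the
 * "simple" GC physical accessors bypass MMIO and access handlers, so they are
 * only suitable for memory known to be plain RAM.
 *
 *     uint8_t abData[64];
 *     int rc = PGMPhysSimpleReadGCPhys(pVM, abData, GCPhys, sizeof(abData));
 *     if (RT_SUCCESS(rc))
 *     {
 *         ... modify abData ...
 *         rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhys, abData, sizeof(abData));
 *     }
 */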
2642
2643/**
2644 * Read from guest physical memory referenced by GC pointer.
2645 *
2646 * This function uses the current CR3/CR0/CR4 of the guest and will
2647 * bypass access handlers and not set any accessed bits.
2648 *
2649 * @returns VBox status.
2650 * @param pVCpu The VMCPU handle.
2651 * @param pvDst The destination address.
2652 * @param GCPtrSrc The source address (GC pointer).
2653 * @param cb The number of bytes to read.
2654 */
2655VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2656{
2657 PVM pVM = pVCpu->CTX_SUFF(pVM);
2658
2659 /*
2660 * Treat the first page as a special case.
2661 */
2662 if (!cb)
2663 return VINF_SUCCESS;
2664
2665 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleRead));
2666 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
2667
2668 /* Take the PGM lock here once, because the functions called below would otherwise take and release it
2669 * repeatedly for very short periods, which is counter-productive when many VCPUs are contending for it.
2670 */
2671 pgmLock(pVM);
2672
2673 /* map the 1st page */
2674 void const *pvSrc;
2675 PGMPAGEMAPLOCK Lock;
2676 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2677 if (RT_FAILURE(rc))
2678 {
2679 pgmUnlock(pVM);
2680 return rc;
2681 }
2682
2683 /* optimize for the case where access is completely within the first page. */
2684 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2685 if (RT_LIKELY(cb <= cbPage))
2686 {
2687 memcpy(pvDst, pvSrc, cb);
2688 PGMPhysReleasePageMappingLock(pVM, &Lock);
2689 pgmUnlock(pVM);
2690 return VINF_SUCCESS;
2691 }
2692
2693 /* copy to the end of the page. */
2694 memcpy(pvDst, pvSrc, cbPage);
2695 PGMPhysReleasePageMappingLock(pVM, &Lock);
2696 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
2697 pvDst = (uint8_t *)pvDst + cbPage;
2698 cb -= cbPage;
2699
2700 /*
2701 * Page by page.
2702 */
2703 for (;;)
2704 {
2705 /* map the page */
2706 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2707 if (RT_FAILURE(rc))
2708 {
2709 pgmUnlock(pVM);
2710 return rc;
2711 }
2712
2713 /* last page? */
2714 if (cb <= PAGE_SIZE)
2715 {
2716 memcpy(pvDst, pvSrc, cb);
2717 PGMPhysReleasePageMappingLock(pVM, &Lock);
2718 pgmUnlock(pVM);
2719 return VINF_SUCCESS;
2720 }
2721
2722 /* copy the entire page and advance */
2723 memcpy(pvDst, pvSrc, PAGE_SIZE);
2724 PGMPhysReleasePageMappingLock(pVM, &Lock);
2725 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
2726 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2727 cb -= PAGE_SIZE;
2728 }
2729 /* won't ever get here. */
2730}
2731
2732
2733/**
2734 * Write to guest physical memory referenced by GC pointer.
2735 *
2736 * This function uses the current CR3/CR0/CR4 of the guest and will
2737 * bypass access handlers and not set dirty or accessed bits.
2738 *
2739 * @returns VBox status.
2740 * @param pVCpu The VMCPU handle.
2741 * @param GCPtrDst The destination address (GC pointer).
2742 * @param pvSrc The source address.
2743 * @param cb The number of bytes to write.
2744 */
2745VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2746{
2747 PVM pVM = pVCpu->CTX_SUFF(pVM);
2748
2749 /*
2750 * Treat the first page as a special case.
2751 */
2752 if (!cb)
2753 return VINF_SUCCESS;
2754
2755 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWrite));
2756 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
2757
2758 /* map the 1st page */
2759 void *pvDst;
2760 PGMPAGEMAPLOCK Lock;
2761 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2762 if (RT_FAILURE(rc))
2763 return rc;
2764
2765 /* optimize for the case where access is completely within the first page. */
2766 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2767 if (RT_LIKELY(cb <= cbPage))
2768 {
2769 memcpy(pvDst, pvSrc, cb);
2770 PGMPhysReleasePageMappingLock(pVM, &Lock);
2771 return VINF_SUCCESS;
2772 }
2773
2774 /* copy to the end of the page. */
2775 memcpy(pvDst, pvSrc, cbPage);
2776 PGMPhysReleasePageMappingLock(pVM, &Lock);
2777 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2778 pvSrc = (const uint8_t *)pvSrc + cbPage;
2779 cb -= cbPage;
2780
2781 /*
2782 * Page by page.
2783 */
2784 for (;;)
2785 {
2786 /* map the page */
2787 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2788 if (RT_FAILURE(rc))
2789 return rc;
2790
2791 /* last page? */
2792 if (cb <= PAGE_SIZE)
2793 {
2794 memcpy(pvDst, pvSrc, cb);
2795 PGMPhysReleasePageMappingLock(pVM, &Lock);
2796 return VINF_SUCCESS;
2797 }
2798
2799 /* copy the entire page and advance */
2800 memcpy(pvDst, pvSrc, PAGE_SIZE);
2801 PGMPhysReleasePageMappingLock(pVM, &Lock);
2802 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2803 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2804 cb -= PAGE_SIZE;
2805 }
2806 /* won't ever get here. */
2807}
2808
2809
2810/**
2811 * Write to guest physical memory referenced by GC pointer and update the PTE.
2812 *
2813 * This function uses the current CR3/CR0/CR4 of the guest and will
2814 * bypass access handlers but will set any dirty and accessed bits in the PTE.
2815 *
2816 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
2817 *
2818 * @returns VBox status.
2819 * @param pVCpu The VMCPU handle.
2820 * @param GCPtrDst The destination address (GC pointer).
2821 * @param pvSrc The source address.
2822 * @param cb The number of bytes to write.
2823 */
2824VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2825{
2826 PVM pVM = pVCpu->CTX_SUFF(pVM);
2827
2828 /*
2829 * Treat the first page as a special case.
2830 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage calls.
2831 */
2832 if (!cb)
2833 return VINF_SUCCESS;
2834
2835 /* map the 1st page */
2836 void *pvDst;
2837 PGMPAGEMAPLOCK Lock;
2838 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2839 if (RT_FAILURE(rc))
2840 return rc;
2841
2842 /* optimize for the case where access is completely within the first page. */
2843 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2844 if (RT_LIKELY(cb <= cbPage))
2845 {
2846 memcpy(pvDst, pvSrc, cb);
2847 PGMPhysReleasePageMappingLock(pVM, &Lock);
2848 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2849 return VINF_SUCCESS;
2850 }
2851
2852 /* copy to the end of the page. */
2853 memcpy(pvDst, pvSrc, cbPage);
2854 PGMPhysReleasePageMappingLock(pVM, &Lock);
2855 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2856 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2857 pvSrc = (const uint8_t *)pvSrc + cbPage;
2858 cb -= cbPage;
2859
2860 /*
2861 * Page by page.
2862 */
2863 for (;;)
2864 {
2865 /* map the page */
2866 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2867 if (RT_FAILURE(rc))
2868 return rc;
2869
2870 /* last page? */
2871 if (cb <= PAGE_SIZE)
2872 {
2873 memcpy(pvDst, pvSrc, cb);
2874 PGMPhysReleasePageMappingLock(pVM, &Lock);
2875 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2876 return VINF_SUCCESS;
2877 }
2878
2879 /* copy the entire page and advance */
2880 memcpy(pvDst, pvSrc, PAGE_SIZE);
2881 PGMPhysReleasePageMappingLock(pVM, &Lock);
2882 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2883 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2884 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2885 cb -= PAGE_SIZE;
2886 }
2887 /* won't ever get here. */
2888}
2889
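/*
 * Usage sketch (illustrative; GCPtrDst and u64Value are hypothetical): both
 * simple GC pointer writers bypass access handlers, but only the "Dirty"
 * variant also sets the accessed and dirty bits in the guest PTEs, which
 * matters when emulating an instruction that architecturally performs the
 * write.
 *
 *     uint64_t u64Value = 0;
 *     int rc = PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrDst, &u64Value, sizeof(u64Value));
 *     ...
 *     rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, &u64Value, sizeof(u64Value));
 */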
2890
2891/**
2892 * Read from guest physical memory referenced by GC pointer.
2893 *
2894 * This function uses the current CR3/CR0/CR4 of the guest and will
2895 * respect access handlers and set accessed bits.
2896 *
2897 * @returns VBox status.
2898 * @param pVCpu The VMCPU handle.
2899 * @param pvDst The destination address.
2900 * @param GCPtrSrc The source address (GC pointer).
2901 * @param cb The number of bytes to read.
2902 * @thread The vCPU EMT.
2903 */
2904VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2905{
2906 RTGCPHYS GCPhys;
2907 uint64_t fFlags;
2908 int rc;
2909 PVM pVM = pVCpu->CTX_SUFF(pVM);
2910
2911 /*
2912 * Anything to do?
2913 */
2914 if (!cb)
2915 return VINF_SUCCESS;
2916
2917 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
2918
2919 /*
2920 * Optimize reads within a single page.
2921 */
2922 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2923 {
2924 /* Convert virtual to physical address + flags */
2925 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2926 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2927 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2928
2929 /* mark the guest page as accessed. */
2930 if (!(fFlags & X86_PTE_A))
2931 {
2932 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2933 AssertRC(rc);
2934 }
2935
2936 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
2937 }
2938
2939 /*
2940 * Page by page.
2941 */
2942 for (;;)
2943 {
2944 /* Convert virtual to physical address + flags */
2945 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2946 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2947 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2948
2949 /* mark the guest page as accessed. */
2950 if (!(fFlags & X86_PTE_A))
2951 {
2952 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2953 AssertRC(rc);
2954 }
2955
2956 /* copy */
2957 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2958 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
2959 if (cbRead >= cb || RT_FAILURE(rc))
2960 return rc;
2961
2962 /* next */
2963 cb -= cbRead;
2964 pvDst = (uint8_t *)pvDst + cbRead;
2965 GCPtrSrc += cbRead;
2966 }
2967}
2968
2969
2970/**
2971 * Write to guest physical memory referenced by GC pointer.
2972 *
2973 * This function uses the current CR3/CR0/CR4 of the guest and will
2974 * respect access handlers and set dirty and accessed bits.
2975 *
2976 * @returns VBox status.
2977 * @retval VINF_SUCCESS.
2978 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2979 *
2980 * @param pVCpu The VMCPU handle.
2981 * @param GCPtrDst The destination address (GC pointer).
2982 * @param pvSrc The source address.
2983 * @param cb The number of bytes to write.
2984 */
2985VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2986{
2987 RTGCPHYS GCPhys;
2988 uint64_t fFlags;
2989 int rc;
2990 PVM pVM = pVCpu->CTX_SUFF(pVM);
2991
2992 /*
2993 * Anything to do?
2994 */
2995 if (!cb)
2996 return VINF_SUCCESS;
2997
2998 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
2999
3000 /*
3001 * Optimize writes within a single page.
3002 */
3003 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3004 {
3005 /* Convert virtual to physical address + flags */
3006 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3007 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3008 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3009
3010 /* Mention when we ignore X86_PTE_RW... */
3011 if (!(fFlags & X86_PTE_RW))
3012 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3013
3014 /* Mark the guest page as accessed and dirty if necessary. */
3015 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3016 {
3017 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3018 AssertRC(rc);
3019 }
3020
3021 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
3022 }
3023
3024 /*
3025 * Page by page.
3026 */
3027 for (;;)
3028 {
3029 /* Convert virtual to physical address + flags */
3030 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3031 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3032 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3033
3034 /* Mention when we ignore X86_PTE_RW... */
3035 if (!(fFlags & X86_PTE_RW))
3036 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3037
3038 /* Mark the guest page as accessed and dirty if necessary. */
3039 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3040 {
3041 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3042 AssertRC(rc);
3043 }
3044
3045 /* copy */
3046 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3047 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
3048 if (cbWrite >= cb || RT_FAILURE(rc))
3049 return rc;
3050
3051 /* next */
3052 cb -= cbWrite;
3053 pvSrc = (uint8_t *)pvSrc + cbWrite;
3054 GCPtrDst += cbWrite;
3055 }
3056}
3057
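/*
 * Usage sketch (illustrative; GCPtrGuest and u32 are hypothetical): the
 * handler-respecting GC pointer accessors go through PGMPhysRead/PGMPhysWrite,
 * so MMIO and access handlers fire and the accessed/dirty bits get updated.
 *
 *     uint32_t u32;
 *     int rc = PGMPhysReadGCPtr(pVCpu, &u32, GCPtrGuest, sizeof(u32));
 *     if (RT_SUCCESS(rc))
 *         rc = PGMPhysWriteGCPtr(pVCpu, GCPtrGuest, &u32, sizeof(u32));
 */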
3058
3059/**
3060 * Performs a read of guest virtual memory for instruction emulation.
3061 *
3062 * This will check permissions, raise exceptions and update the access bits.
3063 *
3064 * The current implementation will bypass all access handlers. It may later be
3065 * changed to at least respect MMIO.
3066 *
3067 *
3068 * @returns VBox status code suitable to scheduling.
3069 * @retval VINF_SUCCESS if the read was performed successfully.
3070 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3071 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3072 *
3073 * @param pVCpu The VMCPU handle.
3074 * @param pCtxCore The context core.
3075 * @param pvDst Where to put the bytes we've read.
3076 * @param GCPtrSrc The source address.
3077 * @param cb The number of bytes to read. Not more than a page.
3078 *
3079 * @remark This function will dynamically map physical pages in GC. This may unmap
3080 * mappings done by the caller. Be careful!
3081 */
3082VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3083{
3084 PVM pVM = pVCpu->CTX_SUFF(pVM);
3085 Assert(cb <= PAGE_SIZE);
3086
3087/** @todo r=bird: This isn't perfect!
3088 * -# It's not checking for reserved bits being 1.
3089 * -# It's not correctly dealing with the access bit.
3090 * -# It's not respecting MMIO memory or any other access handlers.
3091 */
3092 /*
3093 * 1. Translate virtual to physical. This may fault.
3094 * 2. Map the physical address.
3095 * 3. Do the read operation.
3096 * 4. Set access bits if required.
3097 */
3098 int rc;
3099 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3100 if (cb <= cb1)
3101 {
3102 /*
3103 * Not crossing pages.
3104 */
3105 RTGCPHYS GCPhys;
3106 uint64_t fFlags;
3107 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3108 if (RT_SUCCESS(rc))
3109 {
3110 /** @todo we should check reserved bits ... */
3111 void *pvSrc;
3112 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
3113 switch (rc)
3114 {
3115 case VINF_SUCCESS:
3116 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3117 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3118 break;
3119 case VERR_PGM_PHYS_PAGE_RESERVED:
3120 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3121 memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
3122 break;
3123 default:
3124 return rc;
3125 }
3126
3127 /** @todo access bit emulation isn't 100% correct. */
3128 if (!(fFlags & X86_PTE_A))
3129 {
3130 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3131 AssertRC(rc);
3132 }
3133 return VINF_SUCCESS;
3134 }
3135 }
3136 else
3137 {
3138 /*
3139 * Crosses pages.
3140 */
3141 size_t cb2 = cb - cb1;
3142 uint64_t fFlags1;
3143 RTGCPHYS GCPhys1;
3144 uint64_t fFlags2;
3145 RTGCPHYS GCPhys2;
3146 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3147 if (RT_SUCCESS(rc))
3148 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3149 if (RT_SUCCESS(rc))
3150 {
3151 /** @todo we should check reserved bits ... */
3152 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3153 void *pvSrc1;
3154 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
3155 switch (rc)
3156 {
3157 case VINF_SUCCESS:
3158 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3159 break;
3160 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3161 memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
3162 break;
3163 default:
3164 return rc;
3165 }
3166
3167 void *pvSrc2;
3168 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
3169 switch (rc)
3170 {
3171 case VINF_SUCCESS:
3172 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3173 break;
3174 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3175 memset((uint8_t *)pvDst + cb1, 0, cb2); /** @todo this is wrong, it should be 0xff */
3176 break;
3177 default:
3178 return rc;
3179 }
3180
3181 if (!(fFlags1 & X86_PTE_A))
3182 {
3183 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3184 AssertRC(rc);
3185 }
3186 if (!(fFlags2 & X86_PTE_A))
3187 {
3188 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3189 AssertRC(rc);
3190 }
3191 return VINF_SUCCESS;
3192 }
3193 }
3194
3195 /*
3196 * Raise a #PF.
3197 */
3198 uint32_t uErr;
3199
3200 /* Get the current privilege level. */
3201 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3202 switch (rc)
3203 {
3204 case VINF_SUCCESS:
3205 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3206 break;
3207
3208 case VERR_PAGE_NOT_PRESENT:
3209 case VERR_PAGE_TABLE_NOT_PRESENT:
3210 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3211 break;
3212
3213 default:
3214 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3215 return rc;
3216 }
3217 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3218 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3219}
3220
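/*
 * Usage sketch (illustrative; u16Operand, GCPtrSrc and the pCtxCore plumbing
 * are hypothetical): an instruction emulator reading a 16-bit operand.  Any
 * status other than VINF_SUCCESS (including the informational trap statuses)
 * is simply propagated to the caller/scheduler.
 *
 *     uint16_t u16Operand;
 *     int rc = PGMPhysInterpretedRead(pVCpu, pCtxCore, &u16Operand, GCPtrSrc, sizeof(u16Operand));
 *     if (rc != VINF_SUCCESS)
 *         return rc;
 */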
3221
3222/**
3223 * Performs a read of guest virtual memory for instruction emulation.
3224 *
3225 * This will check permissions, raise exceptions and update the access bits.
3226 *
3227 * The current implementation will bypass all access handlers. It may later be
3228 * changed to at least respect MMIO.
3229 *
3230 *
3231 * @returns VBox status code suitable to scheduling.
3232 * @retval VINF_SUCCESS if the read was performed successfully.
3233 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3234 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3235 *
3236 * @param pVCpu The VMCPU handle.
3237 * @param pCtxCore The context core.
3238 * @param pvDst Where to put the bytes we've read.
3239 * @param GCPtrSrc The source address.
3240 * @param cb The number of bytes to read. Not more than a page.
3241 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3242 * an appropriate error status will be returned (no
3243 * informational status at all).
3244 *
3245 *
3246 * @remarks Takes the PGM lock.
3247 * @remarks A page fault on the 2nd page of the access will be raised without
3248 * writing the bits on the first page since we're ASSUMING that the
3249 * caller is emulating an instruction access.
3250 * @remarks This function will dynamically map physical pages in GC. This may
3251 * unmap mappings done by the caller. Be careful!
3252 */
3253VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
3254{
3255 PVM pVM = pVCpu->CTX_SUFF(pVM);
3256 Assert(cb <= PAGE_SIZE);
3257
3258 /*
3259 * 1. Translate virtual to physical. This may fault.
3260 * 2. Map the physical address.
3261 * 3. Do the read operation.
3262 * 4. Set access bits if required.
3263 */
3264 int rc;
3265 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3266 if (cb <= cb1)
3267 {
3268 /*
3269 * Not crossing pages.
3270 */
3271 RTGCPHYS GCPhys;
3272 uint64_t fFlags;
3273 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3274 if (RT_SUCCESS(rc))
3275 {
3276 if (1) /** @todo we should check reserved bits ... */
3277 {
3278 const void *pvSrc;
3279 PGMPAGEMAPLOCK Lock;
3280 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3281 switch (rc)
3282 {
3283 case VINF_SUCCESS:
3284 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3285 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3286 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3287 break;
3288 case VERR_PGM_PHYS_PAGE_RESERVED:
3289 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3290 memset(pvDst, 0xff, cb);
3291 break;
3292 default:
3293 AssertMsgFailed(("%Rrc\n", rc));
3294 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3295 return rc;
3296 }
3297 PGMPhysReleasePageMappingLock(pVM, &Lock);
3298
3299 if (!(fFlags & X86_PTE_A))
3300 {
3301 /** @todo access bit emulation isn't 100% correct. */
3302 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3303 AssertRC(rc);
3304 }
3305 return VINF_SUCCESS;
3306 }
3307 }
3308 }
3309 else
3310 {
3311 /*
3312 * Crosses pages.
3313 */
3314 size_t cb2 = cb - cb1;
3315 uint64_t fFlags1;
3316 RTGCPHYS GCPhys1;
3317 uint64_t fFlags2;
3318 RTGCPHYS GCPhys2;
3319 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3320 if (RT_SUCCESS(rc))
3321 {
3322 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3323 if (RT_SUCCESS(rc))
3324 {
3325 if (1) /** @todo we should check reserved bits ... */
3326 {
3327 const void *pvSrc;
3328 PGMPAGEMAPLOCK Lock;
3329 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3330 switch (rc)
3331 {
3332 case VINF_SUCCESS:
3333 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3334 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3335 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3336 PGMPhysReleasePageMappingLock(pVM, &Lock);
3337 break;
3338 case VERR_PGM_PHYS_PAGE_RESERVED:
3339 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3340 memset(pvDst, 0xff, cb1);
3341 break;
3342 default:
3343 AssertMsgFailed(("%Rrc\n", rc));
3344 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3345 return rc;
3346 }
3347
3348 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3349 switch (rc)
3350 {
3351 case VINF_SUCCESS:
3352 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3353 PGMPhysReleasePageMappingLock(pVM, &Lock);
3354 break;
3355 case VERR_PGM_PHYS_PAGE_RESERVED:
3356 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3357 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3358 break;
3359 default:
3360 AssertMsgFailed(("%Rrc\n", rc));
3361 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3362 return rc;
3363 }
3364
3365 if (!(fFlags1 & X86_PTE_A))
3366 {
3367 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3368 AssertRC(rc);
3369 }
3370 if (!(fFlags2 & X86_PTE_A))
3371 {
3372 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3373 AssertRC(rc);
3374 }
3375 return VINF_SUCCESS;
3376 }
3377 /* sort out which page */
3378 }
3379 else
3380 GCPtrSrc += cb1; /* fault on 2nd page */
3381 }
3382 }
3383
3384 /*
3385 * Raise a #PF if we're allowed to do that.
3386 */
3387 /* Calc the error bits. */
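    /* Standard x86 #PF error code bits: X86_TRAP_PF_US marks a user-mode access
       (see the CPL test below), X86_TRAP_PF_RSVD flags reserved bits set in the
       paging structures, and the P bit is left clear when the page or page
       table was not present. */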
3388 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3389 uint32_t uErr;
3390 switch (rc)
3391 {
3392 case VINF_SUCCESS:
3393 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3394 rc = VERR_ACCESS_DENIED;
3395 break;
3396
3397 case VERR_PAGE_NOT_PRESENT:
3398 case VERR_PAGE_TABLE_NOT_PRESENT:
3399 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3400 break;
3401
3402 default:
3403 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3404 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3405 return rc;
3406 }
3407 if (fRaiseTrap)
3408 {
3409 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3410 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3411 }
3412 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3413 return rc;
3414}
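
/*
 * A minimal usage sketch (hypothetical, with assumed caller context): an
 * instruction emulator fetching a guest operand via the helper above.  The
 * variables pVCpu, pCtxCore and GCPtrSrc are assumed to be supplied by the
 * caller; only the call itself follows the signature of
 * PGMPhysInterpretedReadNoHandlers.
 */
#if 0
    uint32_t u32Operand;
    int rcRead = PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, &u32Operand,
                                                  (RTGCUINTPTR)GCPtrSrc, sizeof(u32Operand),
                                                  true /*fRaiseTrap*/);
    if (rcRead == VINF_SUCCESS)
    {
        /* u32Operand now holds the guest data; continue emulating. */
    }
    else
    {
        /* A trap was raised/dispatched or the translation failed; hand the
           status back so the caller can reschedule or bail out. */
        return rcRead;
    }
#endif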
3415
3416
3417/**
3418 * Performs a write to guest virtual memory for instruction emulation.
3419 *
3420 * This will check permissions, raise exceptions and update the dirty and access
3421 * bits.
3422 *
3423 * @returns VBox status code suitable for scheduling.
3424 * @retval  VINF_SUCCESS if the write was performed successfully.
3425 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3426 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3427 *
3428 * @param pVCpu The VMCPU handle.
3429 * @param pCtxCore The context core.
3430 * @param GCPtrDst The destination address.
3431 * @param pvSrc What to write.
3432 * @param cb The number of bytes to write. Not more than a page.
3433 * @param   fRaiseTrap  If set, the trap will be raised as per the spec; if clear,
3434 *                      an appropriate error status will be returned (no
3435 *                      informational status at all).
3436 *
3437 * @remarks Takes the PGM lock.
3438 * @remarks A page fault on the 2nd page of the access will be raised without
3439 * writing the bits on the first page since we're ASSUMING that the
3440 * caller is emulating an instruction access.
3441 * @remarks This function will dynamically map physical pages in GC. This may
3442 * unmap mappings done by the caller. Be careful!
3443 */
3444VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
3445{
3446 Assert(cb <= PAGE_SIZE);
3447 PVM pVM = pVCpu->CTX_SUFF(pVM);
3448
3449 /*
3450 * 1. Translate virtual to physical. This may fault.
3451 * 2. Map the physical address.
3452 * 3. Do the write operation.
3453 * 4. Set access bits if required.
3454 */
3455 int rc;
3456 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3457 if (cb <= cb1)
3458 {
3459 /*
3460 * Not crossing pages.
3461 */
3462 RTGCPHYS GCPhys;
3463 uint64_t fFlags;
3464 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3465 if (RT_SUCCESS(rc))
3466 {
3467 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3468 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3469 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3470 {
3471 void *pvDst;
3472 PGMPAGEMAPLOCK Lock;
3473 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3474 switch (rc)
3475 {
3476 case VINF_SUCCESS:
3477 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3478 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3479 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3480 PGMPhysReleasePageMappingLock(pVM, &Lock);
3481 break;
3482 case VERR_PGM_PHYS_PAGE_RESERVED:
3483 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3484 /* bit bucket */
3485 break;
3486 default:
3487 AssertMsgFailed(("%Rrc\n", rc));
3488 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3489 return rc;
3490 }
3491
3492 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3493 {
3494 /** @todo dirty & access bit emulation isn't 100% correct. */
3495 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3496 AssertRC(rc);
3497 }
3498 return VINF_SUCCESS;
3499 }
3500 rc = VERR_ACCESS_DENIED;
3501 }
3502 }
3503 else
3504 {
3505 /*
3506 * Crosses pages.
3507 */
3508 size_t cb2 = cb - cb1;
3509 uint64_t fFlags1;
3510 RTGCPHYS GCPhys1;
3511 uint64_t fFlags2;
3512 RTGCPHYS GCPhys2;
3513 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3514 if (RT_SUCCESS(rc))
3515 {
3516 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3517 if (RT_SUCCESS(rc))
3518 {
3519 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3520 && (fFlags2 & X86_PTE_RW))
3521 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3522 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) )
3523 {
3524 void *pvDst;
3525 PGMPAGEMAPLOCK Lock;
3526 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3527 switch (rc)
3528 {
3529 case VINF_SUCCESS:
3530 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3531 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3532 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3533 PGMPhysReleasePageMappingLock(pVM, &Lock);
3534 break;
3535 case VERR_PGM_PHYS_PAGE_RESERVED:
3536 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3537 /* bit bucket */
3538 break;
3539 default:
3540 AssertMsgFailed(("%Rrc\n", rc));
3541 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3542 return rc;
3543 }
3544
3545 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3546 switch (rc)
3547 {
3548 case VINF_SUCCESS:
3549 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3550 PGMPhysReleasePageMappingLock(pVM, &Lock);
3551 break;
3552 case VERR_PGM_PHYS_PAGE_RESERVED:
3553 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3554 /* bit bucket */
3555 break;
3556 default:
3557 AssertMsgFailed(("%Rrc\n", rc));
3558 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3559 return rc;
3560 }
3561
3562 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3563 {
3564 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3565 AssertRC(rc);
3566 }
3567 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3568 {
3569 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3570 AssertRC(rc);
3571 }
3572 return VINF_SUCCESS;
3573 }
3574 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3575 GCPtrDst += cb1; /* fault on the 2nd page. */
3576 rc = VERR_ACCESS_DENIED;
3577 }
3578 else
3579 GCPtrDst += cb1; /* fault on the 2nd page. */
3580 }
3581 }
3582
3583 /*
3584 * Raise a #PF if we're allowed to do that.
3585 */
3586 /* Calc the error bits. */
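    /* Same #PF error code scheme as in the read path above, with X86_TRAP_PF_RW
       added to mark the faulting access as a write (the VERR_ACCESS_DENIED case). */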
3587 uint32_t uErr;
3588 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3589 switch (rc)
3590 {
3591 case VINF_SUCCESS:
3592 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3593 rc = VERR_ACCESS_DENIED;
3594 break;
3595
3596 case VERR_ACCESS_DENIED:
3597 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3598 break;
3599
3600 case VERR_PAGE_NOT_PRESENT:
3601 case VERR_PAGE_TABLE_NOT_PRESENT:
3602 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3603 break;
3604
3605 default:
3606 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3607 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3608 return rc;
3609 }
3610 if (fRaiseTrap)
3611 {
3612 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3613 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3614 }
3615 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3616 return rc;
3617}
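
/*
 * A minimal usage sketch (hypothetical, with assumed caller context): storing
 * an emulated result back to guest memory without letting the helper raise
 * the #PF itself.  pVCpu, pCtxCore, GCPtrDst and u64Result are assumed to be
 * provided by the caller's emulation code.
 */
#if 0
    int rcWrite = PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst,
                                                    &u64Result, sizeof(u64Result),
                                                    false /*fRaiseTrap*/);
    if (RT_FAILURE(rcWrite))
    {
        /* E.g. VERR_ACCESS_DENIED or VERR_PAGE_NOT_PRESENT: with fRaiseTrap
           clear no trap is injected, so the caller decides whether to raise
           the #PF itself or take another path. */
        return rcWrite;
    }
#endif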
3618