VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 27046

Last change on this file since 27046 was 27040, checked in by vboxsync, 15 years ago

Logging change

1/* $Id: PGMAllPhys.cpp 27040 2010-03-04 14:49:56Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM_PHYS
26#include <VBox/pgm.h>
27#include <VBox/trpm.h>
28#include <VBox/vmm.h>
29#include <VBox/iom.h>
30#include <VBox/em.h>
31#include <VBox/rem.h>
32#include "../PGMInternal.h"
33#include <VBox/vm.h>
34#include "../PGMInline.h"
35#include <VBox/param.h>
36#include <VBox/err.h>
37#include <iprt/assert.h>
38#include <iprt/string.h>
39#include <iprt/asm.h>
40#include <VBox/log.h>
41#ifdef IN_RING3
42# include <iprt/thread.h>
43#endif
44
45
46/*******************************************************************************
47* Defined Constants And Macros *
48*******************************************************************************/
49/** Enable the physical TLB. */
50#define PGM_WITH_PHYS_TLB
51
52
53
54#ifndef IN_RING3
55
56/**
57 * \#PF Handler callback for Guest ROM range write access.
58 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
59 *
60 * @returns VBox status code (appropriate for trap handling and GC return).
61 * @param pVM VM Handle.
62 * @param uErrorCode CPU Error code.
63 * @param pRegFrame Trap register frame.
64 * @param pvFault The fault address (cr2).
65 * @param GCPhysFault The GC physical address corresponding to pvFault.
66 * @param pvUser User argument. Pointer to the ROM range structure.
67 */
68VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
69{
70 int rc;
71 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
72 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
73 PVMCPU pVCpu = VMMGetCpu(pVM);
74
75 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
76 switch (pRom->aPages[iPage].enmProt)
77 {
78 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
79 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
80 {
81 /*
82 * If it's a simple instruction which doesn't change the cpu state
83 * we will simply skip it. Otherwise we'll have to defer it to REM.
84 */
85 uint32_t cbOp;
86 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
87 rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, &cbOp);
88 if ( RT_SUCCESS(rc)
89 && pDis->mode == CPUMODE_32BIT /** @todo why does this matter? */
90 && !(pDis->prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
91 {
92 switch (pDis->opcode)
93 {
94 /** @todo Find other instructions we can safely skip, possibly
95 * adding this kind of detection to DIS or EM. */
96 case OP_MOV:
97 pRegFrame->rip += cbOp;
98 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteHandled);
99 return VINF_SUCCESS;
100 }
101 }
102 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
103 return rc;
104 break;
105 }
106
107 case PGMROMPROT_READ_RAM_WRITE_RAM:
108 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
109 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
110 AssertRC(rc);
111 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
112
113 case PGMROMPROT_READ_ROM_WRITE_RAM:
114 /* Handle it in ring-3 because it's *way* easier there. */
115 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
116 break;
117
118 default:
119 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
120 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
121 VERR_INTERNAL_ERROR);
122 }
123
124 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteUnhandled);
125 return VINF_EM_RAW_EMULATE_INSTR;
126}
127
128#endif /* !IN_RING3 */
129
130/**
131 * Checks if Address Gate 20 is enabled or not.
132 *
133 * @returns true if enabled.
134 * @returns false if disabled.
135 * @param pVCpu VMCPU handle.
136 */
137VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
138{
139 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
140 return pVCpu->pgm.s.fA20Enabled;
141}
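
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * When the A20 gate is disabled the CPU forces physical address bit 20
 * to zero, reproducing the 8086 1 MB wrap-around.  The helper below is a
 * simplified, stand-alone model of that masking; the name and types are
 * made up for illustration and this is not the actual PGM code path.
 */
#include <stdbool.h>
#include <stdint.h>

static uint64_t exampleApplyA20Gate(bool fA20Enabled, uint64_t GCPhys)
{
    if (!fA20Enabled)
        GCPhys &= ~(UINT64_C(1) << 20);   /* clear bit 20 -> wrap at 1 MB */
    return GCPhys;
}
/* exampleApplyA20Gate(false, 0x100000) yields 0x000000 (wrapped). */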
142
143
144/**
145 * Validates a GC physical address.
146 *
147 * @returns true if valid.
148 * @returns false if invalid.
149 * @param pVM The VM handle.
150 * @param GCPhys The physical address to validate.
151 */
152VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
153{
154 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
155 return pPage != NULL;
156}
157
158
159/**
160 * Checks if a GC physical address is a normal page,
161 * i.e. not ROM, MMIO or reserved.
162 *
163 * @returns true if normal.
164 * @returns false if invalid, ROM, MMIO or reserved page.
165 * @param pVM The VM handle.
166 * @param GCPhys The physical address to check.
167 */
168VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
169{
170 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
171 return pPage
172 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
173}
174
175
176/**
177 * Converts a GC physical address to a HC physical address.
178 *
179 * @returns VINF_SUCCESS on success.
181 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
181 * page but has no physical backing.
182 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
183 * GC physical address.
184 *
185 * @param pVM The VM handle.
186 * @param GCPhys The GC physical address to convert.
187 * @param pHCPhys Where to store the HC physical address on success.
188 */
189VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
190{
191 pgmLock(pVM);
192 PPGMPAGE pPage;
193 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
194 if (RT_SUCCESS(rc))
195 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
196 pgmUnlock(pVM);
197 return rc;
198}
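
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The conversion above keeps the page-granular host physical address
 * stored in the PGMPAGE entry and re-applies the byte offset taken from
 * the guest address.  Simplified stand-alone model with made-up names;
 * 4 KB pages (a 12-bit offset) are assumed, as elsewhere in this file.
 */
#include <stdint.h>

#define EXAMPLE_PAGE_OFFSET_MASK  UINT64_C(0xfff)

static uint64_t exampleGCPhys2HCPhys(uint64_t HCPhysPageBase, uint64_t GCPhys)
{
    /* HCPhysPageBase is page aligned; merge in the low 12 bits of GCPhys. */
    return (HCPhysPageBase & ~EXAMPLE_PAGE_OFFSET_MASK)
         | (GCPhys        &  EXAMPLE_PAGE_OFFSET_MASK);
}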
199
200
201/**
202 * Invalidates all page mapping TLBs.
203 *
204 * @param pVM The VM handle.
205 */
206VMMDECL(void) PGMPhysInvalidatePageMapTLB(PVM pVM)
207{
208 pgmLock(pVM);
209 STAM_COUNTER_INC(&pVM->pgm.s.StatPageMapTlbFlushes);
210 /* Clear the shared R0/R3 TLB completely. */
211 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
212 {
213 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
214 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
215 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
216 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
217 }
218 /* @todo clear the RC TLB whenever we add it. */
219 pgmUnlock(pVM);
220}
221
222/**
223 * Invalidates a page mapping TLB entry
224 *
225 * @param pVM The VM handle.
226 * @param GCPhys GCPhys entry to flush
227 */
228VMMDECL(void) PGMPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
229{
230 Assert(PGMIsLocked(pVM));
231
232 STAM_COUNTER_INC(&pVM->pgm.s.StatPageMapTlbFlushEntry);
233 /* Clear the shared R0/R3 TLB entry. */
234#ifdef IN_RC
235 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
236 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
237 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
238 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
239 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
240#else
241 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
242 pTlbe->GCPhys = NIL_RTGCPHYS;
243 pTlbe->pPage = 0;
244 pTlbe->pMap = 0;
245 pTlbe->pv = 0;
246#endif
247 /* @todo clear the RC TLB whenever we add it. */
248}
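
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * Both the full flush above and the per-entry invalidation work on a
 * small direct-mapped cache: the page frame number selects one slot and
 * resetting a slot means storing the NIL address in it.  Simplified
 * model with made-up names and an assumed power-of-two entry count.
 */
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_TLB_ENTRIES  64u                /* assumed, power of two */
#define EXAMPLE_NIL_GCPHYS   UINT64_MAX

typedef struct EXAMPLETLBE
{
    uint64_t GCPhys;                            /* page-aligned guest address or NIL */
    void    *pv;                                /* host mapping of that page */
} EXAMPLETLBE;

static size_t exampleTlbIndex(uint64_t GCPhys)
{
    return (size_t)((GCPhys >> 12) & (EXAMPLE_TLB_ENTRIES - 1));
}

static void exampleTlbInvalidateEntry(EXAMPLETLBE *paEntries, uint64_t GCPhys)
{
    EXAMPLETLBE *pEntry = &paEntries[exampleTlbIndex(GCPhys)];
    pEntry->GCPhys = EXAMPLE_NIL_GCPHYS;
    pEntry->pv     = NULL;
}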
249
250/**
251 * Makes sure that there is at least one handy page ready for use.
252 *
253 * This will also take the appropriate actions when reaching water-marks.
254 *
255 * @returns VBox status code.
256 * @retval VINF_SUCCESS on success.
257 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
258 *
259 * @param pVM The VM handle.
260 *
261 * @remarks Must be called from within the PGM critical section. It may
262 * nip back to ring-3/0 in some cases.
263 */
264static int pgmPhysEnsureHandyPage(PVM pVM)
265{
266 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
267
268 /*
269 * Do we need to do anything special?
270 */
271#ifdef IN_RING3
272 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
273#else
274 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
275#endif
276 {
277 /*
278 * Allocate pages only if we're out of them, or in ring-3, almost out.
279 */
280#ifdef IN_RING3
281 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
282#else
283 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
284#endif
285 {
286 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
287 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
288#ifdef IN_RING3
289 int rc = PGMR3PhysAllocateHandyPages(pVM);
290#else
291 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
292#endif
293 if (RT_UNLIKELY(rc != VINF_SUCCESS))
294 {
295 if (RT_FAILURE(rc))
296 return rc;
297 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
298 if (!pVM->pgm.s.cHandyPages)
299 {
300 LogRel(("PGM: no more handy pages!\n"));
301 return VERR_EM_NO_MEMORY;
302 }
303 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
304 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
305#ifdef IN_RING3
306 REMR3NotifyFF(pVM);
307#else
308 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
309#endif
310 }
311 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
312 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
313 ("%u\n", pVM->pgm.s.cHandyPages),
314 VERR_INTERNAL_ERROR);
315 }
316 else
317 {
318 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
319 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
320#ifndef IN_RING3
321 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
322 {
323 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
324 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
325 }
326#endif
327 }
328 }
329
330 return VINF_SUCCESS;
331}
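
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * pgmPhysEnsureHandyPage implements a multi-level low-water-mark scheme:
 * well below the mark it refills immediately, slightly below it only
 * raises the "need handy pages" force-flag (and, in ring-0/raw-mode,
 * schedules a return to ring-3).  The sketch models just that decision;
 * the thresholds are made up and the real ones (PGM_HANDY_PAGES_*) are
 * defined in PGMInternal.h.
 */
typedef enum EXAMPLEHANDYACTION
{
    EXAMPLE_HANDY_NOTHING = 0,   /* plenty of pages left */
    EXAMPLE_HANDY_SET_FF,        /* ask EMT to refill soon */
    EXAMPLE_HANDY_ALLOC_NOW      /* refill before continuing */
} EXAMPLEHANDYACTION;

static EXAMPLEHANDYACTION exampleHandyPageAction(unsigned cHandyPages)
{
    const unsigned cAllocThreshold = 8;    /* assumed low-water mark */
    const unsigned cSetFFThreshold = 32;   /* assumed early-warning mark */

    if (cHandyPages <= cAllocThreshold)
        return EXAMPLE_HANDY_ALLOC_NOW;
    if (cHandyPages <= cSetFFThreshold)
        return EXAMPLE_HANDY_SET_FF;
    return EXAMPLE_HANDY_NOTHING;
}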
332
333
334/**
335 * Replace a zero or shared page with a new page that we can write to.
336 *
337 * @returns The following VBox status codes.
338 * @retval VINF_SUCCESS on success, pPage is modified.
339 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
340 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
341 *
342 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
343 *
344 * @param pVM The VM address.
345 * @param pPage The physical page tracking structure. This will
346 * be modified on success.
347 * @param GCPhys The address of the page.
348 *
349 * @remarks Must be called from within the PGM critical section. It may
350 * nip back to ring-3/0 in some cases.
351 *
352 * @remarks This function shouldn't really fail, however if it does
353 * it probably means we've screwed up the size of handy pages and/or
354 * the low-water mark. Or, that some device I/O is causing a lot of
355 * pages to be allocated while the host is in a low-memory
356 * condition. This latter should be handled elsewhere and in a more
357 * controlled manner, it's on the @bugref{3170} todo list...
358 */
359int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
360{
361 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
362
363 /*
364 * Prereqs.
365 */
366 Assert(PGMIsLocked(pVM));
367 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
368 Assert(!PGM_PAGE_IS_MMIO(pPage));
369
370# ifdef PGM_WITH_LARGE_PAGES
371 if ( PGMIsUsingLargePages(pVM)
372 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
373 {
374 int rc = pgmPhysAllocLargePage(pVM, GCPhys);
375 if (rc == VINF_SUCCESS)
376 return rc;
377
378 /* fall back to 4kb pages. */
379 }
380# endif
381
382 /*
383 * Flush any shadow page table mappings of the page.
384 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
385 */
386 bool fFlushTLBs = false;
387 int rc = pgmPoolTrackFlushGCPhys(pVM, GCPhys, pPage, &fFlushTLBs);
388 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
389
390 /*
391 * Ensure that we've got a page handy, take it and use it.
392 */
393 int rc2 = pgmPhysEnsureHandyPage(pVM);
394 if (RT_FAILURE(rc2))
395 {
396 if (fFlushTLBs)
397 PGM_INVL_ALL_VCPU_TLBS(pVM);
398 Assert(rc2 == VERR_EM_NO_MEMORY);
399 return rc2;
400 }
401 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
402 Assert(PGMIsLocked(pVM));
403 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
404 Assert(!PGM_PAGE_IS_MMIO(pPage));
405
406 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
407 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
408 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
409 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
410 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
411 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
412
413 /*
414 * There are one or two actions to be taken the next time we allocate handy pages:
415 * - Tell the GMM (global memory manager) what the page is being used for.
416 * (Speeds up replacement operations - sharing and defragmenting.)
417 * - If the current backing is shared, it must be freed.
418 */
419 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
420 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
421
422 if (PGM_PAGE_IS_SHARED(pPage))
423 {
424 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
425 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
426 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
427
428 Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
429 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
430 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageReplaceShared));
431 pVM->pgm.s.cSharedPages--;
432 AssertMsgFailed(("TODO: copy shared page content")); /** @todo err.. what about copying the page content? */
433 }
434 else
435 {
436 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
437 STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
438 pVM->pgm.s.cZeroPages--;
439 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
440 }
441
442 /*
443 * Do the PGMPAGE modifications.
444 */
445 pVM->pgm.s.cPrivatePages++;
446 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
447 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
448 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
449 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
450 PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
451
452 if ( fFlushTLBs
453 && rc != VINF_PGM_GCPHYS_ALIASED)
454 PGM_INVL_ALL_VCPU_TLBS(pVM);
455 return rc;
456}
457
458#ifdef PGM_WITH_LARGE_PAGES
459/**
460 * Replace a 2 MB range of zero pages with new pages that we can write to.
461 *
462 * @returns The following VBox status codes.
463 * @retval VINF_SUCCESS on success, pPage is modified.
464 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
465 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
466 *
467 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
468 *
469 * @param pVM The VM address.
470 * @param GCPhys The address of the page.
471 *
472 * @remarks Must be called from within the PGM critical section. It may
473 * nip back to ring-3/0 in some cases.
474 */
475int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
476{
477 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
478 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
479
480 /*
481 * Prereqs.
482 */
483 Assert(PGMIsLocked(pVM));
484 Assert(PGMIsUsingLargePages(pVM));
485
486 PPGMPAGE pPage;
487 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
488 if ( RT_SUCCESS(rc)
489 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
490 {
491 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pPage);
492
493 /* Don't call this function for already allocated pages. */
494 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
495
496 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
497 && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO)
498 {
499 unsigned iPage;
500
501 GCPhys = GCPhysBase;
502
503 /* Lazy approach: check all pages in the 2 MB range.
504 * The whole range must be ram and unallocated
505 */
506 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
507 {
508 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
509 if ( RT_FAILURE(rc)
510 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
511 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ZERO) /* allocated, monitored or shared means we can't use a large page here */
512 {
513 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_STATE(pPage), rc));
514 break;
515 }
516 Assert(PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
517 GCPhys += PAGE_SIZE;
518 }
519 /* Fetch the start page of the 2 MB range again. */
520 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
521 AssertRC(rc); /* can't fail */
522
523 if (iPage != _2M/PAGE_SIZE)
524 {
525 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
526 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
527 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
528 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
529 }
530 else
531 {
532# ifdef IN_RING3
533 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
534# else
535 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
536# endif
537 if (RT_SUCCESS(rc))
538 {
539 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
540 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageAlloc);
541 return VINF_SUCCESS;
542 }
543 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
544
545 /* If we fail once, it most likely means the host's memory is too fragmented; don't bother trying again. */
546 PGMSetLargePageUsage(pVM, false);
547 return rc;
548 }
549 }
550 }
551 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
552}
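
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The large-page path above always works on a naturally aligned 2 MB
 * range, i.e. 512 consecutive 4 KB pages, and a single non-qualifying
 * page disqualifies the whole range.  The sketch shows the alignment and
 * the scan in isolation; the caller supplies a predicate standing in for
 * the RAM/zero-state checks done by the real code.
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE  UINT64_C(0x1000)
#define EXAMPLE_2MB        UINT64_C(0x200000)

static bool exampleRangeUsableAsLargePage(uint64_t GCPhys,
                                          bool (*pfnPageQualifies)(uint64_t GCPhysPage))
{
    uint64_t GCPhysBase = GCPhys & ~(EXAMPLE_2MB - 1);       /* 2 MB aligned base */
    for (uint64_t iPage = 0; iPage < EXAMPLE_2MB / EXAMPLE_PAGE_SIZE; iPage++)
        if (!pfnPageQualifies(GCPhysBase + iPage * EXAMPLE_PAGE_SIZE))
            return false;                                     /* one bad page spoils it */
    return true;
}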
553
554/**
555 * Recheck the entire 2 MB range to see if we can use it again as a large page.
556 *
557 * @returns The following VBox status codes.
558 * @retval VINF_SUCCESS on success, the large page can be used again
559 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
560 *
561 * @param pVM The VM address.
562 * @param GCPhys The address of the page.
563 * @param pLargePage Page structure of the base page
564 */
565int pgmPhysIsValidLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
566{
567 unsigned i;
568
569 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
570
571 GCPhys &= X86_PDE2M_PAE_PG_MASK;
572
573 /* Check the base page. */
574 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
575 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
576 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
577 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
578 {
579 LogFlow(("pgmPhysIsValidLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
580 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
581 }
582
583 /* Check all remaining pages in the 2 MB range. */
584 GCPhys += PAGE_SIZE;
585 for (i = 1; i < _2M/PAGE_SIZE; i++)
586 {
587 PPGMPAGE pPage;
588 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
589 AssertRCBreak(rc);
590
591 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
592 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
593 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
594 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
595 {
596 LogFlow(("pgmPhysIsValidLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
597 break;
598 }
599
600 GCPhys += PAGE_SIZE;
601 }
602 if (i == _2M/PAGE_SIZE)
603 {
604 PGM_PAGE_SET_PDE_TYPE(pLargePage, PGM_PAGE_PDE_TYPE_PDE);
605 Log(("pgmPhysIsValidLargePage: page %RGp can be reused!\n", GCPhys - _2M));
606 return VINF_SUCCESS;
607 }
608
609 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
610}
611
612#endif /* PGM_WITH_LARGE_PAGES */
613
614/**
615 * Deal with a write monitored page.
616 *
617 * @returns VBox strict status code.
618 *
619 * @param pVM The VM address.
620 * @param pPage The physical page tracking structure.
621 *
622 * @remarks Called from within the PGM critical section.
623 */
624void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
625{
626 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
627 PGM_PAGE_SET_WRITTEN_TO(pPage);
628 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
629 Assert(pVM->pgm.s.cMonitoredPages > 0);
630 pVM->pgm.s.cMonitoredPages--;
631 pVM->pgm.s.cWrittenToPages++;
632}
633
634
635/**
636 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
637 *
638 * @returns VBox strict status code.
639 * @retval VINF_SUCCESS on success.
640 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
641 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
642 *
643 * @param pVM The VM address.
644 * @param pPage The physical page tracking structure.
645 * @param GCPhys The address of the page.
646 *
647 * @remarks Called from within the PGM critical section.
648 */
649int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
650{
651 switch (PGM_PAGE_GET_STATE(pPage))
652 {
653 case PGM_PAGE_STATE_WRITE_MONITORED:
654 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
655 /* fall thru */
656 default: /* to shut up GCC */
657 case PGM_PAGE_STATE_ALLOCATED:
658 return VINF_SUCCESS;
659
660 /*
661 * Zero pages can be dummy pages for MMIO or reserved memory,
662 * so we need to check the flags before joining cause with
663 * shared page replacement.
664 */
665 case PGM_PAGE_STATE_ZERO:
666 if (PGM_PAGE_IS_MMIO(pPage))
667 return VERR_PGM_PHYS_PAGE_RESERVED;
668 /* fall thru */
669 case PGM_PAGE_STATE_SHARED:
670 return pgmPhysAllocPage(pVM, pPage, GCPhys);
671 }
672}
673
674
675/**
676 * Wrapper for pgmPhysPageMakeWritable which enters the critsect.
677 *
678 * @returns VBox strict status code.
679 * @retval VINF_SUCCESS on success.
680 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
681 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
682 *
683 * @param pVM The VM address.
684 * @param pPage The physical page tracking structure.
685 * @param GCPhys The address of the page.
686 */
687int pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
688{
689 int rc = pgmLock(pVM);
690 if (RT_SUCCESS(rc))
691 {
692 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
693 pgmUnlock(pVM);
694 }
695 return rc;
696}
697
698
699/**
700 * Internal usage: Map the page specified by its GMM ID.
701 *
702 * This is similar to pgmPhysPageMap.
703 *
704 * @returns VBox status code.
705 *
706 * @param pVM The VM handle.
707 * @param idPage The Page ID.
708 * @param HCPhys The physical address (for RC).
709 * @param ppv Where to store the mapping address.
710 *
711 * @remarks Called from within the PGM critical section. The mapping is only
712 * valid while you're inside this section.
713 */
714int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
715{
716 /*
717 * Validation.
718 */
719 Assert(PGMIsLocked(pVM));
720 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
721 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
722 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
723
724#ifdef IN_RC
725 /*
726 * Map it by HCPhys.
727 */
728 return PGMDynMapHCPage(pVM, HCPhys, ppv);
729
730#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
731 /*
732 * Map it by HCPhys.
733 */
734 return pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
735
736#else
737 /*
738 * Find/make Chunk TLB entry for the mapping chunk.
739 */
740 PPGMCHUNKR3MAP pMap;
741 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
742 if (pTlbe->idChunk == idChunk)
743 {
744 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
745 pMap = pTlbe->pChunk;
746 }
747 else
748 {
749 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
750
751 /*
752 * Find the chunk, map it if necessary.
753 */
754 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
755 if (!pMap)
756 {
757# ifdef IN_RING0
758 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
759 AssertRCReturn(rc, rc);
760 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
761 Assert(pMap);
762# else
763 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
764 if (RT_FAILURE(rc))
765 return rc;
766# endif
767 }
768
769 /*
770 * Enter it into the Chunk TLB.
771 */
772 pTlbe->idChunk = idChunk;
773 pTlbe->pChunk = pMap;
774 pMap->iAge = 0;
775 }
776
777 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
778 return VINF_SUCCESS;
779#endif
780}
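
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * A GMM page ID encodes the owning allocation chunk in its upper bits
 * and the page's index within that chunk in its lower bits, which is why
 * the code above can locate the page by mapping the chunk and adding a
 * simple offset.  The bit split below is made up for illustration; the
 * real one is defined by GMM_CHUNKID_SHIFT / GMM_PAGEID_IDX_MASK.
 */
#include <stdint.h>

#define EXAMPLE_CHUNKID_SHIFT  16u
#define EXAMPLE_PAGEIDX_MASK   ((UINT32_C(1) << EXAMPLE_CHUNKID_SHIFT) - 1)

static uint32_t examplePageIdToChunkId(uint32_t idPage)
{
    return idPage >> EXAMPLE_CHUNKID_SHIFT;
}

static void *examplePageIdToAddress(void *pvChunkMapping, uint32_t idPage)
{
    uint32_t iPageInChunk = idPage & EXAMPLE_PAGEIDX_MASK;
    /* The chunk is mapped contiguously, so the page is a plain offset. */
    return (uint8_t *)pvChunkMapping + ((uintptr_t)iPageInChunk << 12);
}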
781
782
783/**
784 * Maps a page into the current virtual address space so it can be accessed.
785 *
786 * @returns VBox status code.
787 * @retval VINF_SUCCESS on success.
788 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
789 *
790 * @param pVM The VM address.
791 * @param pPage The physical page tracking structure.
792 * @param GCPhys The address of the page.
793 * @param ppMap Where to store the address of the mapping tracking structure.
794 * @param ppv Where to store the mapping address of the page. The page
795 * offset is masked off!
796 *
797 * @remarks Called from within the PGM critical section.
798 */
799static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
800{
801 Assert(PGMIsLocked(pVM));
802
803#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
804 /*
805 * Just some sketchy GC/R0-darwin code.
806 */
807 *ppMap = NULL;
808 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
809 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
810# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
811 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
812# else
813 PGMDynMapHCPage(pVM, HCPhys, ppv);
814# endif
815 return VINF_SUCCESS;
816
817#else /* IN_RING3 || IN_RING0 */
818
819
820 /*
821 * Special case: ZERO and MMIO2 pages.
822 */
823 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
824 if (idChunk == NIL_GMM_CHUNKID)
825 {
826 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
827 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
828 {
829 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
830 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
831 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
832 *ppv = (void *)((uintptr_t)pRam->pvR3 + (uintptr_t)((GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK) - pRam->GCPhys));
833 }
834 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
835 {
836 /** @todo deal with aliased MMIO2 pages somehow...
837 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
838 * them, that would also avoid this mess. It would actually be kind of
839 * elegant... */
840 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
841 }
842 else
843 {
844 /** @todo handle MMIO2 */
845 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
846 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
847 ("pPage=%R[pgmpage]\n", pPage),
848 VERR_INTERNAL_ERROR_2);
849 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
850 }
851 *ppMap = NULL;
852 return VINF_SUCCESS;
853 }
854
855 /*
856 * Find/make Chunk TLB entry for the mapping chunk.
857 */
858 PPGMCHUNKR3MAP pMap;
859 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
860 if (pTlbe->idChunk == idChunk)
861 {
862 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
863 pMap = pTlbe->pChunk;
864 }
865 else
866 {
867 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
868
869 /*
870 * Find the chunk, map it if necessary.
871 */
872 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
873 if (!pMap)
874 {
875#ifdef IN_RING0
876 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
877 AssertRCReturn(rc, rc);
878 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
879 Assert(pMap);
880#else
881 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
882 if (RT_FAILURE(rc))
883 return rc;
884#endif
885 }
886
887 /*
888 * Enter it into the Chunk TLB.
889 */
890 pTlbe->idChunk = idChunk;
891 pTlbe->pChunk = pMap;
892 pMap->iAge = 0;
893 }
894
895 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
896 *ppMap = pMap;
897 return VINF_SUCCESS;
898#endif /* IN_RING3 || IN_RING0 */
899}
900
901
902/**
903 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
904 *
905 * This is typically used in paths where we cannot use the TLB methods (like ROM
906 * pages) or where there is no point in using them since we won't get many hits.
907 *
908 * @returns VBox strict status code.
909 * @retval VINF_SUCCESS on success.
910 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
911 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
912 *
913 * @param pVM The VM address.
914 * @param pPage The physical page tracking structure.
915 * @param GCPhys The address of the page.
916 * @param ppv Where to store the mapping address of the page. The page
917 * offset is masked off!
918 *
919 * @remarks Called from within the PGM critical section. The mapping is only
920 * valid while your inside this section.
921 */
922int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
923{
924 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
925 if (RT_SUCCESS(rc))
926 {
927 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
928 PPGMPAGEMAP pMapIgnore;
929 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
930 if (RT_FAILURE(rc2)) /* preserve rc */
931 rc = rc2;
932 }
933 return rc;
934}
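
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The rc/rc2 handling above preserves an informational status from the
 * first call (e.g. VINF_PGM_SYNC_CR3) unless the follow-up call actually
 * fails, in which case the failure takes precedence.  The same pattern
 * in isolation, using the VBox convention that negative codes are errors.
 */
static int exampleCombineStatus(int rcPrimary, int rcSecondary)
{
    if (rcSecondary < 0)        /* follow-up call failed: its error wins */
        return rcSecondary;
    return rcPrimary;           /* otherwise keep the (informational) primary status */
}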
935
936
937/**
938 * Maps a page into the current virtual address space so it can be accessed for
939 * both writing and reading.
940 *
941 * This is typically used in paths where we cannot use the TLB methods (like ROM
942 * pages) or where there is no point in using them since we won't get many hits.
943 *
944 * @returns VBox status code.
945 * @retval VINF_SUCCESS on success.
946 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
947 *
948 * @param pVM The VM address.
949 * @param pPage The physical page tracking structure. Must be in the
950 * allocated state.
951 * @param GCPhys The address of the page.
952 * @param ppv Where to store the mapping address of the page. The page
953 * offset is masked off!
954 *
955 * @remarks Called from within the PGM critical section. The mapping is only
956 * valid while you're inside this section.
957 */
958int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
959{
960 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
961 PPGMPAGEMAP pMapIgnore;
962 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
963}
964
965
966/**
967 * Maps a page into the current virtual address space so it can be accessed for
968 * reading.
969 *
970 * This is typically used in paths where we cannot use the TLB methods (like ROM
971 * pages) or where there is no point in using them since we won't get many hits.
972 *
973 * @returns VBox status code.
974 * @retval VINF_SUCCESS on success.
975 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
976 *
977 * @param pVM The VM address.
978 * @param pPage The physical page tracking structure.
979 * @param GCPhys The address of the page.
980 * @param ppv Where to store the mapping address of the page. The page
981 * offset is masked off!
982 *
983 * @remarks Called from within the PGM critical section. The mapping is only
984 * valid while you're inside this section.
985 */
986int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
987{
988 PPGMPAGEMAP pMapIgnore;
989 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
990}
991
992
993#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
994/**
995 * Load a guest page into the ring-3 physical TLB.
996 *
997 * @returns VBox status code.
998 * @retval VINF_SUCCESS on success
999 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1000 * @param pPGM The PGM instance pointer.
1001 * @param GCPhys The guest physical address in question.
1002 */
1003int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
1004{
1005 Assert(PGMIsLocked(PGM2VM(pPGM)));
1006 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
1007
1008 /*
1009 * Find the ram range.
1010 * 99.8% of requests are expected to be in the first range.
1011 */
1012 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
1013 RTGCPHYS off = GCPhys - pRam->GCPhys;
1014 if (RT_UNLIKELY(off >= pRam->cb))
1015 {
1016 do
1017 {
1018 pRam = pRam->CTX_SUFF(pNext);
1019 if (!pRam)
1020 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1021 off = GCPhys - pRam->GCPhys;
1022 } while (off >= pRam->cb);
1023 }
1024
1025 /*
1026 * Map the page.
1027 * Make a special case for the zero page as it is kind of special.
1028 */
1029 PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
1030 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1031 if (!PGM_PAGE_IS_ZERO(pPage))
1032 {
1033 void *pv;
1034 PPGMPAGEMAP pMap;
1035 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
1036 if (RT_FAILURE(rc))
1037 return rc;
1038 pTlbe->pMap = pMap;
1039 pTlbe->pv = pv;
1040 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1041 }
1042 else
1043 {
1044 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
1045 pTlbe->pMap = NULL;
1046 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
1047 }
1048#ifdef PGM_WITH_PHYS_TLB
1049 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1050#else
1051 pTlbe->GCPhys = NIL_RTGCPHYS;
1052#endif
1053 pTlbe->pPage = pPage;
1054 return VINF_SUCCESS;
1055}
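
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The range lookup above relies on unsigned wrap-around: when GCPhys lies
 * below the range start, GCPhys - pRam->GCPhys wraps to a huge value, so
 * the single "off >= cb" test rejects addresses on either side of the
 * range.  Simplified stand-alone model with made-up types.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct EXAMPLERAMRANGE
{
    uint64_t                 GCPhys;  /* start of the range */
    uint64_t                 cb;      /* size of the range in bytes */
    struct EXAMPLERAMRANGE  *pNext;   /* next range, ordered by address */
} EXAMPLERAMRANGE;

static EXAMPLERAMRANGE *exampleFindRamRange(EXAMPLERAMRANGE *pFirst, uint64_t GCPhys)
{
    for (EXAMPLERAMRANGE *pRam = pFirst; pRam; pRam = pRam->pNext)
    {
        uint64_t off = GCPhys - pRam->GCPhys;   /* wraps if GCPhys < start */
        if (off < pRam->cb)
            return pRam;
    }
    return NULL;
}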
1056
1057
1058/**
1059 * Load a guest page into the ring-3 physical TLB.
1060 *
1061 * @returns VBox status code.
1062 * @retval VINF_SUCCESS on success
1063 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1064 *
1065 * @param pPGM The PGM instance pointer.
1066 * @param pPage Pointer to the PGMPAGE structure corresponding to
1067 * GCPhys.
1068 * @param GCPhys The guest physical address in question.
1069 */
1070int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1071{
1072 Assert(PGMIsLocked(PGM2VM(pPGM)));
1073 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
1074
1075 /*
1076 * Map the page.
1077 * Make a special case for the zero page as it is kind of special.
1078 */
1079 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1080 if (!PGM_PAGE_IS_ZERO(pPage))
1081 {
1082 void *pv;
1083 PPGMPAGEMAP pMap;
1084 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
1085 if (RT_FAILURE(rc))
1086 return rc;
1087 pTlbe->pMap = pMap;
1088 pTlbe->pv = pv;
1089 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1090 }
1091 else
1092 {
1093 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
1094 pTlbe->pMap = NULL;
1095 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
1096 }
1097#ifdef PGM_WITH_PHYS_TLB
1098 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1099#else
1100 pTlbe->GCPhys = NIL_RTGCPHYS;
1101#endif
1102 pTlbe->pPage = pPage;
1103 return VINF_SUCCESS;
1104}
1105#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1106
1107
1108/**
1109 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1110 * own the PGM lock and therefore not need to lock the mapped page.
1111 *
1112 * @returns VBox status code.
1113 * @retval VINF_SUCCESS on success.
1114 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1115 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1116 *
1117 * @param pVM The VM handle.
1118 * @param GCPhys The guest physical address of the page that should be mapped.
1119 * @param pPage Pointer to the PGMPAGE structure for the page.
1120 * @param ppv Where to store the address corresponding to GCPhys.
1121 *
1122 * @internal
1123 */
1124int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1125{
1126 int rc;
1127 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1128 Assert(PGMIsLocked(pVM));
1129
1130 /*
1131 * Make sure the page is writable.
1132 */
1133 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1134 {
1135 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1136 if (RT_FAILURE(rc))
1137 return rc;
1138 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1139 }
1140 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1141
1142 /*
1143 * Get the mapping address.
1144 */
1145#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1146 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
1147#else
1148 PPGMPAGEMAPTLBE pTlbe;
1149 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1150 if (RT_FAILURE(rc))
1151 return rc;
1152 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1153#endif
1154 return VINF_SUCCESS;
1155}
1156
1157
1158/**
1159 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1160 * own the PGM lock and therefore not need to lock the mapped page.
1161 *
1162 * @returns VBox status code.
1163 * @retval VINF_SUCCESS on success.
1164 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1165 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1166 *
1167 * @param pVM The VM handle.
1168 * @param GCPhys The guest physical address of the page that should be mapped.
1169 * @param pPage Pointer to the PGMPAGE structure for the page.
1170 * @param ppv Where to store the address corresponding to GCPhys.
1171 *
1172 * @internal
1173 */
1174int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
1175{
1176 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1177 Assert(PGMIsLocked(pVM));
1178 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1179
1180 /*
1181 * Get the mapping address.
1182 */
1183#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1184 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1185#else
1186 PPGMPAGEMAPTLBE pTlbe;
1187 int rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1188 if (RT_FAILURE(rc))
1189 return rc;
1190 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1191#endif
1192 return VINF_SUCCESS;
1193}
1194
1195
1196/**
1197 * Requests the mapping of a guest page into the current context.
1198 *
1199 * This API should only be used for very short-term mappings, as it will consume
1200 * scarce resources (R0 and GC) in the mapping cache. When you're done
1201 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1202 *
1203 * This API will assume your intention is to write to the page, and will
1204 * therefore replace shared and zero pages. If you do not intend to modify
1205 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1206 *
1207 * @returns VBox status code.
1208 * @retval VINF_SUCCESS on success.
1209 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1210 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1211 *
1212 * @param pVM The VM handle.
1213 * @param GCPhys The guest physical address of the page that should be mapped.
1214 * @param ppv Where to store the address corresponding to GCPhys.
1215 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1216 *
1217 * @remarks The caller is responsible for dealing with access handlers.
1218 * @todo Add an informational return code for pages with access handlers?
1219 *
1220 * @remark Avoid calling this API from within critical sections (other than the
1221 * PGM one) because of the deadlock risk. External threads may need to
1222 * delegate jobs to the EMTs.
1223 * @thread Any thread.
1224 */
1225VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1226{
1227#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1228
1229 /*
1230 * Find the page and make sure it's writable.
1231 */
1232 PPGMPAGE pPage;
1233 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1234 if (RT_SUCCESS(rc))
1235 {
1236 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1237 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1238 if (RT_SUCCESS(rc))
1239 {
1240 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1241# if 0
1242 pLock->pvMap = 0;
1243 pLock->pvPage = pPage;
1244# else
1245 pLock->u32Dummy = UINT32_MAX;
1246# endif
1247 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1248 rc = VINF_SUCCESS;
1249 }
1250 }
1251
1252#else /* IN_RING3 || IN_RING0 */
1253 int rc = pgmLock(pVM);
1254 AssertRCReturn(rc, rc);
1255
1256 /*
1257 * Query the Physical TLB entry for the page (may fail).
1258 */
1259 PPGMPAGEMAPTLBE pTlbe;
1260 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1261 if (RT_SUCCESS(rc))
1262 {
1263 /*
1264 * If the page is shared, the zero page, or being write monitored
1265 * it must be converted to a page that's writable if possible.
1266 */
1267 PPGMPAGE pPage = pTlbe->pPage;
1268 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1269 {
1270 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1271 if (RT_SUCCESS(rc))
1272 {
1273 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1274 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1275 }
1276 }
1277 if (RT_SUCCESS(rc))
1278 {
1279 /*
1280 * Now, just perform the locking and calculate the return address.
1281 */
1282 PPGMPAGEMAP pMap = pTlbe->pMap;
1283 if (pMap)
1284 pMap->cRefs++;
1285
1286 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1287 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1288 {
1289 if (cLocks == 0)
1290 pVM->pgm.s.cWriteLockedPages++;
1291 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1292 }
1293 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
1294 {
1295 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1296 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
1297 if (pMap)
1298 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1299 }
1300
1301 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1302 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1303 pLock->pvMap = pMap;
1304 }
1305 }
1306
1307 pgmUnlock(pVM);
1308#endif /* IN_RING3 || IN_RING0 */
1309 return rc;
1310}
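
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The per-page lock count only has a few bits, so it is treated as a
 * saturating counter: once it reaches the maximum the page is considered
 * permanently locked instead of risking an overflow, and the release path
 * likewise never decrements a saturated counter.  Minimal model of that
 * idea with a made-up field width.
 */
#include <stdbool.h>

#define EXAMPLE_MAX_LOCKS  15u      /* assumed width of the lock field */

static bool exampleIncLockCount(unsigned *pcLocks)
{
    if (*pcLocks < EXAMPLE_MAX_LOCKS)
    {
        ++*pcLocks;
        return true;                /* normal lock, must be released later */
    }
    return false;                   /* saturated: the page stays locked */
}

static void exampleDecLockCount(unsigned *pcLocks)
{
    if (*pcLocks > 0 && *pcLocks < EXAMPLE_MAX_LOCKS)
        --*pcLocks;                 /* saturated counters are never decremented */
}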
1311
1312
1313/**
1314 * Requests the mapping of a guest page into the current context.
1315 *
1316 * This API should only be used for very short-term mappings, as it will consume
1317 * scarce resources (R0 and GC) in the mapping cache. When you're done
1318 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1319 *
1320 * @returns VBox status code.
1321 * @retval VINF_SUCCESS on success.
1322 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1323 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1324 *
1325 * @param pVM The VM handle.
1326 * @param GCPhys The guest physical address of the page that should be mapped.
1327 * @param ppv Where to store the address corresponding to GCPhys.
1328 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1329 *
1330 * @remarks The caller is responsible for dealing with access handlers.
1331 * @todo Add an informational return code for pages with access handlers?
1332 *
1333 * @remark Avoid calling this API from within critical sections (other than
1334 * the PGM one) because of the deadlock risk.
1335 * @thread Any thread.
1336 */
1337VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1338{
1339#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1340
1341 /*
1342 * Find the page and make sure it's readable.
1343 */
1344 PPGMPAGE pPage;
1345 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1346 if (RT_SUCCESS(rc))
1347 {
1348 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1349 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1350 else
1351 {
1352 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1353# if 0
1354 pLock->pvMap = 0;
1355 pLock->pvPage = pPage;
1356# else
1357 pLock->u32Dummy = UINT32_MAX;
1358# endif
1359 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1360 rc = VINF_SUCCESS;
1361 }
1362 }
1363
1364#else /* IN_RING3 || IN_RING0 */
1365 int rc = pgmLock(pVM);
1366 AssertRCReturn(rc, rc);
1367
1368 /*
1369 * Query the Physical TLB entry for the page (may fail).
1370 */
1371 PPGMPAGEMAPTLBE pTlbe;
1372 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1373 if (RT_SUCCESS(rc))
1374 {
1375 /* MMIO pages don't have any readable backing. */
1376 PPGMPAGE pPage = pTlbe->pPage;
1377 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1378 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1379 else
1380 {
1381 /*
1382 * Now, just perform the locking and calculate the return address.
1383 */
1384 PPGMPAGEMAP pMap = pTlbe->pMap;
1385 if (pMap)
1386 pMap->cRefs++;
1387
1388 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1389 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1390 {
1391 if (cLocks == 0)
1392 pVM->pgm.s.cReadLockedPages++;
1393 PGM_PAGE_INC_READ_LOCKS(pPage);
1394 }
1395 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
1396 {
1397 PGM_PAGE_INC_READ_LOCKS(pPage);
1398 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
1399 if (pMap)
1400 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1401 }
1402
1403 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1404 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1405 pLock->pvMap = pMap;
1406 }
1407 }
1408
1409 pgmUnlock(pVM);
1410#endif /* IN_RING3 || IN_RING0 */
1411 return rc;
1412}
1413
1414
1415/**
1416 * Requests the mapping of a guest page given by virtual address into the current context.
1417 *
1418 * This API should only be used for very short-term mappings, as it will consume
1419 * scarce resources (R0 and GC) in the mapping cache. When you're done
1420 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1421 *
1422 * This API will assume your intention is to write to the page, and will
1423 * therefore replace shared and zero pages. If you do not intend to modify
1424 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1425 *
1426 * @returns VBox status code.
1427 * @retval VINF_SUCCESS on success.
1428 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1429 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1430 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1431 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1432 *
1433 * @param pVCpu VMCPU handle.
1434 * @param GCPtr The guest virtual address of the page that should be mapped.
1435 * @param ppv Where to store the address corresponding to GCPhys.
1436 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1437 *
1438 * @remark Avoid calling this API from within critical sections (other than
1439 * the PGM one) because of the deadlock risk.
1440 * @thread EMT
1441 */
1442VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1443{
1444 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1445 RTGCPHYS GCPhys;
1446 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1447 if (RT_SUCCESS(rc))
1448 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1449 return rc;
1450}
1451
1452
1453/**
1454 * Requests the mapping of a guest page given by virtual address into the current context.
1455 *
1456 * This API should only be used for very short-term mappings, as it will consume
1457 * scarce resources (R0 and GC) in the mapping cache. When you're done
1458 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1459 *
1460 * @returns VBox status code.
1461 * @retval VINF_SUCCESS on success.
1462 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1463 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1464 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1465 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1466 *
1467 * @param pVCpu VMCPU handle.
1468 * @param GCPtr The guest virtual address of the page that should be mapped.
1469 * @param ppv Where to store the address corresponding to GCPhys.
1470 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1471 *
1472 * @remark Avoid calling this API from within critical sections (other than
1473 * the PGM one) because of the deadlock risk.
1474 * @thread EMT
1475 */
1476VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1477{
1478 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1479 RTGCPHYS GCPhys;
1480 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1481 if (RT_SUCCESS(rc))
1482 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1483 return rc;
1484}
1485
1486
1487/**
1488 * Release the mapping of a guest page.
1489 *
1490 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1491 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1492 *
1493 * @param pVM The VM handle.
1494 * @param pLock The lock structure initialized by the mapping function.
1495 */
1496VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1497{
1498#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1499 /* currently nothing to do here. */
1500 Assert(pLock->u32Dummy == UINT32_MAX);
1501 pLock->u32Dummy = 0;
1502
1503#else /* IN_RING3 */
1504 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1505 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1506 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1507
1508 pLock->uPageAndType = 0;
1509 pLock->pvMap = NULL;
1510
1511 pgmLock(pVM);
1512 if (fWriteLock)
1513 {
1514 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1515 Assert(cLocks > 0);
1516 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1517 {
1518 if (cLocks == 1)
1519 {
1520 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1521 pVM->pgm.s.cWriteLockedPages--;
1522 }
1523 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1524 }
1525
1526 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1527 {
1528 PGM_PAGE_SET_WRITTEN_TO(pPage);
1529 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1530 Assert(pVM->pgm.s.cMonitoredPages > 0);
1531 pVM->pgm.s.cMonitoredPages--;
1532 pVM->pgm.s.cWrittenToPages++;
1533 }
1534 }
1535 else
1536 {
1537 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1538 Assert(cLocks > 0);
1539 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1540 {
1541 if (cLocks == 1)
1542 {
1543 Assert(pVM->pgm.s.cReadLockedPages > 0);
1544 pVM->pgm.s.cReadLockedPages--;
1545 }
1546 PGM_PAGE_DEC_READ_LOCKS(pPage);
1547 }
1548 }
1549
1550 if (pMap)
1551 {
1552 Assert(pMap->cRefs >= 1);
1553 pMap->cRefs--;
1554 pMap->iAge = 0;
1555 }
1556 pgmUnlock(pVM);
1557#endif /* IN_RING3 */
1558}
1559
1560
1561/**
1562 * Converts a GC physical address to a HC ring-3 pointer.
1563 *
1564 * @returns VINF_SUCCESS on success.
1565 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1566 * page but has no physical backing.
1567 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1568 * GC physical address.
1569 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1570 * a dynamic RAM chunk boundary.
1571 *
1572 * @param pVM The VM handle.
1573 * @param GCPhys The GC physical address to convert.
1574 * @param cbRange Physical range
1575 * @param pR3Ptr Where to store the R3 pointer on success.
1576 *
1577 * @deprecated Avoid when possible!
1578 */
1579VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
1580{
1581/** @todo this is kind of hacky and needs some more work. */
1582#ifndef DEBUG_sandervl
1583 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1584#endif
1585
1586 Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): dont use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
1587#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1588 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1589#else
1590 pgmLock(pVM);
1591
1592 PPGMRAMRANGE pRam;
1593 PPGMPAGE pPage;
1594 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
1595 if (RT_SUCCESS(rc))
1596 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
1597
1598 pgmUnlock(pVM);
1599 Assert(rc <= VINF_SUCCESS);
1600 return rc;
1601#endif
1602}
1603
1604
1605#ifdef VBOX_STRICT
1606/**
1607 * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
1608 *
1609 * @returns The R3Ptr, NIL_RTR3PTR on failure.
1610 * @param pVM The VM handle.
1611 * @param GCPhys The GC physical address.
1612 * @param cbRange Physical range.
1613 *
1614 * @deprecated Avoid when possible.
1615 */
1616VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
1617{
1618 RTR3PTR R3Ptr;
1619 int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
1620 if (RT_SUCCESS(rc))
1621 return R3Ptr;
1622 return NIL_RTR3PTR;
1623}
1624#endif /* VBOX_STRICT */
1625
1626
1627/**
1628 * Converts a guest pointer to a GC physical address.
1629 *
1630 * This uses the current CR3/CR0/CR4 of the guest.
1631 *
1632 * @returns VBox status code.
1633 * @param pVCpu The VMCPU Handle
1634 * @param GCPtr The guest pointer to convert.
1635 * @param pGCPhys Where to store the GC physical address.
1636 */
1637VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1638{
1639 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1640 if (pGCPhys && RT_SUCCESS(rc))
1641 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1642 return rc;
1643}
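
/*
 * Usage sketch (illustrative comment only, not compiled): translating a guest
 * linear address with the current paging mode before doing a physical access.
 * 'GCPtrGuest' and 'u32Val' are hypothetical names, and the example assumes the
 * 4 bytes do not cross a page boundary.
 *
 *     uint32_t u32Val;
 *     RTGCPHYS GCPhys;
 *     int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtrGuest, &GCPhys);
 *     if (RT_SUCCESS(rc))
 *         rc = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &u32Val, sizeof(u32Val));
 *     // Typical failures: VERR_PAGE_NOT_PRESENT / VERR_PAGE_TABLE_NOT_PRESENT.
 */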
1644
1645
1646/**
1647 * Converts a guest pointer to a HC physical address.
1648 *
1649 * This uses the current CR3/CR0/CR4 of the guest.
1650 *
1651 * @returns VBox status code.
1652 * @param pVCpu The VMCPU Handle
1653 * @param GCPtr The guest pointer to convert.
1654 * @param pHCPhys Where to store the HC physical address.
1655 */
1656VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
1657{
1658 PVM pVM = pVCpu->CTX_SUFF(pVM);
1659 RTGCPHYS GCPhys;
1660 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1661 if (RT_SUCCESS(rc))
1662 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
1663 return rc;
1664}
1665
1666
1667/**
1668 * Converts a guest pointer to a R3 pointer.
1669 *
1670 * This uses the current CR3/CR0/CR4 of the guest.
1671 *
1672 * @returns VBox status code.
1673 * @param pVCpu The VMCPU Handle
1674 * @param GCPtr The guest pointer to convert.
1675 * @param pR3Ptr Where to store the R3 virtual address.
1676 *
1677 * @deprecated Don't use this.
1678 */
1679VMMDECL(int) PGMPhysGCPtr2R3Ptr(PVMCPU pVCpu, RTGCPTR GCPtr, PRTR3PTR pR3Ptr)
1680{
1681 PVM pVM = pVCpu->CTX_SUFF(pVM);
1682 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1683 RTGCPHYS GCPhys;
1684 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1685 if (RT_SUCCESS(rc))
1686 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pR3Ptr);
1687 return rc;
1688}
1689
1690
1691
1692#undef LOG_GROUP
1693#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
1694
1695
1696#ifdef IN_RING3
1697/**
1698 * Caches a PGMPhys memory access.
1699 *
1700 * @param pVM VM Handle.
1701 * @param pCache Cache structure pointer
1702 * @param GCPhys GC physical address
1703 * @param pbR3 R3 pointer corresponding to the physical page
1704 *
1705 * @thread EMT.
1706 */
1707static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
1708{
1709 uint32_t iCacheIndex;
1710
1711 Assert(VM_IS_EMT(pVM));
1712
1713 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
1714 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
1715
1716 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
1717
1718 ASMBitSet(&pCache->aEntries, iCacheIndex);
1719
1720 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
1721 pCache->Entry[iCacheIndex].pbR3 = pbR3;
1722}
1723#endif /* IN_RING3 */
1724
1725
1726/**
1727 * Deals with reading from a page with one or more ALL access handlers.
1728 *
1729 * @returns VBox status code. Can be ignored in ring-3.
1730 * @retval VINF_SUCCESS.
1731 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1732 *
1733 * @param pVM The VM handle.
1734 * @param pPage The page descriptor.
1735 * @param GCPhys The physical address to start reading at.
1736 * @param pvBuf Where to put the bits we read.
1737 * @param cb How much to read - less or equal to a page.
1738 */
1739static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
1740{
1741 /*
1742     * The most frequent accesses here are MMIO and shadowed ROM.
1743     * The current code ASSUMES all these access handlers cover full pages!
1744 */
1745
1746 /*
1747 * Whatever we do we need the source page, map it first.
1748 */
1749 const void *pvSrc = NULL;
1750 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
1751 if (RT_FAILURE(rc))
1752 {
1753 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1754 GCPhys, pPage, rc));
1755 memset(pvBuf, 0xff, cb);
1756 return VINF_SUCCESS;
1757 }
1758 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1759
1760 /*
1761 * Deal with any physical handlers.
1762 */
1763 PPGMPHYSHANDLER pPhys = NULL;
1764 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
1765 {
1766#ifdef IN_RING3
1767 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1768 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1769 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
1770 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
1771 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1772 Assert(pPhys->CTX_SUFF(pfnHandler));
1773
1774 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
1775 void *pvUser = pPhys->CTX_SUFF(pvUser);
1776
1777 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
1778 STAM_PROFILE_START(&pPhys->Stat, h);
1779 Assert(PGMIsLockOwner(pVM));
1780 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1781 pgmUnlock(pVM);
1782 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
1783 pgmLock(pVM);
1784# ifdef VBOX_WITH_STATISTICS
1785 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1786 if (pPhys)
1787 STAM_PROFILE_STOP(&pPhys->Stat, h);
1788# else
1789 pPhys = NULL; /* might not be valid anymore. */
1790# endif
1791 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
1792#else
1793 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1794 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1795 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1796#endif
1797 }
1798
1799 /*
1800 * Deal with any virtual handlers.
1801 */
1802 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
1803 {
1804 unsigned iPage;
1805 PPGMVIRTHANDLER pVirt;
1806
1807 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
1808 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
1809 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
1810 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1811 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
1812
1813#ifdef IN_RING3
1814 if (pVirt->pfnHandlerR3)
1815 {
1816 if (!pPhys)
1817 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1818 else
1819 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
1820 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1821 + (iPage << PAGE_SHIFT)
1822 + (GCPhys & PAGE_OFFSET_MASK);
1823
1824 STAM_PROFILE_START(&pVirt->Stat, h);
1825 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
1826 STAM_PROFILE_STOP(&pVirt->Stat, h);
1827 if (rc2 == VINF_SUCCESS)
1828 rc = VINF_SUCCESS;
1829 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
1830 }
1831 else
1832 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1833#else
1834 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1835 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1836 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1837#endif
1838 }
1839
1840 /*
1841 * Take the default action.
1842 */
1843 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1844 memcpy(pvBuf, pvSrc, cb);
1845 return rc;
1846}
1847
1848
1849/**
1850 * Read physical memory.
1851 *
1852 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
1853 * want to ignore those.
1854 *
1855 * @returns VBox status code. Can be ignored in ring-3.
1856 * @retval VINF_SUCCESS.
1857 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1858 *
1859 * @param pVM VM Handle.
1860 * @param GCPhys Physical address start reading from.
1861 * @param pvBuf Where to put the read bits.
1862 * @param cbRead How many bytes to read.
1863 */
1864VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1865{
1866 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
1867 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
1868
1869 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysRead));
1870 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
1871
1872 pgmLock(pVM);
1873
1874 /*
1875 * Copy loop on ram ranges.
1876 */
1877 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1878 for (;;)
1879 {
1880 /* Find range. */
1881 while (pRam && GCPhys > pRam->GCPhysLast)
1882 pRam = pRam->CTX_SUFF(pNext);
1883 /* Inside range or not? */
1884 if (pRam && GCPhys >= pRam->GCPhys)
1885 {
1886 /*
1887             * Must work our way through this page by page.
1888 */
1889 RTGCPHYS off = GCPhys - pRam->GCPhys;
1890 while (off < pRam->cb)
1891 {
1892 unsigned iPage = off >> PAGE_SHIFT;
1893 PPGMPAGE pPage = &pRam->aPages[iPage];
1894 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1895 if (cb > cbRead)
1896 cb = cbRead;
1897
1898 /*
1899 * Any ALL access handlers?
1900 */
1901 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
1902 {
1903 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
1904 if (RT_FAILURE(rc))
1905 {
1906 pgmUnlock(pVM);
1907 return rc;
1908 }
1909 }
1910 else
1911 {
1912 /*
1913 * Get the pointer to the page.
1914 */
1915 const void *pvSrc;
1916 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
1917 if (RT_SUCCESS(rc))
1918 memcpy(pvBuf, pvSrc, cb);
1919 else
1920 {
1921 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1922 pRam->GCPhys + off, pPage, rc));
1923 memset(pvBuf, 0xff, cb);
1924 }
1925 }
1926
1927 /* next page */
1928 if (cb >= cbRead)
1929 {
1930 pgmUnlock(pVM);
1931 return VINF_SUCCESS;
1932 }
1933 cbRead -= cb;
1934 off += cb;
1935 pvBuf = (char *)pvBuf + cb;
1936 } /* walk pages in ram range. */
1937
1938 GCPhys = pRam->GCPhysLast + 1;
1939 }
1940 else
1941 {
1942 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
1943
1944 /*
1945 * Unassigned address space.
1946 */
1947 if (!pRam)
1948 break;
1949 size_t cb = pRam->GCPhys - GCPhys;
1950 if (cb >= cbRead)
1951 {
1952 memset(pvBuf, 0xff, cbRead);
1953 break;
1954 }
1955 memset(pvBuf, 0xff, cb);
1956
1957 cbRead -= cb;
1958 pvBuf = (char *)pvBuf + cb;
1959 GCPhys += cb;
1960 }
1961 } /* Ram range walk */
1962
1963 pgmUnlock(pVM);
1964 return VINF_SUCCESS;
1965}
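
/*
 * Usage sketch (illustrative comment only, not compiled): a ring-3 caller
 * reading guest physical memory through this handler-aware path.  The buffer
 * and start address names are made up.
 *
 *     uint8_t abBuf[256];
 *     int rc = PGMPhysRead(pVM, GCPhysStart, abBuf, sizeof(abBuf));
 *     AssertRC(rc); // ring-3 always gets VINF_SUCCESS; only R0/RC can see
 *                   // VERR_PGM_PHYS_WR_HIT_HANDLER and must redo the read in R3.
 */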
1966
1967
1968/**
1969 * Deals with writing to a page with one or more WRITE or ALL access handlers.
1970 *
1971 * @returns VBox status code. Can be ignored in ring-3.
1972 * @retval VINF_SUCCESS.
1973 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1974 *
1975 * @param pVM The VM handle.
1976 * @param pPage The page descriptor.
1977 * @param GCPhys The physical address to start writing at.
1978 * @param pvBuf What to write.
1979 * @param cbWrite How much to write - less or equal to a page.
1980 */
1981static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
1982{
1983 void *pvDst = NULL;
1984 int rc;
1985
1986 /*
1987 * Give priority to physical handlers (like #PF does).
1988 *
1989 * Hope for a lonely physical handler first that covers the whole
1990 * write area. This should be a pretty frequent case with MMIO and
1991 * the heavy usage of full page handlers in the page pool.
1992 */
1993 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
1994 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
1995 {
1996 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1997 if (pCur)
1998 {
1999 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2000 Assert(pCur->CTX_SUFF(pfnHandler));
2001
2002 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2003 if (cbRange > cbWrite)
2004 cbRange = cbWrite;
2005
2006#ifndef IN_RING3
2007 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2008 NOREF(cbRange);
2009 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2010 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2011
2012#else /* IN_RING3 */
2013 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2014 if (!PGM_PAGE_IS_MMIO(pPage))
2015 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2016 else
2017 rc = VINF_SUCCESS;
2018 if (RT_SUCCESS(rc))
2019 {
2020 PFNPGMR3PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
2021 void *pvUser = pCur->CTX_SUFF(pvUser);
2022
2023 STAM_PROFILE_START(&pCur->Stat, h);
2024 Assert(PGMIsLockOwner(pVM));
2025 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2026 pgmUnlock(pVM);
2027 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2028 pgmLock(pVM);
2029# ifdef VBOX_WITH_STATISTICS
2030 pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2031 if (pCur)
2032 STAM_PROFILE_STOP(&pCur->Stat, h);
2033# else
2034 pCur = NULL; /* might not be valid anymore. */
2035# endif
2036 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2037 memcpy(pvDst, pvBuf, cbRange);
2038 else
2039 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
2040 }
2041 else
2042 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2043 GCPhys, pPage, rc), rc);
2044 if (RT_LIKELY(cbRange == cbWrite))
2045 return VINF_SUCCESS;
2046
2047 /* more fun to be had below */
2048 cbWrite -= cbRange;
2049 GCPhys += cbRange;
2050 pvBuf = (uint8_t *)pvBuf + cbRange;
2051 pvDst = (uint8_t *)pvDst + cbRange;
2052#endif /* IN_RING3 */
2053 }
2054 /* else: the handler is somewhere else in the page, deal with it below. */
2055 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2056 }
2057 /*
2058 * A virtual handler without any interfering physical handlers.
2059     * Hopefully it'll cover the whole write.
2060 */
2061 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2062 {
2063 unsigned iPage;
2064 PPGMVIRTHANDLER pCur;
2065 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
2066 if (RT_SUCCESS(rc))
2067 {
2068 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2069 if (cbRange > cbWrite)
2070 cbRange = cbWrite;
2071
2072#ifndef IN_RING3
2073 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2074 NOREF(cbRange);
2075 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2076 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2077
2078#else /* IN_RING3 */
2079
2080 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2081 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2082 if (RT_SUCCESS(rc))
2083 {
2084 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2085 if (pCur->pfnHandlerR3)
2086 {
2087 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
2088 + (iPage << PAGE_SHIFT)
2089 + (GCPhys & PAGE_OFFSET_MASK);
2090
2091 STAM_PROFILE_START(&pCur->Stat, h);
2092 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2093 STAM_PROFILE_STOP(&pCur->Stat, h);
2094 }
2095 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2096 memcpy(pvDst, pvBuf, cbRange);
2097 else
2098 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2099 }
2100 else
2101 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2102 GCPhys, pPage, rc), rc);
2103 if (RT_LIKELY(cbRange == cbWrite))
2104 return VINF_SUCCESS;
2105
2106 /* more fun to be had below */
2107 cbWrite -= cbRange;
2108 GCPhys += cbRange;
2109 pvBuf = (uint8_t *)pvBuf + cbRange;
2110 pvDst = (uint8_t *)pvDst + cbRange;
2111#endif
2112 }
2113 /* else: the handler is somewhere else in the page, deal with it below. */
2114 }
2115
2116 /*
2117 * Deal with all the odd ends.
2118 */
2119
2120 /* We need a writable destination page. */
2121 if (!pvDst)
2122 {
2123 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2124 AssertLogRelMsgReturn(RT_SUCCESS(rc),
2125 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2126 GCPhys, pPage, rc), rc);
2127 }
2128
2129 /* The loop state (big + ugly). */
2130 unsigned iVirtPage = 0;
2131 PPGMVIRTHANDLER pVirt = NULL;
2132 uint32_t offVirt = PAGE_SIZE;
2133 uint32_t offVirtLast = PAGE_SIZE;
2134 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2135
2136 PPGMPHYSHANDLER pPhys = NULL;
2137 uint32_t offPhys = PAGE_SIZE;
2138 uint32_t offPhysLast = PAGE_SIZE;
2139 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2140
2141 /* The loop. */
2142 for (;;)
2143 {
2144 /*
2145 * Find the closest handler at or above GCPhys.
2146 */
2147 if (fMoreVirt && !pVirt)
2148 {
2149 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
2150 if (RT_SUCCESS(rc))
2151 {
2152 offVirt = 0;
2153 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2154 }
2155 else
2156 {
2157 PPGMPHYS2VIRTHANDLER pVirtPhys;
2158 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2159 GCPhys, true /* fAbove */);
2160 if ( pVirtPhys
2161 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2162 {
2163 /* ASSUME that pVirtPhys only covers one page. */
2164 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2165 Assert(pVirtPhys->Core.Key > GCPhys);
2166
2167 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2168 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2169 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2170 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2171 }
2172 else
2173 {
2174 pVirt = NULL;
2175 fMoreVirt = false;
2176 offVirt = offVirtLast = PAGE_SIZE;
2177 }
2178 }
2179 }
2180
2181 if (fMorePhys && !pPhys)
2182 {
2183 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2184 if (pPhys)
2185 {
2186 offPhys = 0;
2187 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2188 }
2189 else
2190 {
2191 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2192 GCPhys, true /* fAbove */);
2193 if ( pPhys
2194 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2195 {
2196 offPhys = pPhys->Core.Key - GCPhys;
2197 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2198 }
2199 else
2200 {
2201 pPhys = NULL;
2202 fMorePhys = false;
2203 offPhys = offPhysLast = PAGE_SIZE;
2204 }
2205 }
2206 }
2207
2208 /*
2209 * Handle access to space without handlers (that's easy).
2210 */
2211 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2212 uint32_t cbRange = (uint32_t)cbWrite;
2213 if (offPhys && offVirt)
2214 {
2215 if (cbRange > offPhys)
2216 cbRange = offPhys;
2217 if (cbRange > offVirt)
2218 cbRange = offVirt;
2219 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2220 }
2221 /*
2222 * Physical handler.
2223 */
2224 else if (!offPhys && offVirt)
2225 {
2226 if (cbRange > offPhysLast + 1)
2227 cbRange = offPhysLast + 1;
2228 if (cbRange > offVirt)
2229 cbRange = offVirt;
2230#ifdef IN_RING3
2231 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2232 void *pvUser = pPhys->CTX_SUFF(pvUser);
2233
2234 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2235 STAM_PROFILE_START(&pPhys->Stat, h);
2236 Assert(PGMIsLockOwner(pVM));
2237 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2238 pgmUnlock(pVM);
2239 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2240 pgmLock(pVM);
2241# ifdef VBOX_WITH_STATISTICS
2242 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2243 if (pPhys)
2244 STAM_PROFILE_STOP(&pPhys->Stat, h);
2245# else
2246 pPhys = NULL; /* might not be valid anymore. */
2247# endif
2248 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2249#else
2250 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2251 NOREF(cbRange);
2252 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2253 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2254#endif
2255 }
2256 /*
2257 * Virtual handler.
2258 */
2259 else if (offPhys && !offVirt)
2260 {
2261 if (cbRange > offVirtLast + 1)
2262 cbRange = offVirtLast + 1;
2263 if (cbRange > offPhys)
2264 cbRange = offPhys;
2265#ifdef IN_RING3
2266        Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2267 if (pVirt->pfnHandlerR3)
2268 {
2269 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2270 + (iVirtPage << PAGE_SHIFT)
2271 + (GCPhys & PAGE_OFFSET_MASK);
2272 STAM_PROFILE_START(&pVirt->Stat, h);
2273 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2274 STAM_PROFILE_STOP(&pVirt->Stat, h);
2275 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2276 }
2277 pVirt = NULL;
2278#else
2279 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2280 NOREF(cbRange);
2281 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2282 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2283#endif
2284 }
2285 /*
2286 * Both... give the physical one priority.
2287 */
2288 else
2289 {
2290 Assert(!offPhys && !offVirt);
2291 if (cbRange > offVirtLast + 1)
2292 cbRange = offVirtLast + 1;
2293 if (cbRange > offPhysLast + 1)
2294 cbRange = offPhysLast + 1;
2295
2296#ifdef IN_RING3
2297 if (pVirt->pfnHandlerR3)
2298 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2299 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2300
2301 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2302 void *pvUser = pPhys->CTX_SUFF(pvUser);
2303
2304 STAM_PROFILE_START(&pPhys->Stat, h);
2305 Assert(PGMIsLockOwner(pVM));
2306 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2307 pgmUnlock(pVM);
2308 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2309 pgmLock(pVM);
2310# ifdef VBOX_WITH_STATISTICS
2311 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2312 if (pPhys)
2313 STAM_PROFILE_STOP(&pPhys->Stat, h);
2314# else
2315 pPhys = NULL; /* might not be valid anymore. */
2316# endif
2317 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2318 if (pVirt->pfnHandlerR3)
2319 {
2320
2321 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2322 + (iVirtPage << PAGE_SHIFT)
2323 + (GCPhys & PAGE_OFFSET_MASK);
2324 STAM_PROFILE_START(&pVirt->Stat, h2);
2325 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2326 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2327 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2328 rc = VINF_SUCCESS;
2329 else
2330 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2331 }
2332 pPhys = NULL;
2333 pVirt = NULL;
2334#else
2335 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2336 NOREF(cbRange);
2337 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2338 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2339#endif
2340 }
2341 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2342 memcpy(pvDst, pvBuf, cbRange);
2343
2344 /*
2345 * Advance if we've got more stuff to do.
2346 */
2347 if (cbRange >= cbWrite)
2348 return VINF_SUCCESS;
2349
2350 cbWrite -= cbRange;
2351 GCPhys += cbRange;
2352 pvBuf = (uint8_t *)pvBuf + cbRange;
2353 pvDst = (uint8_t *)pvDst + cbRange;
2354
2355 offPhys -= cbRange;
2356 offPhysLast -= cbRange;
2357 offVirt -= cbRange;
2358 offVirtLast -= cbRange;
2359 }
2360}
2361
2362
2363/**
2364 * Write to physical memory.
2365 *
2366 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2367 * want to ignore those.
2368 *
2369 * @returns VBox status code. Can be ignored in ring-3.
2370 * @retval VINF_SUCCESS.
2371 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2372 *
2373 * @param pVM VM Handle.
2374 * @param GCPhys Physical address to write to.
2375 * @param pvBuf What to write.
2376 * @param cbWrite How many bytes to write.
2377 */
2378VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2379{
2380 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2381 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2382 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2383
2384 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWrite));
2385 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2386
2387 pgmLock(pVM);
2388
2389 /*
2390 * Copy loop on ram ranges.
2391 */
2392 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2393 for (;;)
2394 {
2395 /* Find range. */
2396 while (pRam && GCPhys > pRam->GCPhysLast)
2397 pRam = pRam->CTX_SUFF(pNext);
2398 /* Inside range or not? */
2399 if (pRam && GCPhys >= pRam->GCPhys)
2400 {
2401 /*
2402             * Must work our way through this page by page.
2403 */
2404 RTGCPTR off = GCPhys - pRam->GCPhys;
2405 while (off < pRam->cb)
2406 {
2407 RTGCPTR iPage = off >> PAGE_SHIFT;
2408 PPGMPAGE pPage = &pRam->aPages[iPage];
2409 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2410 if (cb > cbWrite)
2411 cb = cbWrite;
2412
2413 /*
2414 * Any active WRITE or ALL access handlers?
2415 */
2416 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2417 {
2418 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2419 if (RT_FAILURE(rc))
2420 {
2421 pgmUnlock(pVM);
2422 return rc;
2423 }
2424 }
2425 else
2426 {
2427 /*
2428 * Get the pointer to the page.
2429 */
2430 void *pvDst;
2431 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
2432 if (RT_SUCCESS(rc))
2433 memcpy(pvDst, pvBuf, cb);
2434 else
2435 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2436 pRam->GCPhys + off, pPage, rc));
2437 }
2438
2439 /* next page */
2440 if (cb >= cbWrite)
2441 {
2442 pgmUnlock(pVM);
2443 return VINF_SUCCESS;
2444 }
2445
2446 cbWrite -= cb;
2447 off += cb;
2448 pvBuf = (const char *)pvBuf + cb;
2449 } /* walk pages in ram range */
2450
2451 GCPhys = pRam->GCPhysLast + 1;
2452 }
2453 else
2454 {
2455 /*
2456 * Unassigned address space, skip it.
2457 */
2458 if (!pRam)
2459 break;
2460 size_t cb = pRam->GCPhys - GCPhys;
2461 if (cb >= cbWrite)
2462 break;
2463 cbWrite -= cb;
2464 pvBuf = (const char *)pvBuf + cb;
2465 GCPhys += cb;
2466 }
2467 } /* Ram range walk */
2468
2469 pgmUnlock(pVM);
2470 return VINF_SUCCESS;
2471}
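
/*
 * Usage sketch (illustrative comment only, not compiled): writing guest
 * physical memory with handler dispatch, e.g. from ring-3 device code.
 * 'GCPhysDst' and 'au8Data' are hypothetical.
 *
 *     int rc = PGMPhysWrite(pVM, GCPhysDst, au8Data, sizeof(au8Data));
 *     // Ring-3 callers always get VINF_SUCCESS; in R0/RC the call may return
 *     // VERR_PGM_PHYS_WR_HIT_HANDLER, meaning the write must be retried in ring-3.
 */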
2472
2473
2474/**
2475 * Read from guest physical memory by GC physical address, bypassing
2476 * MMIO and access handlers.
2477 *
2478 * @returns VBox status.
2479 * @param pVM VM handle.
2480 * @param pvDst The destination address.
2481 * @param GCPhysSrc The source address (GC physical address).
2482 * @param cb The number of bytes to read.
2483 */
2484VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2485{
2486 /*
2487 * Treat the first page as a special case.
2488 */
2489 if (!cb)
2490 return VINF_SUCCESS;
2491
2492 /* map the 1st page */
2493 void const *pvSrc;
2494 PGMPAGEMAPLOCK Lock;
2495 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2496 if (RT_FAILURE(rc))
2497 return rc;
2498
2499 /* optimize for the case where access is completely within the first page. */
2500 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2501 if (RT_LIKELY(cb <= cbPage))
2502 {
2503 memcpy(pvDst, pvSrc, cb);
2504 PGMPhysReleasePageMappingLock(pVM, &Lock);
2505 return VINF_SUCCESS;
2506 }
2507
2508 /* copy to the end of the page. */
2509 memcpy(pvDst, pvSrc, cbPage);
2510 PGMPhysReleasePageMappingLock(pVM, &Lock);
2511 GCPhysSrc += cbPage;
2512 pvDst = (uint8_t *)pvDst + cbPage;
2513 cb -= cbPage;
2514
2515 /*
2516 * Page by page.
2517 */
2518 for (;;)
2519 {
2520 /* map the page */
2521 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2522 if (RT_FAILURE(rc))
2523 return rc;
2524
2525 /* last page? */
2526 if (cb <= PAGE_SIZE)
2527 {
2528 memcpy(pvDst, pvSrc, cb);
2529 PGMPhysReleasePageMappingLock(pVM, &Lock);
2530 return VINF_SUCCESS;
2531 }
2532
2533 /* copy the entire page and advance */
2534 memcpy(pvDst, pvSrc, PAGE_SIZE);
2535 PGMPhysReleasePageMappingLock(pVM, &Lock);
2536 GCPhysSrc += PAGE_SIZE;
2537 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2538 cb -= PAGE_SIZE;
2539 }
2540 /* won't ever get here. */
2541}
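
/*
 * Usage sketch (illustrative comment only, not compiled): reading raw guest RAM
 * while deliberately bypassing MMIO and access handlers, as a debugger or
 * saved-state style consumer would.  'GCPhysLog' and the buffer are made up.
 *
 *     char achLine[128];
 *     int rc = PGMPhysSimpleReadGCPhys(pVM, achLine, GCPhysLog, sizeof(achLine));
 *     // No handler ever runs here; unbacked pages simply make the call fail with
 *     // the status from the internal read-only mapping.
 */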
2542
2543
2544/**
2545 * Write memory to a GC physical address in guest physical memory.
2547 *
2548 * This will bypass MMIO and access handlers.
2549 *
2550 * @returns VBox status.
2551 * @param pVM VM handle.
2552 * @param GCPhysDst The GC physical address of the destination.
2553 * @param pvSrc The source buffer.
2554 * @param cb The number of bytes to write.
2555 */
2556VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2557{
2558 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2559
2560 /*
2561 * Treat the first page as a special case.
2562 */
2563 if (!cb)
2564 return VINF_SUCCESS;
2565
2566 /* map the 1st page */
2567 void *pvDst;
2568 PGMPAGEMAPLOCK Lock;
2569 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2570 if (RT_FAILURE(rc))
2571 return rc;
2572
2573 /* optimize for the case where access is completely within the first page. */
2574 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2575 if (RT_LIKELY(cb <= cbPage))
2576 {
2577 memcpy(pvDst, pvSrc, cb);
2578 PGMPhysReleasePageMappingLock(pVM, &Lock);
2579 return VINF_SUCCESS;
2580 }
2581
2582 /* copy to the end of the page. */
2583 memcpy(pvDst, pvSrc, cbPage);
2584 PGMPhysReleasePageMappingLock(pVM, &Lock);
2585 GCPhysDst += cbPage;
2586 pvSrc = (const uint8_t *)pvSrc + cbPage;
2587 cb -= cbPage;
2588
2589 /*
2590 * Page by page.
2591 */
2592 for (;;)
2593 {
2594 /* map the page */
2595 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2596 if (RT_FAILURE(rc))
2597 return rc;
2598
2599 /* last page? */
2600 if (cb <= PAGE_SIZE)
2601 {
2602 memcpy(pvDst, pvSrc, cb);
2603 PGMPhysReleasePageMappingLock(pVM, &Lock);
2604 return VINF_SUCCESS;
2605 }
2606
2607 /* copy the entire page and advance */
2608 memcpy(pvDst, pvSrc, PAGE_SIZE);
2609 PGMPhysReleasePageMappingLock(pVM, &Lock);
2610 GCPhysDst += PAGE_SIZE;
2611 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2612 cb -= PAGE_SIZE;
2613 }
2614 /* won't ever get here. */
2615}
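
/*
 * Usage sketch (illustrative comment only, not compiled): patching guest RAM
 * directly without invoking any write handlers.  'GCPhysPatch' is hypothetical.
 *
 *     static const uint8_t s_abNops[2] = { 0x90, 0x90 };
 *     int rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysPatch, s_abNops, sizeof(s_abNops));
 *     // A PGMPAGEMAPLOCK is taken and released per page, so writes crossing page
 *     // boundaries are handled transparently.
 */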
2616
2617
2618/**
2619 * Read from guest physical memory referenced by GC pointer.
2620 *
2621 * This function uses the current CR3/CR0/CR4 of the guest and will
2622 * bypass access handlers and not set any accessed bits.
2623 *
2624 * @returns VBox status.
2625 * @param pVCpu The VMCPU handle.
2626 * @param pvDst The destination address.
2627 * @param GCPtrSrc The source address (GC pointer).
2628 * @param cb The number of bytes to read.
2629 */
2630VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2631{
2632 PVM pVM = pVCpu->CTX_SUFF(pVM);
2633
2634 /*
2635 * Treat the first page as a special case.
2636 */
2637 if (!cb)
2638 return VINF_SUCCESS;
2639
2640 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleRead));
2641 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
2642
2643 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
2644 * when many VCPUs are fighting for the lock.
2645 */
2646 pgmLock(pVM);
2647
2648 /* map the 1st page */
2649 void const *pvSrc;
2650 PGMPAGEMAPLOCK Lock;
2651 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2652 if (RT_FAILURE(rc))
2653 {
2654 pgmUnlock(pVM);
2655 return rc;
2656 }
2657
2658 /* optimize for the case where access is completely within the first page. */
2659 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2660 if (RT_LIKELY(cb <= cbPage))
2661 {
2662 memcpy(pvDst, pvSrc, cb);
2663 PGMPhysReleasePageMappingLock(pVM, &Lock);
2664 pgmUnlock(pVM);
2665 return VINF_SUCCESS;
2666 }
2667
2668 /* copy to the end of the page. */
2669 memcpy(pvDst, pvSrc, cbPage);
2670 PGMPhysReleasePageMappingLock(pVM, &Lock);
2671 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
2672 pvDst = (uint8_t *)pvDst + cbPage;
2673 cb -= cbPage;
2674
2675 /*
2676 * Page by page.
2677 */
2678 for (;;)
2679 {
2680 /* map the page */
2681 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2682 if (RT_FAILURE(rc))
2683 {
2684 pgmUnlock(pVM);
2685 return rc;
2686 }
2687
2688 /* last page? */
2689 if (cb <= PAGE_SIZE)
2690 {
2691 memcpy(pvDst, pvSrc, cb);
2692 PGMPhysReleasePageMappingLock(pVM, &Lock);
2693 pgmUnlock(pVM);
2694 return VINF_SUCCESS;
2695 }
2696
2697 /* copy the entire page and advance */
2698 memcpy(pvDst, pvSrc, PAGE_SIZE);
2699 PGMPhysReleasePageMappingLock(pVM, &Lock);
2700 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
2701 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2702 cb -= PAGE_SIZE;
2703 }
2704 /* won't ever get here. */
2705}
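
/*
 * Usage sketch (illustrative comment only, not compiled): peeking at guest
 * memory through a guest virtual address without side effects (no handlers, no
 * accessed bits), e.g. for logging.  'GCPtrName' is a hypothetical guest pointer.
 *
 *     char szName[32];
 *     int rc = PGMPhysSimpleReadGCPtr(pVCpu, szName, GCPtrName, sizeof(szName) - 1);
 *     szName[sizeof(szName) - 1] = '\0';
 */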
2706
2707
2708/**
2709 * Write to guest physical memory referenced by GC pointer.
2710 *
2711 * This function uses the current CR3/CR0/CR4 of the guest and will
2712 * bypass access handlers and not set dirty or accessed bits.
2713 *
2714 * @returns VBox status.
2715 * @param pVCpu The VMCPU handle.
2716 * @param GCPtrDst The destination address (GC pointer).
2717 * @param pvSrc The source address.
2718 * @param cb The number of bytes to write.
2719 */
2720VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2721{
2722 PVM pVM = pVCpu->CTX_SUFF(pVM);
2723
2724 /*
2725 * Treat the first page as a special case.
2726 */
2727 if (!cb)
2728 return VINF_SUCCESS;
2729
2730 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWrite));
2731 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
2732
2733 /* map the 1st page */
2734 void *pvDst;
2735 PGMPAGEMAPLOCK Lock;
2736 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2737 if (RT_FAILURE(rc))
2738 return rc;
2739
2740 /* optimize for the case where access is completely within the first page. */
2741 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2742 if (RT_LIKELY(cb <= cbPage))
2743 {
2744 memcpy(pvDst, pvSrc, cb);
2745 PGMPhysReleasePageMappingLock(pVM, &Lock);
2746 return VINF_SUCCESS;
2747 }
2748
2749 /* copy to the end of the page. */
2750 memcpy(pvDst, pvSrc, cbPage);
2751 PGMPhysReleasePageMappingLock(pVM, &Lock);
2752 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2753 pvSrc = (const uint8_t *)pvSrc + cbPage;
2754 cb -= cbPage;
2755
2756 /*
2757 * Page by page.
2758 */
2759 for (;;)
2760 {
2761 /* map the page */
2762 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2763 if (RT_FAILURE(rc))
2764 return rc;
2765
2766 /* last page? */
2767 if (cb <= PAGE_SIZE)
2768 {
2769 memcpy(pvDst, pvSrc, cb);
2770 PGMPhysReleasePageMappingLock(pVM, &Lock);
2771 return VINF_SUCCESS;
2772 }
2773
2774 /* copy the entire page and advance */
2775 memcpy(pvDst, pvSrc, PAGE_SIZE);
2776 PGMPhysReleasePageMappingLock(pVM, &Lock);
2777 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2778 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2779 cb -= PAGE_SIZE;
2780 }
2781 /* won't ever get here. */
2782}
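
/*
 * Usage sketch (illustrative comment only, not compiled): writing through a
 * guest virtual address while leaving the guest PTE accessed/dirty bits alone;
 * use PGMPhysSimpleDirtyWriteGCPtr below when they must be set.  'GCPtrDst2'
 * and 'u64Val' are made-up names.
 *
 *     int rc = PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrDst2, &u64Val, sizeof(u64Val));
 */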
2783
2784
2785/**
2786 * Write to guest physical memory referenced by GC pointer and update the PTE.
2787 *
2788 * This function uses the current CR3/CR0/CR4 of the guest and will
2789 * bypass access handlers but will set any dirty and accessed bits in the PTE.
2790 *
2791 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
2792 *
2793 * @returns VBox status.
2794 * @param pVCpu The VMCPU handle.
2795 * @param GCPtrDst The destination address (GC pointer).
2796 * @param pvSrc The source address.
2797 * @param cb The number of bytes to write.
2798 */
2799VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2800{
2801 PVM pVM = pVCpu->CTX_SUFF(pVM);
2802
2803 /*
2804 * Treat the first page as a special case.
2805     * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
2806 */
2807 if (!cb)
2808 return VINF_SUCCESS;
2809
2810 /* map the 1st page */
2811 void *pvDst;
2812 PGMPAGEMAPLOCK Lock;
2813 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2814 if (RT_FAILURE(rc))
2815 return rc;
2816
2817 /* optimize for the case where access is completely within the first page. */
2818 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2819 if (RT_LIKELY(cb <= cbPage))
2820 {
2821 memcpy(pvDst, pvSrc, cb);
2822 PGMPhysReleasePageMappingLock(pVM, &Lock);
2823 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2824 return VINF_SUCCESS;
2825 }
2826
2827 /* copy to the end of the page. */
2828 memcpy(pvDst, pvSrc, cbPage);
2829 PGMPhysReleasePageMappingLock(pVM, &Lock);
2830 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2831 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2832 pvSrc = (const uint8_t *)pvSrc + cbPage;
2833 cb -= cbPage;
2834
2835 /*
2836 * Page by page.
2837 */
2838 for (;;)
2839 {
2840 /* map the page */
2841 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2842 if (RT_FAILURE(rc))
2843 return rc;
2844
2845 /* last page? */
2846 if (cb <= PAGE_SIZE)
2847 {
2848 memcpy(pvDst, pvSrc, cb);
2849 PGMPhysReleasePageMappingLock(pVM, &Lock);
2850 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2851 return VINF_SUCCESS;
2852 }
2853
2854 /* copy the entire page and advance */
2855 memcpy(pvDst, pvSrc, PAGE_SIZE);
2856 PGMPhysReleasePageMappingLock(pVM, &Lock);
2857 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2858 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2859 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2860 cb -= PAGE_SIZE;
2861 }
2862 /* won't ever get here. */
2863}
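
/*
 * Usage sketch (illustrative comment only, not compiled): an emulation-style
 * write that must leave the guest PTE looking as if the guest had written
 * (A and D set) while still bypassing access handlers.  'GCPtrStack' and
 * 'uFrame' are hypothetical.
 *
 *     int rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrStack, &uFrame, sizeof(uFrame));
 *     // Every page touched gets X86_PTE_A | X86_PTE_D applied via PGMGstModifyPage.
 */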
2864
2865
2866/**
2867 * Read from guest physical memory referenced by GC pointer.
2868 *
2869 * This function uses the current CR3/CR0/CR4 of the guest and will
2870 * respect access handlers and set accessed bits.
2871 *
2872 * @returns VBox status.
2873 * @param pVCpu The VMCPU handle.
2874 * @param pvDst The destination address.
2875 * @param GCPtrSrc The source address (GC pointer).
2876 * @param cb The number of bytes to read.
2877 * @thread The vCPU EMT.
2878 */
2879VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2880{
2881 RTGCPHYS GCPhys;
2882 uint64_t fFlags;
2883 int rc;
2884 PVM pVM = pVCpu->CTX_SUFF(pVM);
2885
2886 /*
2887 * Anything to do?
2888 */
2889 if (!cb)
2890 return VINF_SUCCESS;
2891
2892 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
2893
2894 /*
2895 * Optimize reads within a single page.
2896 */
2897 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2898 {
2899 /* Convert virtual to physical address + flags */
2900 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2901 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2902 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2903
2904 /* mark the guest page as accessed. */
2905 if (!(fFlags & X86_PTE_A))
2906 {
2907 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2908 AssertRC(rc);
2909 }
2910
2911 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
2912 }
2913
2914 /*
2915 * Page by page.
2916 */
2917 for (;;)
2918 {
2919 /* Convert virtual to physical address + flags */
2920 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2921 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2922 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2923
2924 /* mark the guest page as accessed. */
2925 if (!(fFlags & X86_PTE_A))
2926 {
2927 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2928 AssertRC(rc);
2929 }
2930
2931 /* copy */
2932 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2933 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
2934 if (cbRead >= cb || RT_FAILURE(rc))
2935 return rc;
2936
2937 /* next */
2938 cb -= cbRead;
2939 pvDst = (uint8_t *)pvDst + cbRead;
2940 GCPtrSrc += cbRead;
2941 }
2942}
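
/*
 * Usage sketch (illustrative comment only, not compiled): a handler-respecting
 * read through a guest virtual address.  Each page is translated with the
 * current paging mode, its accessed bit is set, and the data is pulled in via
 * PGMPhysRead.  'GCPtrDesc' is hypothetical.
 *
 *     X86DESC Desc;
 *     int rc = PGMPhysReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
 */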
2943
2944
2945/**
2946 * Write to guest physical memory referenced by GC pointer.
2947 *
2948 * This function uses the current CR3/CR0/CR4 of the guest and will
2949 * respect access handlers and set dirty and accessed bits.
2950 *
2951 * @returns VBox status.
2952 * @retval VINF_SUCCESS.
2953 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2954 *
2955 * @param pVCpu The VMCPU handle.
2956 * @param GCPtrDst The destination address (GC pointer).
2957 * @param pvSrc The source address.
2958 * @param cb The number of bytes to write.
2959 */
2960VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2961{
2962 RTGCPHYS GCPhys;
2963 uint64_t fFlags;
2964 int rc;
2965 PVM pVM = pVCpu->CTX_SUFF(pVM);
2966
2967 /*
2968 * Anything to do?
2969 */
2970 if (!cb)
2971 return VINF_SUCCESS;
2972
2973 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
2974
2975 /*
2976 * Optimize writes within a single page.
2977 */
2978 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2979 {
2980 /* Convert virtual to physical address + flags */
2981 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2982 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2983 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2984
2985 /* Mention when we ignore X86_PTE_RW... */
2986 if (!(fFlags & X86_PTE_RW))
2987            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2988
2989 /* Mark the guest page as accessed and dirty if necessary. */
2990 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2991 {
2992 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2993 AssertRC(rc);
2994 }
2995
2996 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
2997 }
2998
2999 /*
3000 * Page by page.
3001 */
3002 for (;;)
3003 {
3004 /* Convert virtual to physical address + flags */
3005 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3006 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3007 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3008
3009 /* Mention when we ignore X86_PTE_RW... */
3010 if (!(fFlags & X86_PTE_RW))
3011            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3012
3013 /* Mark the guest page as accessed and dirty if necessary. */
3014 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3015 {
3016 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3017 AssertRC(rc);
3018 }
3019
3020 /* copy */
3021 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3022 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
3023 if (cbWrite >= cb || RT_FAILURE(rc))
3024 return rc;
3025
3026 /* next */
3027 cb -= cbWrite;
3028 pvSrc = (uint8_t *)pvSrc + cbWrite;
3029 GCPtrDst += cbWrite;
3030 }
3031}
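
/*
 * Usage sketch (illustrative comment only, not compiled): a handler-respecting
 * write through a guest virtual address.  A and D bits are set as needed; note
 * that X86_PTE_RW is only logged, not enforced, so the caller must do its own
 * write-permission checks.  'GCPtrOut' and 'u32Status' are made up.
 *
 *     int rc = PGMPhysWriteGCPtr(pVCpu, GCPtrOut, &u32Status, sizeof(u32Status));
 */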
3032
3033
3034/**
3035 * Performs a read of guest virtual memory for instruction emulation.
3036 *
3037 * This will check permissions, raise exceptions and update the access bits.
3038 *
3039 * The current implementation will bypass all access handlers. It may later be
3040 * changed to at least respect MMIO.
3041 *
3042 *
3043 * @returns VBox status code suitable to scheduling.
3044 * @retval VINF_SUCCESS if the read was performed successfully.
3045 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3046 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3047 *
3048 * @param pVCpu The VMCPU handle.
3049 * @param pCtxCore The context core.
3050 * @param pvDst Where to put the bytes we've read.
3051 * @param GCPtrSrc The source address.
3052 * @param cb The number of bytes to read. Not more than a page.
3053 *
3054 * @remark This function will dynamically map physical pages in GC. This may unmap
3055 * mappings done by the caller. Be careful!
3056 */
3057VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3058{
3059 PVM pVM = pVCpu->CTX_SUFF(pVM);
3060 Assert(cb <= PAGE_SIZE);
3061
3062/** @todo r=bird: This isn't perfect!
3063 * -# It's not checking for reserved bits being 1.
3064 * -# It's not correctly dealing with the access bit.
3065 * -# It's not respecting MMIO memory or any other access handlers.
3066 */
3067 /*
3068 * 1. Translate virtual to physical. This may fault.
3069 * 2. Map the physical address.
3070 * 3. Do the read operation.
3071 * 4. Set access bits if required.
3072 */
3073 int rc;
3074 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3075 if (cb <= cb1)
3076 {
3077 /*
3078 * Not crossing pages.
3079 */
3080 RTGCPHYS GCPhys;
3081 uint64_t fFlags;
3082 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3083 if (RT_SUCCESS(rc))
3084 {
3085 /** @todo we should check reserved bits ... */
3086 void *pvSrc;
3087 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
3088 switch (rc)
3089 {
3090 case VINF_SUCCESS:
3091 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3092 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3093 break;
3094 case VERR_PGM_PHYS_PAGE_RESERVED:
3095 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3096 memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
3097 break;
3098 default:
3099 return rc;
3100 }
3101
3102 /** @todo access bit emulation isn't 100% correct. */
3103 if (!(fFlags & X86_PTE_A))
3104 {
3105 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3106 AssertRC(rc);
3107 }
3108 return VINF_SUCCESS;
3109 }
3110 }
3111 else
3112 {
3113 /*
3114 * Crosses pages.
3115 */
3116 size_t cb2 = cb - cb1;
3117 uint64_t fFlags1;
3118 RTGCPHYS GCPhys1;
3119 uint64_t fFlags2;
3120 RTGCPHYS GCPhys2;
3121 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3122 if (RT_SUCCESS(rc))
3123 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3124 if (RT_SUCCESS(rc))
3125 {
3126 /** @todo we should check reserved bits ... */
3127 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3128 void *pvSrc1;
3129 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
3130 switch (rc)
3131 {
3132 case VINF_SUCCESS:
3133 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3134 break;
3135 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3136 memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
3137 break;
3138 default:
3139 return rc;
3140 }
3141
3142 void *pvSrc2;
3143 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
3144 switch (rc)
3145 {
3146 case VINF_SUCCESS:
3147 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3148 break;
3149 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3150 memset((uint8_t *)pvDst + cb1, 0, cb2); /** @todo this is wrong, it should be 0xff */
3151 break;
3152 default:
3153 return rc;
3154 }
3155
3156 if (!(fFlags1 & X86_PTE_A))
3157 {
3158 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3159 AssertRC(rc);
3160 }
3161 if (!(fFlags2 & X86_PTE_A))
3162 {
3163 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3164 AssertRC(rc);
3165 }
3166 return VINF_SUCCESS;
3167 }
3168 }
3169
3170 /*
3171 * Raise a #PF.
3172 */
3173 uint32_t uErr;
3174
3175 /* Get the current privilege level. */
3176 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3177 switch (rc)
3178 {
3179 case VINF_SUCCESS:
3180 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3181 break;
3182
3183 case VERR_PAGE_NOT_PRESENT:
3184 case VERR_PAGE_TABLE_NOT_PRESENT:
3185 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3186 break;
3187
3188 default:
3189 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3190 return rc;
3191 }
3192 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3193 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3194}
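
/*
 * Usage sketch (illustrative comment only, not compiled): instruction emulation
 * fetching operand bytes and letting this helper raise the guest #PF itself on
 * translation failure.  'pRegFrame' and 'GCPtrPar' are hypothetical caller
 * supplied values.
 *
 *     uint16_t u16Port;
 *     int rc = PGMPhysInterpretedRead(pVCpu, pRegFrame, &u16Port, GCPtrPar, sizeof(u16Port));
 *     if (rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED)
 *         return rc; // a #PF was raised for the guest; abort and resume emulation later.
 */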
3195
3196
3197/**
3198 * Performs a read of guest virtual memory for instruction emulation.
3199 *
3200 * This will check permissions, raise exceptions and update the access bits.
3201 *
3202 * The current implementation will bypass all access handlers. It may later be
3203 * changed to at least respect MMIO.
3204 *
3205 *
3206 * @returns VBox status code suitable to scheduling.
3207 * @retval VINF_SUCCESS if the read was performed successfully.
3208 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3209 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3210 *
3211 * @param pVCpu The VMCPU handle.
3212 * @param pCtxCore The context core.
3213 * @param pvDst Where to put the bytes we've read.
3214 * @param GCPtrSrc The source address.
3215 * @param cb The number of bytes to read. Not more than a page.
3216 * @param fRaiseTrap If set, the trap will be raised as per the spec; if clear,
3217 * an appropriate error status will be returned instead (no
3218 * informational status at all).
3219 *
3220 *
3221 * @remarks Takes the PGM lock.
3222 * @remarks A page fault on the 2nd page of the access will be raised without
3223 * writing the bits on the first page since we're ASSUMING that the
3224 * caller is emulating an instruction access.
3225 * @remarks This function will dynamically map physical pages in GC. This may
3226 * unmap mappings done by the caller. Be careful!
3227 */
3228VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
3229{
3230 PVM pVM = pVCpu->CTX_SUFF(pVM);
3231 Assert(cb <= PAGE_SIZE);
3232
3233 /*
3234 * 1. Translate virtual to physical. This may fault.
3235 * 2. Map the physical address.
3236 * 3. Do the read operation.
3237 * 4. Set access bits if required.
3238 */
3239 int rc;
3240 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3241 if (cb <= cb1)
3242 {
3243 /*
3244 * Not crossing pages.
3245 */
3246 RTGCPHYS GCPhys;
3247 uint64_t fFlags;
3248 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3249 if (RT_SUCCESS(rc))
3250 {
3251 if (1) /** @todo we should check reserved bits ... */
3252 {
3253 const void *pvSrc;
3254 PGMPAGEMAPLOCK Lock;
3255 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3256 switch (rc)
3257 {
3258 case VINF_SUCCESS:
3259 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3260 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3261 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3262 break;
3263 case VERR_PGM_PHYS_PAGE_RESERVED:
3264 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3265 memset(pvDst, 0xff, cb);
3266 break;
3267 default:
3268 AssertMsgFailed(("%Rrc\n", rc));
3269 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3270 return rc;
3271 }
3272 PGMPhysReleasePageMappingLock(pVM, &Lock);
3273
3274 if (!(fFlags & X86_PTE_A))
3275 {
3276 /** @todo access bit emulation isn't 100% correct. */
3277 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3278 AssertRC(rc);
3279 }
3280 return VINF_SUCCESS;
3281 }
3282 }
3283 }
3284 else
3285 {
3286 /*
3287 * Crosses pages.
3288 */
3289 size_t cb2 = cb - cb1;
3290 uint64_t fFlags1;
3291 RTGCPHYS GCPhys1;
3292 uint64_t fFlags2;
3293 RTGCPHYS GCPhys2;
3294 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3295 if (RT_SUCCESS(rc))
3296 {
3297 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3298 if (RT_SUCCESS(rc))
3299 {
3300 if (1) /** @todo we should check reserved bits ... */
3301 {
3302 const void *pvSrc;
3303 PGMPAGEMAPLOCK Lock;
3304 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3305 switch (rc)
3306 {
3307 case VINF_SUCCESS:
3308 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3309 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3310 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3311 PGMPhysReleasePageMappingLock(pVM, &Lock);
3312 break;
3313 case VERR_PGM_PHYS_PAGE_RESERVED:
3314 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3315 memset(pvDst, 0xff, cb1);
3316 break;
3317 default:
3318 AssertMsgFailed(("%Rrc\n", rc));
3319 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3320 return rc;
3321 }
3322
3323 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3324 switch (rc)
3325 {
3326 case VINF_SUCCESS:
3327 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3328 PGMPhysReleasePageMappingLock(pVM, &Lock);
3329 break;
3330 case VERR_PGM_PHYS_PAGE_RESERVED:
3331 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3332 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3333 break;
3334 default:
3335 AssertMsgFailed(("%Rrc\n", rc));
3336 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3337 return rc;
3338 }
3339
3340 if (!(fFlags1 & X86_PTE_A))
3341 {
3342 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3343 AssertRC(rc);
3344 }
3345 if (!(fFlags2 & X86_PTE_A))
3346 {
3347 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3348 AssertRC(rc);
3349 }
3350 return VINF_SUCCESS;
3351 }
3352 /* sort out which page */
3353 }
3354 else
3355 GCPtrSrc += cb1; /* fault on 2nd page */
3356 }
3357 }
3358
3359 /*
3360 * Raise a #PF if we're allowed to do that.
3361 */
3362 /* Calc the error bits. */
3363 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3364 uint32_t uErr;
3365 switch (rc)
3366 {
3367 case VINF_SUCCESS:
3368 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3369 rc = VERR_ACCESS_DENIED;
3370 break;
3371
3372 case VERR_PAGE_NOT_PRESENT:
3373 case VERR_PAGE_TABLE_NOT_PRESENT:
3374 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3375 break;
3376
3377 default:
3378 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3379 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3380 return rc;
3381 }
3382 if (fRaiseTrap)
3383 {
3384 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3385 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3386 }
3387 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3388 return rc;
3389}
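
/*
 * Illustrative usage sketch -- NOT part of the original PGMAllPhys.cpp. It shows how
 * an instruction emulator might fetch an operand through the read helper above with
 * fRaiseTrap=false, handling the returned error status itself instead of letting PGM
 * raise the guest #PF. The helper name is hypothetical and the signature is assumed
 * to mirror PGMPhysInterpretedWriteNoHandlers below.
 */
#if 0 /* example only */
static int emulatedLoadExample(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrSrc, uint32_t *pu32Value)
{
    /* fRaiseTrap=false: on a translation failure an error status (e.g. VERR_ACCESS_DENIED,
       VERR_PAGE_NOT_PRESENT) comes back instead of a raised #PF. */
    int rc = PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pu32Value, GCPtrSrc,
                                              sizeof(*pu32Value), false /* fRaiseTrap */);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;    /* operand read, accessed bit updated */
    return rc;                  /* caller decides how/whether to fault the guest */
}
#endif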
3390
3391
3392/**
3393 * Performs a write to guest virtual memory for instruction emulation.
3394 *
3395 * This will check permissions, raise exceptions and update the dirty and access
3396 * bits.
3397 *
3398 * @returns VBox status code suitable to scheduling.
3399 * @retval VINF_SUCCESS if the write was performed successfully.
3400 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3401 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3402 *
3403 * @param pVCpu The VMCPU handle.
3404 * @param pCtxCore The context core.
3405 * @param GCPtrDst The destination address.
3406 * @param pvSrc What to write.
3407 * @param cb The number of bytes to write. Not more than a page.
3408 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3409 *                   an appropriate error status will be returned (no
3410 *                   informational status at all).
3411 *
3412 * @remarks Takes the PGM lock.
3413 * @remarks A page fault on the 2nd page of the access will be raised without
3414 * writing the bits on the first page since we're ASSUMING that the
3415 * caller is emulating an instruction access.
3416 * @remarks This function will dynamically map physical pages in GC. This may
3417 * unmap mappings done by the caller. Be careful!
3418 */
3419VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
3420{
3421 Assert(cb <= PAGE_SIZE);
3422 PVM pVM = pVCpu->CTX_SUFF(pVM);
3423
3424 /*
3425 * 1. Translate virtual to physical. This may fault.
3426 * 2. Map the physical address.
3427 * 3. Do the write operation.
3428 * 4. Set access bits if required.
3429 */
3430 int rc;
3431 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3432 if (cb <= cb1)
3433 {
3434 /*
3435 * Not crossing pages.
3436 */
3437 RTGCPHYS GCPhys;
3438 uint64_t fFlags;
3439 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3440 if (RT_SUCCESS(rc))
3441 {
3442 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3443 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3444 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3445 {
3446 void *pvDst;
3447 PGMPAGEMAPLOCK Lock;
3448 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3449 switch (rc)
3450 {
3451 case VINF_SUCCESS:
3452 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3453 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3454 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3455 PGMPhysReleasePageMappingLock(pVM, &Lock);
3456 break;
3457 case VERR_PGM_PHYS_PAGE_RESERVED:
3458 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3459 /* bit bucket */
3460 break;
3461 default:
3462 AssertMsgFailed(("%Rrc\n", rc));
3463 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3464 return rc;
3465 }
3466
3467 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3468 {
3469 /** @todo dirty & access bit emulation isn't 100% correct. */
3470 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3471 AssertRC(rc);
3472 }
3473 return VINF_SUCCESS;
3474 }
3475 rc = VERR_ACCESS_DENIED;
3476 }
3477 }
3478 else
3479 {
3480 /*
3481 * Crosses pages.
3482 */
3483 size_t cb2 = cb - cb1;
3484 uint64_t fFlags1;
3485 RTGCPHYS GCPhys1;
3486 uint64_t fFlags2;
3487 RTGCPHYS GCPhys2;
3488 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3489 if (RT_SUCCESS(rc))
3490 {
3491 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3492 if (RT_SUCCESS(rc))
3493 {
3494 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3495 && (fFlags2 & X86_PTE_RW))
3496 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3497 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) )
3498 {
3499 void *pvDst;
3500 PGMPAGEMAPLOCK Lock;
3501 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3502 switch (rc)
3503 {
3504 case VINF_SUCCESS:
3505 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3506 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3507 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3508 PGMPhysReleasePageMappingLock(pVM, &Lock);
3509 break;
3510 case VERR_PGM_PHYS_PAGE_RESERVED:
3511 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3512 /* bit bucket */
3513 break;
3514 default:
3515 AssertMsgFailed(("%Rrc\n", rc));
3516 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3517 return rc;
3518 }
3519
3520 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3521 switch (rc)
3522 {
3523 case VINF_SUCCESS:
3524 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3525 PGMPhysReleasePageMappingLock(pVM, &Lock);
3526 break;
3527 case VERR_PGM_PHYS_PAGE_RESERVED:
3528 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3529 /* bit bucket */
3530 break;
3531 default:
3532 AssertMsgFailed(("%Rrc\n", rc));
3533 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3534 return rc;
3535 }
3536
3537 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3538 {
3539 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3540 AssertRC(rc);
3541 }
3542 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3543 {
3544 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3545 AssertRC(rc);
3546 }
3547 return VINF_SUCCESS;
3548 }
3549 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3550 GCPtrDst += cb1; /* fault on the 2nd page. */
3551 rc = VERR_ACCESS_DENIED;
3552 }
3553 else
3554 GCPtrDst += cb1; /* fault on the 2nd page. */
3555 }
3556 }
3557
3558 /*
3559 * Raise a #PF if we're allowed to do that.
3560 */
3561 /* Calc the error bits. */
3562 uint32_t uErr;
3563 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3564 switch (rc)
3565 {
3566 case VINF_SUCCESS:
3567 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3568 rc = VERR_ACCESS_DENIED;
3569 break;
3570
3571 case VERR_ACCESS_DENIED:
3572 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3573 break;
3574
3575 case VERR_PAGE_NOT_PRESENT:
3576 case VERR_PAGE_TABLE_NOT_PRESENT:
3577 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3578 break;
3579
3580 default:
3581 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3582 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3583 return rc;
3584 }
3585 if (fRaiseTrap)
3586 {
3587 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3588 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3589 }
3590 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3591 return rc;
3592}
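
/*
 * Illustrative usage sketch -- NOT part of the original PGMAllPhys.cpp. It shows how
 * an emulated store could be committed through PGMPhysInterpretedWriteNoHandlers with
 * fRaiseTrap=true, so that a failed translation or permission check is turned into a
 * guest #PF by PGM itself. The helper name and emulator context are hypothetical.
 */
#if 0 /* example only */
static int emulatedStoreExample(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, uint32_t u32Value)
{
    /* fRaiseTrap=true: PGM raises the #PF on translation/permission failure. */
    int rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, &u32Value,
                                               sizeof(u32Value), true /* fRaiseTrap */);
    if (rc == VINF_SUCCESS)
        return VINF_SUCCESS;    /* data written, accessed/dirty bits updated */
    /* VINF_EM_RAW_GUEST_TRAP / VINF_TRPM_XCPT_DISPATCHED: a #PF was raised; the
       emulator must not retire the instruction and should reschedule accordingly. */
    return rc;
}
#endif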
3593