VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@26685

Last change on this file since 26685 was 26685, checked in by vboxsync, 15 years ago

Large page changes

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 125.5 KB
1/* $Id: PGMAllPhys.cpp 26685 2010-02-22 17:48:23Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM_PHYS
26#include <VBox/pgm.h>
27#include <VBox/trpm.h>
28#include <VBox/vmm.h>
29#include <VBox/iom.h>
30#include <VBox/em.h>
31#include <VBox/rem.h>
32#include "../PGMInternal.h"
33#include <VBox/vm.h>
34#include "../PGMInline.h"
35#include <VBox/param.h>
36#include <VBox/err.h>
37#include <iprt/assert.h>
38#include <iprt/string.h>
39#include <iprt/asm.h>
40#include <VBox/log.h>
41#ifdef IN_RING3
42# include <iprt/thread.h>
43#endif
44
45
46/*******************************************************************************
47* Defined Constants And Macros *
48*******************************************************************************/
49/** Enable the physical TLB. */
50#define PGM_WITH_PHYS_TLB
51
52
53
54#ifndef IN_RING3
55
56/**
57 * \#PF Handler callback for Guest ROM range write access.
58 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
59 *
60 * @returns VBox status code (appropriate for trap handling and GC return).
61 * @param pVM VM Handle.
62 * @param uErrorCode CPU Error code.
63 * @param pRegFrame Trap register frame.
64 * @param pvFault The fault address (cr2).
65 * @param GCPhysFault The GC physical address corresponding to pvFault.
66 * @param pvUser User argument. Pointer to the ROM range structure.
67 */
68VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
69{
70 int rc;
71 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
72 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
73 PVMCPU pVCpu = VMMGetCpu(pVM);
74
75 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
76 switch (pRom->aPages[iPage].enmProt)
77 {
78 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
79 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
80 {
81 /*
82 * If it's a simple instruction which doesn't change the cpu state
83 * we will simply skip it. Otherwise we'll have to defer it to REM.
84 */
85 uint32_t cbOp;
86 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
87 rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, &cbOp);
88 if ( RT_SUCCESS(rc)
89 && pDis->mode == CPUMODE_32BIT /** @todo why does this matter? */
90 && !(pDis->prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
91 {
92 switch (pDis->opcode)
93 {
94 /** @todo Find other instructions we can safely skip, possibly
95 * adding this kind of detection to DIS or EM. */
96 case OP_MOV:
97 pRegFrame->rip += cbOp;
98 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteHandled);
99 return VINF_SUCCESS;
100 }
101 }
102 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
103 return rc;
104 break;
105 }
106
107 case PGMROMPROT_READ_RAM_WRITE_RAM:
108 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
109 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
110 AssertRC(rc);
111 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
112
113 case PGMROMPROT_READ_ROM_WRITE_RAM:
114 /* Handle it in ring-3 because it's *way* easier there. */
115 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
116 break;
117
118 default:
119 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
120 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
121 VERR_INTERNAL_ERROR);
122 }
123
124 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteUnhandled);
125 return VINF_EM_RAW_EMULATE_INSTR;
126}
127
128#endif /* IN_RING3 */
129
130/**
131 * Checks if Address Gate 20 is enabled or not.
132 *
133 * @returns true if enabled.
134 * @returns false if disabled.
135 * @param pVCpu VMCPU handle.
136 */
137VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
138{
139 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
140 return pVCpu->pgm.s.fA20Enabled;
141}
142
143
144/**
145 * Validates a GC physical address.
146 *
147 * @returns true if valid.
148 * @returns false if invalid.
149 * @param pVM The VM handle.
150 * @param GCPhys The physical address to validate.
151 */
152VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
153{
154 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
155 return pPage != NULL;
156}
157
158
159/**
160 * Checks if a GC physical address is a normal page,
161 * i.e. not ROM, MMIO or reserved.
162 *
163 * @returns true if normal.
164 * @returns false if invalid, ROM, MMIO or reserved page.
165 * @param pVM The VM handle.
166 * @param GCPhys The physical address to check.
167 */
168VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
169{
170 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
171 return pPage
172 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
173}
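
/*
 * A minimal usage sketch for the two predicates above, assuming a valid pVM;
 * the helper name below is illustrative only.  PGMPhysIsGCPhysValid only tells
 * us the address falls inside a registered range, while PGMPhysIsGCPhysNormal
 * additionally requires the page to be plain RAM (not ROM, MMIO or reserved).
 */
#if 0 /* usage sketch */
static bool pgmPhysExampleIsPlainRam(PVM pVM, RTGCPHYS GCPhys)
{
    if (!PGMPhysIsGCPhysValid(pVM, GCPhys))
        return false;                          /* outside all registered ranges */
    return PGMPhysIsGCPhysNormal(pVM, GCPhys); /* RAM only, not ROM/MMIO/reserved */
}
#endif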
174
175
176/**
177 * Converts a GC physical address to a HC physical address.
178 *
179 * @returns VINF_SUCCESS on success.
180 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
181 * page but has no physical backing.
182 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
183 * GC physical address.
184 *
185 * @param pVM The VM handle.
186 * @param GCPhys The GC physical address to convert.
187 * @param pHCPhys Where to store the HC physical address on success.
188 */
189VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
190{
191 pgmLock(pVM);
192 PPGMPAGE pPage;
193 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
194 if (RT_SUCCESS(rc))
195 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
196 pgmUnlock(pVM);
197 return rc;
198}
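
/*
 * A minimal caller sketch for PGMPhysGCPhys2HCPhys, assuming a valid pVM; the
 * helper name and the logging are illustrative only.  Note that the low
 * PAGE_OFFSET_MASK bits of GCPhys are carried over into the returned host
 * physical address.
 */
#if 0 /* usage sketch */
static int pgmPhysExampleLogHCPhys(PVM pVM, RTGCPHYS GCPhys)
{
    RTHCPHYS HCPhys = NIL_RTHCPHYS;
    int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
    if (RT_SUCCESS(rc))
        Log(("GCPhys %RGp -> HCPhys %RHp\n", GCPhys, HCPhys)); /* page offset preserved */
    return rc;
}
#endif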
199
200
201/**
202 * Invalidates all page mapping TLBs.
203 *
204 * @param pVM The VM handle.
205 */
206VMMDECL(void) PGMPhysInvalidatePageMapTLB(PVM pVM)
207{
208 pgmLock(pVM);
209 STAM_COUNTER_INC(&pVM->pgm.s.StatPageMapTlbFlushes);
210 /* Clear the shared R0/R3 TLB completely. */
211 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
212 {
213 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
214 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
215 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
216 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
217 }
218 /* @todo clear the RC TLB whenever we add it. */
219 pgmUnlock(pVM);
220}
221
222/**
223 * Invalidates a page mapping TLB entry
224 *
225 * @param pVM The VM handle.
226 * @param GCPhys GCPhys entry to flush
227 */
228VMMDECL(void) PGMPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
229{
230 Assert(PGMIsLocked(pVM));
231
232 STAM_COUNTER_INC(&pVM->pgm.s.StatPageMapTlbFlushEntry);
233 /* Clear the shared R0/R3 TLB entry. */
234#ifdef IN_RC
235 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
236 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
237 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
238 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
239 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
240#else
241 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
242 pTlbe->GCPhys = NIL_RTGCPHYS;
243 pTlbe->pPage = 0;
244 pTlbe->pMap = 0;
245 pTlbe->pv = 0;
246#endif
247 /* @todo clear the RC TLB whenever we add it. */
248}
249
250/**
251 * Makes sure that there is at least one handy page ready for use.
252 *
253 * This will also take the appropriate actions when reaching water-marks.
254 *
255 * @returns VBox status code.
256 * @retval VINF_SUCCESS on success.
257 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
258 *
259 * @param pVM The VM handle.
260 *
261 * @remarks Must be called from within the PGM critical section. It may
262 * nip back to ring-3/0 in some cases.
263 */
264static int pgmPhysEnsureHandyPage(PVM pVM)
265{
266 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
267
268 /*
269 * Do we need to do anything special?
270 */
271#ifdef IN_RING3
272 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
273#else
274 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
275#endif
276 {
277 /*
278 * Allocate pages only if we're out of them, or in ring-3, almost out.
279 */
280#ifdef IN_RING3
281 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
282#else
283 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
284#endif
285 {
286 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
287 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
288#ifdef IN_RING3
289 int rc = PGMR3PhysAllocateHandyPages(pVM);
290#else
291 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
292#endif
293 if (RT_UNLIKELY(rc != VINF_SUCCESS))
294 {
295 if (RT_FAILURE(rc))
296 return rc;
297 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
298 if (!pVM->pgm.s.cHandyPages)
299 {
300 LogRel(("PGM: no more handy pages!\n"));
301 return VERR_EM_NO_MEMORY;
302 }
303 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
304 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
305#ifdef IN_RING3
306 REMR3NotifyFF(pVM);
307#else
308 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
309#endif
310 }
311 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
312 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
313 ("%u\n", pVM->pgm.s.cHandyPages),
314 VERR_INTERNAL_ERROR);
315 }
316 else
317 {
318 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
319 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
320#ifndef IN_RING3
321 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
322 {
323 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
324 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
325 }
326#endif
327 }
328 }
329
330 return VINF_SUCCESS;
331}
332
333
334/**
335 * Replace a zero or shared page with a new page that we can write to.
336 *
337 * @returns The following VBox status codes.
338 * @retval VINF_SUCCESS on success, pPage is modified.
339 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
340 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
341 *
342 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
343 *
344 * @param pVM The VM address.
345 * @param pPage The physical page tracking structure. This will
346 * be modified on success.
347 * @param GCPhys The address of the page.
348 *
349 * @remarks Must be called from within the PGM critical section. It may
350 * nip back to ring-3/0 in some cases.
351 *
352 * @remarks This function shouldn't really fail, however if it does
353 * it probably means we've screwed up the size of handy pages and/or
354 * the low-water mark. Or, that some device I/O is causing a lot of
355 * pages to be allocated while the host is in a low-memory
356 * condition. This latter should be handled elsewhere and in a more
357 * controlled manner, it's on the @bugref{3170} todo list...
358 */
359int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
360{
361 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
362
363 /*
364 * Prereqs.
365 */
366 Assert(PGMIsLocked(pVM));
367 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
368 Assert(!PGM_PAGE_IS_MMIO(pPage));
369
370 if (PGMIsUsingLargePages(pVM))
371 {
372 RTHCPHYS HCPhysDummy;
373
374 int rc = pgmPhysAllocLargePage(pVM, GCPhys, &HCPhysDummy);
375 if (rc == VINF_SUCCESS)
376 return rc;
377
378 /* fall back to 4kb pages. */
379 }
380
381 /*
382 * Flush any shadow page table mappings of the page.
383 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
384 */
385 bool fFlushTLBs = false;
386 int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
387 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
388
389 /*
390 * Ensure that we've got a page handy, take it and use it.
391 */
392 int rc2 = pgmPhysEnsureHandyPage(pVM);
393 if (RT_FAILURE(rc2))
394 {
395 if (fFlushTLBs)
396 PGM_INVL_ALL_VCPU_TLBS(pVM);
397 Assert(rc2 == VERR_EM_NO_MEMORY);
398 return rc2;
399 }
400 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
401 Assert(PGMIsLocked(pVM));
402 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
403 Assert(!PGM_PAGE_IS_MMIO(pPage));
404
405 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
406 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
407 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
408 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
409 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
410 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
411
412 /*
413 * There are one or two actions to be taken the next time we allocate handy pages:
414 * - Tell the GMM (global memory manager) what the page is being used for.
415 * (Speeds up replacement operations - sharing and defragmenting.)
416 * - If the current backing is shared, it must be freed.
417 */
418 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
419 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
420
421 if (PGM_PAGE_IS_SHARED(pPage))
422 {
423 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
424 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
425 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
426
427 Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
428 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
429 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageReplaceShared));
430 pVM->pgm.s.cSharedPages--;
431 AssertMsgFailed(("TODO: copy shared page content")); /** @todo err.. what about copying the page content? */
432 }
433 else
434 {
435 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
436 STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
437 pVM->pgm.s.cZeroPages--;
438 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
439 }
440
441 /*
442 * Do the PGMPAGE modifications.
443 */
444 pVM->pgm.s.cPrivatePages++;
445 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
446 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
447 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
448 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
449 PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
450
451 if ( fFlushTLBs
452 && rc != VINF_PGM_GCPHYS_ALIASED)
453 PGM_INVL_ALL_VCPU_TLBS(pVM);
454 return rc;
455}
456
457/**
458 * Replace a 2 MB range of zero pages with new pages that we can write to.
459 *
460 * @returns The following VBox status codes.
461 * @retval VINF_SUCCESS on success, pPage is modified.
462 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
463 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
464 *
465 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
466 *
467 * @param pVM The VM address.
468 * @param GCPhys The address of the page.
469 * @param pHCPhys Pointer to HC physical address (out)
470 *
471 * @remarks Must be called from within the PGM critical section. It may
472 * nip back to ring-3/0 in some cases.
473 */
474int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS *pHCPhys)
475{
476 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
477 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
478
479 /*
480 * Prereqs.
481 */
482 Assert(PGMIsLocked(pVM));
483 Assert(PGMIsUsingLargePages(pVM));
484 Assert((GCPhys & X86_PD_PAE_MASK) == 0);
485 AssertPtr(pHCPhys);
486
487 PPGMPAGE pPage;
488 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
489 if ( RT_SUCCESS(rc)
490 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
491 {
492 RTHCPHYS HCPhys = NIL_RTHCPHYS;
493 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pPage);
494
495 if (uPDEType == PGM_PAGE_PDE_TYPE_PDE)
496 {
497 /* Previously allocated 2 MB range can be reused. */
498 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
499
500 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
501 return VINF_SUCCESS;
502 }
503 else
504 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
505 && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO)
506 {
507 unsigned iPage;
508
509 GCPhys = GCPhysBase;
510
511 /* Lazy approach: check all pages in the 2 MB range.
512 * The whole range must be ram and unallocated
513 */
514 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
515 {
516 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
517 if ( RT_FAILURE(rc)
518 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
519 || PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
520 {
521 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_STATE(pPage), rc));
522 break;
523 }
524 Assert(PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
525 GCPhys += PAGE_SIZE;
526 }
527 /* Fetch the start page of the 2 MB range again. */
528 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
529 AssertRC(rc); /* can't fail */
530
531 if (iPage != _2M/PAGE_SIZE)
532 {
533 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
534 STAM_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
535 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
536 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
537 }
538 else
539 {
540#ifdef IN_RING3
541 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
542#else
543 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
544#endif
545 if (RT_SUCCESS(rc))
546 {
547 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
548 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
549 STAM_COUNTER_INC(&pVM->pgm.s.StatLargePageUsed);
550 return VINF_SUCCESS;
551 }
552 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
553
554 /* If we fail once, it most likely means the host's memory is too fragmented; don't bother trying again. */
555 PGMSetLargePageUsage(pVM, false);
556 return rc;
557 }
558 }
559 }
560 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
561}
562
563/**
564 * Deal with a write monitored page.
565 *
566 * The page is switched to the allocated state and marked as written to.
567 *
568 * @param pVM The VM address.
569 * @param pPage The physical page tracking structure.
570 *
571 * @remarks Called from within the PGM critical section.
572 */
573void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
574{
575 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
576 PGM_PAGE_SET_WRITTEN_TO(pPage);
577 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
578 Assert(pVM->pgm.s.cMonitoredPages > 0);
579 pVM->pgm.s.cMonitoredPages--;
580 pVM->pgm.s.cWrittenToPages++;
581}
582
583
584/**
585 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
586 *
587 * @returns VBox strict status code.
588 * @retval VINF_SUCCESS on success.
589 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
590 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
591 *
592 * @param pVM The VM address.
593 * @param pPage The physical page tracking structure.
594 * @param GCPhys The address of the page.
595 *
596 * @remarks Called from within the PGM critical section.
597 */
598int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
599{
600 switch (PGM_PAGE_GET_STATE(pPage))
601 {
602 case PGM_PAGE_STATE_WRITE_MONITORED:
603 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
604 /* fall thru */
605 default: /* to shut up GCC */
606 case PGM_PAGE_STATE_ALLOCATED:
607 return VINF_SUCCESS;
608
609 /*
610 * Zero pages can be dummy pages for MMIO or reserved memory,
611 * so we need to check the flags before joining cause with
612 * shared page replacement.
613 */
614 case PGM_PAGE_STATE_ZERO:
615 if (PGM_PAGE_IS_MMIO(pPage))
616 return VERR_PGM_PHYS_PAGE_RESERVED;
617 /* fall thru */
618 case PGM_PAGE_STATE_SHARED:
619 return pgmPhysAllocPage(pVM, pPage, GCPhys);
620 }
621}
622
623
624/**
625 * Wrapper for pgmPhysPageMakeWritable which enters the critsect.
626 *
627 * @returns VBox strict status code.
628 * @retval VINF_SUCCESS on success.
629 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
630 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
631 *
632 * @param pVM The VM address.
633 * @param pPage The physical page tracking structure.
634 * @param GCPhys The address of the page.
635 */
636int pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
637{
638 int rc = pgmLock(pVM);
639 if (RT_SUCCESS(rc))
640 {
641 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
642 pgmUnlock(pVM);
643 }
644 return rc;
645}
646
647
648/**
649 * Internal usage: Map the page specified by its GMM ID.
650 *
651 * This is similar to pgmPhysPageMap
652 *
653 * @returns VBox status code.
654 *
655 * @param pVM The VM handle.
656 * @param idPage The Page ID.
657 * @param HCPhys The physical address (for RC).
658 * @param ppv Where to store the mapping address.
659 *
660 * @remarks Called from within the PGM critical section. The mapping is only
661 * valid while you're inside this section.
662 */
663int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
664{
665 /*
666 * Validation.
667 */
668 Assert(PGMIsLocked(pVM));
669 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
670 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
671 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
672
673#ifdef IN_RC
674 /*
675 * Map it by HCPhys.
676 */
677 return PGMDynMapHCPage(pVM, HCPhys, ppv);
678
679#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
680 /*
681 * Map it by HCPhys.
682 */
683 return pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
684
685#else
686 /*
687 * Find/make Chunk TLB entry for the mapping chunk.
688 */
689 PPGMCHUNKR3MAP pMap;
690 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
691 if (pTlbe->idChunk == idChunk)
692 {
693 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
694 pMap = pTlbe->pChunk;
695 }
696 else
697 {
698 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
699
700 /*
701 * Find the chunk, map it if necessary.
702 */
703 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
704 if (!pMap)
705 {
706# ifdef IN_RING0
707 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
708 AssertRCReturn(rc, rc);
709 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
710 Assert(pMap);
711# else
712 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
713 if (RT_FAILURE(rc))
714 return rc;
715# endif
716 }
717
718 /*
719 * Enter it into the Chunk TLB.
720 */
721 pTlbe->idChunk = idChunk;
722 pTlbe->pChunk = pMap;
723 pMap->iAge = 0;
724 }
725
726 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
727 return VINF_SUCCESS;
728#endif
729}
730
731
732/**
733 * Maps a page into the current virtual address space so it can be accessed.
734 *
735 * @returns VBox status code.
736 * @retval VINF_SUCCESS on success.
737 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
738 *
739 * @param pVM The VM address.
740 * @param pPage The physical page tracking structure.
741 * @param GCPhys The address of the page.
742 * @param ppMap Where to store the address of the mapping tracking structure.
743 * @param ppv Where to store the mapping address of the page. The page
744 * offset is masked off!
745 *
746 * @remarks Called from within the PGM critical section.
747 */
748static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
749{
750 Assert(PGMIsLocked(pVM));
751
752#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
753 /*
754 * Just some sketchy GC/R0-darwin code.
755 */
756 *ppMap = NULL;
757 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
758 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
759# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
760 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
761# else
762 PGMDynMapHCPage(pVM, HCPhys, ppv);
763# endif
764 return VINF_SUCCESS;
765
766#else /* IN_RING3 || IN_RING0 */
767
768
769 /*
770 * Special case: ZERO and MMIO2 pages.
771 */
772 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
773 if (idChunk == NIL_GMM_CHUNKID)
774 {
775 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
776 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
777 {
778 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
779 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
780 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
781 *ppv = (void *)((uintptr_t)pRam->pvR3 + (uintptr_t)((GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK) - pRam->GCPhys));
782 }
783 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
784 {
785 /** @todo deal with aliased MMIO2 pages somehow...
786 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
787 * them, that would also avoid this mess. It would actually be kind of
788 * elegant... */
789 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
790 }
791 else
792 {
793 /** @todo handle MMIO2 */
794 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
795 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
796 ("pPage=%R[pgmpage]\n", pPage),
797 VERR_INTERNAL_ERROR_2);
798 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
799 }
800 *ppMap = NULL;
801 return VINF_SUCCESS;
802 }
803
804 /*
805 * Find/make Chunk TLB entry for the mapping chunk.
806 */
807 PPGMCHUNKR3MAP pMap;
808 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
809 if (pTlbe->idChunk == idChunk)
810 {
811 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
812 pMap = pTlbe->pChunk;
813 }
814 else
815 {
816 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
817
818 /*
819 * Find the chunk, map it if necessary.
820 */
821 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
822 if (!pMap)
823 {
824#ifdef IN_RING0
825 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
826 AssertRCReturn(rc, rc);
827 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
828 Assert(pMap);
829#else
830 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
831 if (RT_FAILURE(rc))
832 return rc;
833#endif
834 }
835
836 /*
837 * Enter it into the Chunk TLB.
838 */
839 pTlbe->idChunk = idChunk;
840 pTlbe->pChunk = pMap;
841 pMap->iAge = 0;
842 }
843
844 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
845 *ppMap = pMap;
846 return VINF_SUCCESS;
847#endif /* IN_RING3 */
848}
849
850
851/**
852 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
853 *
854 * This is typically used in paths where we cannot use the TLB methods (like ROM
855 * pages) or where there is no point in using them since we won't get many hits.
856 *
857 * @returns VBox strict status code.
858 * @retval VINF_SUCCESS on success.
859 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
860 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
861 *
862 * @param pVM The VM address.
863 * @param pPage The physical page tracking structure.
864 * @param GCPhys The address of the page.
865 * @param ppv Where to store the mapping address of the page. The page
866 * offset is masked off!
867 *
868 * @remarks Called from within the PGM critical section. The mapping is only
869 * valid while you're inside this section.
870 */
871int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
872{
873 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
874 if (RT_SUCCESS(rc))
875 {
876 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
877 PPGMPAGEMAP pMapIgnore;
878 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
879 if (RT_FAILURE(rc2)) /* preserve rc */
880 rc = rc2;
881 }
882 return rc;
883}
884
885
886/**
887 * Maps a page into the current virtual address space so it can be accessed for
888 * both writing and reading.
889 *
890 * This is typically used in paths where we cannot use the TLB methods (like ROM
891 * pages) or where there is no point in using them since we won't get many hits.
892 *
893 * @returns VBox status code.
894 * @retval VINF_SUCCESS on success.
895 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
896 *
897 * @param pVM The VM address.
898 * @param pPage The physical page tracking structure. Must be in the
899 * allocated state.
900 * @param GCPhys The address of the page.
901 * @param ppv Where to store the mapping address of the page. The page
902 * offset is masked off!
903 *
904 * @remarks Called from within the PGM critical section. The mapping is only
905 * valid while you're inside this section.
906 */
907int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
908{
909 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
910 PPGMPAGEMAP pMapIgnore;
911 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
912}
913
914
915/**
916 * Maps a page into the current virtual address space so it can be accessed for
917 * reading.
918 *
919 * This is typically used in paths where we cannot use the TLB methods (like ROM
920 * pages) or where there is no point in using them since we won't get many hits.
921 *
922 * @returns VBox status code.
923 * @retval VINF_SUCCESS on success.
924 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
925 *
926 * @param pVM The VM address.
927 * @param pPage The physical page tracking structure.
928 * @param GCPhys The address of the page.
929 * @param ppv Where to store the mapping address of the page. The page
930 * offset is masked off!
931 *
932 * @remarks Called from within the PGM critical section. The mapping is only
933 * valid while you're inside this section.
934 */
935int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
936{
937 PPGMPAGEMAP pMapIgnore;
938 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
939}
940
941
942#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
943/**
944 * Load a guest page into the ring-3 physical TLB.
945 *
946 * @returns VBox status code.
947 * @retval VINF_SUCCESS on success
948 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
949 * @param pPGM The PGM instance pointer.
950 * @param GCPhys The guest physical address in question.
951 */
952int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
953{
954 Assert(PGMIsLocked(PGM2VM(pPGM)));
955 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
956
957 /*
958 * Find the ram range.
959 * 99.8% of requests are expected to be in the first range.
960 */
961 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
962 RTGCPHYS off = GCPhys - pRam->GCPhys;
963 if (RT_UNLIKELY(off >= pRam->cb))
964 {
965 do
966 {
967 pRam = pRam->CTX_SUFF(pNext);
968 if (!pRam)
969 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
970 off = GCPhys - pRam->GCPhys;
971 } while (off >= pRam->cb);
972 }
973
974 /*
975 * Map the page.
976 * Make a special case for the zero page as it is kind of special.
977 */
978 PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
979 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
980 if (!PGM_PAGE_IS_ZERO(pPage))
981 {
982 void *pv;
983 PPGMPAGEMAP pMap;
984 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
985 if (RT_FAILURE(rc))
986 return rc;
987 pTlbe->pMap = pMap;
988 pTlbe->pv = pv;
989 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
990 }
991 else
992 {
993 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
994 pTlbe->pMap = NULL;
995 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
996 }
997#ifdef PGM_WITH_PHYS_TLB
998 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
999#else
1000 pTlbe->GCPhys = NIL_RTGCPHYS;
1001#endif
1002 pTlbe->pPage = pPage;
1003 return VINF_SUCCESS;
1004}
1005
1006
1007/**
1008 * Load a guest page into the ring-3 physical TLB.
1009 *
1010 * @returns VBox status code.
1011 * @retval VINF_SUCCESS on success
1012 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1013 *
1014 * @param pPGM The PGM instance pointer.
1015 * @param pPage Pointer to the PGMPAGE structure corresponding to
1016 * GCPhys.
1017 * @param GCPhys The guest physical address in question.
1018 */
1019int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1020{
1021 Assert(PGMIsLocked(PGM2VM(pPGM)));
1022 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
1023
1024 /*
1025 * Map the page.
1026 * Make a special case for the zero page as it is kind of special.
1027 */
1028 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1029 if (!PGM_PAGE_IS_ZERO(pPage))
1030 {
1031 void *pv;
1032 PPGMPAGEMAP pMap;
1033 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
1034 if (RT_FAILURE(rc))
1035 return rc;
1036 pTlbe->pMap = pMap;
1037 pTlbe->pv = pv;
1038 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1039 }
1040 else
1041 {
1042 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
1043 pTlbe->pMap = NULL;
1044 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
1045 }
1046#ifdef PGM_WITH_PHYS_TLB
1047 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1048#else
1049 pTlbe->GCPhys = NIL_RTGCPHYS;
1050#endif
1051 pTlbe->pPage = pPage;
1052 return VINF_SUCCESS;
1053}
1054#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1055
1056
1057/**
1058 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1059 * own the PGM lock and therefore not need to lock the mapped page.
1060 *
1061 * @returns VBox status code.
1062 * @retval VINF_SUCCESS on success.
1063 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1064 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1065 *
1066 * @param pVM The VM handle.
1067 * @param GCPhys The guest physical address of the page that should be mapped.
1068 * @param pPage Pointer to the PGMPAGE structure for the page.
1069 * @param ppv Where to store the address corresponding to GCPhys.
1070 *
1071 * @internal
1072 */
1073int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1074{
1075 int rc;
1076 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1077 Assert(PGMIsLocked(pVM));
1078
1079 /*
1080 * Make sure the page is writable.
1081 */
1082 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1083 {
1084 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1085 if (RT_FAILURE(rc))
1086 return rc;
1087 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1088 }
1089 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1090
1091 /*
1092 * Get the mapping address.
1093 */
1094#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1095 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
1096#else
1097 PPGMPAGEMAPTLBE pTlbe;
1098 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1099 if (RT_FAILURE(rc))
1100 return rc;
1101 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1102#endif
1103 return VINF_SUCCESS;
1104}
1105
1106
1107/**
1108 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1109 * own the PGM lock and therefore not need to lock the mapped page.
1110 *
1111 * @returns VBox status code.
1112 * @retval VINF_SUCCESS on success.
1113 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1114 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1115 *
1116 * @param pVM The VM handle.
1117 * @param GCPhys The guest physical address of the page that should be mapped.
1118 * @param pPage Pointer to the PGMPAGE structure for the page.
1119 * @param ppv Where to store the address corresponding to GCPhys.
1120 *
1121 * @internal
1122 */
1123int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
1124{
1125 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1126 Assert(PGMIsLocked(pVM));
1127 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1128
1129 /*
1130 * Get the mapping address.
1131 */
1132#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1133 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1134#else
1135 PPGMPAGEMAPTLBE pTlbe;
1136 int rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1137 if (RT_FAILURE(rc))
1138 return rc;
1139 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1140#endif
1141 return VINF_SUCCESS;
1142}
1143
1144
1145/**
1146 * Requests the mapping of a guest page into the current context.
1147 *
1148 * This API should only be used for very short term, as it will consume
1149 * scarce resources (R0 and GC) in the mapping cache. When you're done
1150 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1151 *
1152 * This API will assume your intention is to write to the page, and will
1153 * therefore replace shared and zero pages. If you do not intend to modify
1154 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1155 *
1156 * @returns VBox status code.
1157 * @retval VINF_SUCCESS on success.
1158 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1159 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1160 *
1161 * @param pVM The VM handle.
1162 * @param GCPhys The guest physical address of the page that should be mapped.
1163 * @param ppv Where to store the address corresponding to GCPhys.
1164 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1165 *
1166 * @remarks The caller is responsible for dealing with access handlers.
1167 * @todo Add an informational return code for pages with access handlers?
1168 *
1169 * @remark Avoid calling this API from within critical sections (other than the
1170 * PGM one) because of the deadlock risk. External threads may need to
1171 * delegate jobs to the EMTs.
1172 * @thread Any thread.
1173 */
1174VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1175{
1176#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1177
1178 /*
1179 * Find the page and make sure it's writable.
1180 */
1181 PPGMPAGE pPage;
1182 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1183 if (RT_SUCCESS(rc))
1184 {
1185 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1186 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1187 if (RT_SUCCESS(rc))
1188 {
1189 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1190# if 0
1191 pLock->pvMap = 0;
1192 pLock->pvPage = pPage;
1193# else
1194 pLock->u32Dummy = UINT32_MAX;
1195# endif
1196 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1197 rc = VINF_SUCCESS;
1198 }
1199 }
1200
1201#else /* IN_RING3 || IN_RING0 */
1202 int rc = pgmLock(pVM);
1203 AssertRCReturn(rc, rc);
1204
1205 /*
1206 * Query the Physical TLB entry for the page (may fail).
1207 */
1208 PPGMPAGEMAPTLBE pTlbe;
1209 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1210 if (RT_SUCCESS(rc))
1211 {
1212 /*
1213 * If the page is shared, the zero page, or being write monitored
1214 * it must be converted to a page that's writable if possible.
1215 */
1216 PPGMPAGE pPage = pTlbe->pPage;
1217 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1218 {
1219 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1220 if (RT_SUCCESS(rc))
1221 {
1222 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1223 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1224 }
1225 }
1226 if (RT_SUCCESS(rc))
1227 {
1228 /*
1229 * Now, just perform the locking and calculate the return address.
1230 */
1231 PPGMPAGEMAP pMap = pTlbe->pMap;
1232 if (pMap)
1233 pMap->cRefs++;
1234
1235 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1236 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1237 {
1238 if (cLocks == 0)
1239 pVM->pgm.s.cWriteLockedPages++;
1240 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1241 }
1242 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
1243 {
1244 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1245 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
1246 if (pMap)
1247 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1248 }
1249
1250 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1251 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1252 pLock->pvMap = pMap;
1253 }
1254 }
1255
1256 pgmUnlock(pVM);
1257#endif /* IN_RING3 || IN_RING0 */
1258 return rc;
1259}
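
/*
 * A minimal sketch of the map/use/release pattern described above, assuming a
 * valid pVM and a caller-supplied source buffer; the helper name and the
 * memcpy payload are illustrative only.  The write mapping is released again
 * as soon as the access is done, as the documentation requests.
 */
#if 0 /* usage sketch */
static int pgmPhysExampleWriteGuestPage(PVM pVM, RTGCPHYS GCPhys, const void *pvSrc, size_t cb)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);     /* replaces zero/shared pages as needed */
    if (RT_SUCCESS(rc))
    {
        Assert(cb <= PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK)); /* must stay within the mapped page */
        memcpy(pv, pvSrc, cb);
        PGMPhysReleasePageMappingLock(pVM, &Lock);              /* release ASAP */
    }
    return rc;
}
#endif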
1260
1261
1262/**
1263 * Requests the mapping of a guest page into the current context.
1264 *
1265 * This API should only be used for very short term, as it will consume
1266 * scarce resources (R0 and GC) in the mapping cache. When you're done
1267 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1268 *
1269 * @returns VBox status code.
1270 * @retval VINF_SUCCESS on success.
1271 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1272 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1273 *
1274 * @param pVM The VM handle.
1275 * @param GCPhys The guest physical address of the page that should be mapped.
1276 * @param ppv Where to store the address corresponding to GCPhys.
1277 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1278 *
1279 * @remarks The caller is responsible for dealing with access handlers.
1280 * @todo Add an informational return code for pages with access handlers?
1281 *
1282 * @remark Avoid calling this API from within critical sections (other than
1283 * the PGM one) because of the deadlock risk.
1284 * @thread Any thread.
1285 */
1286VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1287{
1288#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1289
1290 /*
1291 * Find the page and make sure it's readable.
1292 */
1293 PPGMPAGE pPage;
1294 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1295 if (RT_SUCCESS(rc))
1296 {
1297 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1298 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1299 else
1300 {
1301 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1302# if 0
1303 pLock->pvMap = 0;
1304 pLock->pvPage = pPage;
1305# else
1306 pLock->u32Dummy = UINT32_MAX;
1307# endif
1308 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1309 rc = VINF_SUCCESS;
1310 }
1311 }
1312
1313#else /* IN_RING3 || IN_RING0 */
1314 int rc = pgmLock(pVM);
1315 AssertRCReturn(rc, rc);
1316
1317 /*
1318 * Query the Physical TLB entry for the page (may fail).
1319 */
1320 PPGMPAGEMAPTLBE pTlbe;
1321 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1322 if (RT_SUCCESS(rc))
1323 {
1324 /* MMIO pages don't have any readable backing. */
1325 PPGMPAGE pPage = pTlbe->pPage;
1326 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1327 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1328 else
1329 {
1330 /*
1331 * Now, just perform the locking and calculate the return address.
1332 */
1333 PPGMPAGEMAP pMap = pTlbe->pMap;
1334 if (pMap)
1335 pMap->cRefs++;
1336
1337 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1338 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1339 {
1340 if (cLocks == 0)
1341 pVM->pgm.s.cReadLockedPages++;
1342 PGM_PAGE_INC_READ_LOCKS(pPage);
1343 }
1344 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
1345 {
1346 PGM_PAGE_INC_READ_LOCKS(pPage);
1347 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
1348 if (pMap)
1349 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1350 }
1351
1352 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1353 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1354 pLock->pvMap = pMap;
1355 }
1356 }
1357
1358 pgmUnlock(pVM);
1359#endif /* IN_RING3 || IN_RING0 */
1360 return rc;
1361}
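
/*
 * The read-only counterpart of the sketch above, again with an illustrative
 * helper name.  MMIO pages have no readable backing, so the call fails with
 * VERR_PGM_PHYS_PAGE_RESERVED for them instead of returning a mapping.
 */
#if 0 /* usage sketch */
static int pgmPhysExampleReadGuestPage(PVM pVM, RTGCPHYS GCPhys, void *pvDst, size_t cb)
{
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(pvDst, pv, cb);
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif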
1362
1363
1364/**
1365 * Requests the mapping of a guest page given by virtual address into the current context.
1366 *
1367 * This API should only be used for very short term, as it will consume
1368 * scarce resources (R0 and GC) in the mapping cache. When you're done
1369 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1370 *
1371 * This API will assume your intention is to write to the page, and will
1372 * therefore replace shared and zero pages. If you do not intend to modify
1373 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1374 *
1375 * @returns VBox status code.
1376 * @retval VINF_SUCCESS on success.
1377 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1378 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1379 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1380 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1381 *
1382 * @param pVCpu VMCPU handle.
1383 * @param GCPtr The guest virtual address of the page that should be mapped.
1384 * @param ppv Where to store the address corresponding to GCPhys.
1385 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1386 *
1387 * @remark Avoid calling this API from within critical sections (other than
1388 * the PGM one) because of the deadlock risk.
1389 * @thread EMT
1390 */
1391VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1392{
1393 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1394 RTGCPHYS GCPhys;
1395 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1396 if (RT_SUCCESS(rc))
1397 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1398 return rc;
1399}
1400
1401
1402/**
1403 * Requests the mapping of a guest page given by virtual address into the current context.
1404 *
1405 * This API should only be used for very short term, as it will consume
1406 * scarce resources (R0 and GC) in the mapping cache. When you're done
1407 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1408 *
1409 * @returns VBox status code.
1410 * @retval VINF_SUCCESS on success.
1411 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1412 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1413 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1414 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1415 *
1416 * @param pVCpu VMCPU handle.
1417 * @param GCPtr The guest virtual address of the page that should be mapped.
1418 * @param ppv Where to store the address corresponding to GCPhys.
1419 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1420 *
1421 * @remark Avoid calling this API from within critical sections (other than
1422 * the PGM one) because of the deadlock risk.
1423 * @thread EMT
1424 */
1425VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1426{
1427 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1428 RTGCPHYS GCPhys;
1429 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1430 if (RT_SUCCESS(rc))
1431 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1432 return rc;
1433}
1434
1435
1436/**
1437 * Release the mapping of a guest page.
1438 *
1439 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1440 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1441 *
1442 * @param pVM The VM handle.
1443 * @param pLock The lock structure initialized by the mapping function.
1444 */
1445VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1446{
1447#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1448 /* currently nothing to do here. */
1449 Assert(pLock->u32Dummy == UINT32_MAX);
1450 pLock->u32Dummy = 0;
1451
1452#else /* IN_RING3 */
1453 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1454 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1455 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1456
1457 pLock->uPageAndType = 0;
1458 pLock->pvMap = NULL;
1459
1460 pgmLock(pVM);
1461 if (fWriteLock)
1462 {
1463 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1464 Assert(cLocks > 0);
1465 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1466 {
1467 if (cLocks == 1)
1468 {
1469 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1470 pVM->pgm.s.cWriteLockedPages--;
1471 }
1472 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1473 }
1474
1475 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1476 {
1477 PGM_PAGE_SET_WRITTEN_TO(pPage);
1478 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1479 Assert(pVM->pgm.s.cMonitoredPages > 0);
1480 pVM->pgm.s.cMonitoredPages--;
1481 pVM->pgm.s.cWrittenToPages++;
1482 }
1483 }
1484 else
1485 {
1486 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1487 Assert(cLocks > 0);
1488 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1489 {
1490 if (cLocks == 1)
1491 {
1492 Assert(pVM->pgm.s.cReadLockedPages > 0);
1493 pVM->pgm.s.cReadLockedPages--;
1494 }
1495 PGM_PAGE_DEC_READ_LOCKS(pPage);
1496 }
1497 }
1498
1499 if (pMap)
1500 {
1501 Assert(pMap->cRefs >= 1);
1502 pMap->cRefs--;
1503 pMap->iAge = 0;
1504 }
1505 pgmUnlock(pVM);
1506#endif /* IN_RING3 */
1507}
1508
1509
1510/**
1511 * Converts a GC physical address to a HC ring-3 pointer.
1512 *
1513 * @returns VINF_SUCCESS on success.
1514 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1515 * page but has no physical backing.
1516 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1517 * GC physical address.
1518 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1519 * a dynamic ram chunk boundary
1520 *
1521 * @param pVM The VM handle.
1522 * @param GCPhys The GC physical address to convert.
1523 * @param cbRange Physical range
1524 * @param pR3Ptr Where to store the R3 pointer on success.
1525 *
1526 * @deprecated Avoid when possible!
1527 */
1528VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
1529{
1530/** @todo this is kind of hacky and needs some more work. */
1531#ifndef DEBUG_sandervl
1532 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1533#endif
1534
1535 Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): dont use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
1536#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1537 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1538#else
1539 pgmLock(pVM);
1540
1541 PPGMRAMRANGE pRam;
1542 PPGMPAGE pPage;
1543 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
1544 if (RT_SUCCESS(rc))
1545 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
1546
1547 pgmUnlock(pVM);
1548 Assert(rc <= VINF_SUCCESS);
1549 return rc;
1550#endif
1551}
1552
1553
1554#ifdef VBOX_STRICT
1555/**
1556 * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
1557 *
1558 * @returns The R3Ptr, NIL_RTR3PTR on failure.
1559 * @param pVM The VM handle.
1560 * @param GCPhys The GC physical address.
1561 * @param cbRange Physical range.
1562 *
1563 * @deprecated Avoid when possible.
1564 */
1565VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
1566{
1567 RTR3PTR R3Ptr;
1568 int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
1569 if (RT_SUCCESS(rc))
1570 return R3Ptr;
1571 return NIL_RTR3PTR;
1572}
1573#endif /* VBOX_STRICT */
1574
1575
1576/**
1577 * Converts a guest pointer to a GC physical address.
1578 *
1579 * This uses the current CR3/CR0/CR4 of the guest.
1580 *
1581 * @returns VBox status code.
1582 * @param pVCpu The VMCPU Handle
1583 * @param GCPtr The guest pointer to convert.
1584 * @param pGCPhys Where to store the GC physical address.
1585 */
1586VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1587{
1588 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1589 if (pGCPhys && RT_SUCCESS(rc))
1590 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1591 return rc;
1592}
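
/*
 * A minimal sketch of a guest-virtual to guest-physical translation using the
 * function above, assuming a valid pVCpu; the helper name and log strings are
 * illustrative only.  The page offset bits of GCPtr are OR'ed into *pGCPhys,
 * so the result addresses the exact byte, not just the page.
 */
#if 0 /* usage sketch */
static void pgmPhysExampleLogTranslation(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
    if (RT_SUCCESS(rc))
        Log(("%RGv -> %RGp\n", GCPtr, GCPhys));
    else
        Log(("%RGv not mapped, rc=%Rrc\n", GCPtr, rc));
}
#endif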
1593
1594
1595/**
1596 * Converts a guest pointer to a HC physical address.
1597 *
1598 * This uses the current CR3/CR0/CR4 of the guest.
1599 *
1600 * @returns VBox status code.
1601 * @param pVCpu The VMCPU Handle
1602 * @param GCPtr The guest pointer to convert.
1603 * @param pHCPhys Where to store the HC physical address.
1604 */
1605VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
1606{
1607 PVM pVM = pVCpu->CTX_SUFF(pVM);
1608 RTGCPHYS GCPhys;
1609 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1610 if (RT_SUCCESS(rc))
1611 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
1612 return rc;
1613}
1614
1615
1616/**
1617 * Converts a guest pointer to a R3 pointer.
1618 *
1619 * This uses the current CR3/CR0/CR4 of the guest.
1620 *
1621 * @returns VBox status code.
1622 * @param pVCpu The VMCPU Handle
1623 * @param GCPtr The guest pointer to convert.
1624 * @param pR3Ptr Where to store the R3 virtual address.
1625 *
1626 * @deprecated Don't use this.
1627 */
1628VMMDECL(int) PGMPhysGCPtr2R3Ptr(PVMCPU pVCpu, RTGCPTR GCPtr, PRTR3PTR pR3Ptr)
1629{
1630 PVM pVM = pVCpu->CTX_SUFF(pVM);
1631 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1632 RTGCPHYS GCPhys;
1633 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1634 if (RT_SUCCESS(rc))
1635 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pR3Ptr);
1636 return rc;
1637}
1638
1639
1640
1641#undef LOG_GROUP
1642#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
1643
1644
1645#ifdef IN_RING3
1646/**
1647 * Cache PGMPhys memory access
1648 *
1649 * @param pVM VM Handle.
1650 * @param pCache Cache structure pointer
1651 * @param GCPhys GC physical address
1652 * @param pbR3       R3 pointer corresponding to the physical page
1653 *
1654 * @thread EMT.
1655 */
1656static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
1657{
1658 uint32_t iCacheIndex;
1659
1660 Assert(VM_IS_EMT(pVM));
1661
1662 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
1663 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
1664
1665 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
1666
1667 ASMBitSet(&pCache->aEntries, iCacheIndex);
1668
1669 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
1670 pCache->Entry[iCacheIndex].pbR3 = pbR3;
1671}
1672#endif /* IN_RING3 */
1673
1674
1675/**
1676 * Deals with reading from a page with one or more ALL access handlers.
1677 *
1678 * @returns VBox status code. Can be ignored in ring-3.
1679 * @retval VINF_SUCCESS.
1680 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1681 *
1682 * @param pVM The VM handle.
1683 * @param pPage The page descriptor.
1684 * @param GCPhys The physical address to start reading at.
1685 * @param pvBuf Where to put the bits we read.
1686 * @param cb How much to read - less or equal to a page.
1687 */
1688static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
1689{
1690 /*
1691 * The most frequent access here is MMIO and shadowed ROM.
1692 * The current code ASSUMES that all these access handlers cover full pages!
1693 */
1694
1695 /*
1696 * Whatever we do we need the source page, map it first.
1697 */
1698 const void *pvSrc = NULL;
1699 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
1700 if (RT_FAILURE(rc))
1701 {
1702 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1703 GCPhys, pPage, rc));
1704 memset(pvBuf, 0xff, cb);
1705 return VINF_SUCCESS;
1706 }
1707 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1708
1709 /*
1710 * Deal with any physical handlers.
1711 */
1712 PPGMPHYSHANDLER pPhys = NULL;
1713 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
1714 {
1715#ifdef IN_RING3
1716 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1717 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1718 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
1719 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
1720 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1721 Assert(pPhys->CTX_SUFF(pfnHandler));
1722
1723 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
1724 void *pvUser = pPhys->CTX_SUFF(pvUser);
1725
1726 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
1727 STAM_PROFILE_START(&pPhys->Stat, h);
1728 Assert(PGMIsLockOwner(pVM));
1729 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1730 pgmUnlock(pVM);
1731 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
1732 pgmLock(pVM);
1733# ifdef VBOX_WITH_STATISTICS
1734 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1735 if (pPhys)
1736 STAM_PROFILE_STOP(&pPhys->Stat, h);
1737# else
1738 pPhys = NULL; /* might not be valid anymore. */
1739# endif
1740 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
1741#else
1742 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1743 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1744 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1745#endif
1746 }
1747
1748 /*
1749 * Deal with any virtual handlers.
1750 */
1751 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
1752 {
1753 unsigned iPage;
1754 PPGMVIRTHANDLER pVirt;
1755
1756 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
1757 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
1758 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
1759 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1760 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
1761
1762#ifdef IN_RING3
1763 if (pVirt->pfnHandlerR3)
1764 {
1765 if (!pPhys)
1766 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1767 else
1768 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
1769 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1770 + (iPage << PAGE_SHIFT)
1771 + (GCPhys & PAGE_OFFSET_MASK);
1772
1773 STAM_PROFILE_START(&pVirt->Stat, h);
1774 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
1775 STAM_PROFILE_STOP(&pVirt->Stat, h);
1776 if (rc2 == VINF_SUCCESS)
1777 rc = VINF_SUCCESS;
1778 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
1779 }
1780 else
1781 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1782#else
1783 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1784 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1785 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1786#endif
1787 }
1788
1789 /*
1790 * Take the default action.
1791 */
1792 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1793 memcpy(pvBuf, pvSrc, cb);
1794 return rc;
1795}
1796
1797
1798/**
1799 * Read physical memory.
1800 *
1801 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
1802 * want to ignore those.
1803 *
1804 * @returns VBox status code. Can be ignored in ring-3.
1805 * @retval VINF_SUCCESS.
1806 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1807 *
1808 * @param pVM VM Handle.
1809 * @param GCPhys Physical address start reading from.
1810 * @param pvBuf Where to put the read bits.
1811 * @param cbRead How many bytes to read.
1812 */
1813VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1814{
1815 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
1816 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
1817
1818 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysRead));
1819 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
1820
1821 pgmLock(pVM);
1822
1823 /*
1824 * Copy loop on ram ranges.
1825 */
1826 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1827 for (;;)
1828 {
1829 /* Find range. */
1830 while (pRam && GCPhys > pRam->GCPhysLast)
1831 pRam = pRam->CTX_SUFF(pNext);
1832 /* Inside range or not? */
1833 if (pRam && GCPhys >= pRam->GCPhys)
1834 {
1835 /*
1836 * Must work our way thru this page by page.
1837 */
1838 RTGCPHYS off = GCPhys - pRam->GCPhys;
1839 while (off < pRam->cb)
1840 {
1841 unsigned iPage = off >> PAGE_SHIFT;
1842 PPGMPAGE pPage = &pRam->aPages[iPage];
1843 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1844 if (cb > cbRead)
1845 cb = cbRead;
1846
1847 /*
1848 * Any ALL access handlers?
1849 */
1850 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
1851 {
1852 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
1853 if (RT_FAILURE(rc))
1854 {
1855 pgmUnlock(pVM);
1856 return rc;
1857 }
1858 }
1859 else
1860 {
1861 /*
1862 * Get the pointer to the page.
1863 */
1864 const void *pvSrc;
1865 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
1866 if (RT_SUCCESS(rc))
1867 memcpy(pvBuf, pvSrc, cb);
1868 else
1869 {
1870 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1871 pRam->GCPhys + off, pPage, rc));
1872 memset(pvBuf, 0xff, cb);
1873 }
1874 }
1875
1876 /* next page */
1877 if (cb >= cbRead)
1878 {
1879 pgmUnlock(pVM);
1880 return VINF_SUCCESS;
1881 }
1882 cbRead -= cb;
1883 off += cb;
1884 pvBuf = (char *)pvBuf + cb;
1885 } /* walk pages in ram range. */
1886
1887 GCPhys = pRam->GCPhysLast + 1;
1888 }
1889 else
1890 {
1891 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
1892
1893 /*
1894 * Unassigned address space.
1895 */
1896 if (!pRam)
1897 break;
1898 size_t cb = pRam->GCPhys - GCPhys;
1899 if (cb >= cbRead)
1900 {
1901 memset(pvBuf, 0xff, cbRead);
1902 break;
1903 }
1904 memset(pvBuf, 0xff, cb);
1905
1906 cbRead -= cb;
1907 pvBuf = (char *)pvBuf + cb;
1908 GCPhys += cb;
1909 }
1910 } /* Ram range walk */
1911
1912 pgmUnlock(pVM);
1913 return VINF_SUCCESS;
1914}
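
/*
 * Usage sketch (illustrative only): reading guest physical memory through the
 * handler-aware path.  The function and buffer names are made up.  In ring-3
 * the status can normally be ignored; in R0/RC a read that hits an ALL access
 * handler returns VERR_PGM_PHYS_WR_HIT_HANDLER and has to be redone in ring-3.
 *
 * @code
 *  static int exampleFetchGuestBlock(PVM pVM, RTGCPHYS GCPhys, uint8_t *pbDst, size_t cbDst)
 *  {
 *      int rc = PGMPhysRead(pVM, GCPhys, pbDst, cbDst);
 *      if (RT_FAILURE(rc))
 *          return rc;      /* R0/RC only: let the caller fall back to ring-3. */
 *      /* pbDst now holds the guest data; unassigned space reads back as 0xff. */
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 */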
1915
1916
1917/**
1918 * Deals with writing to a page with one or more WRITE or ALL access handlers.
1919 *
1920 * @returns VBox status code. Can be ignored in ring-3.
1921 * @retval VINF_SUCCESS.
1922 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1923 *
1924 * @param pVM The VM handle.
1925 * @param pPage The page descriptor.
1926 * @param GCPhys The physical address to start writing at.
1927 * @param pvBuf What to write.
1928 * @param cbWrite How much to write - less or equal to a page.
1929 */
1930static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
1931{
1932 void *pvDst = NULL;
1933 int rc;
1934
1935 /*
1936 * Give priority to physical handlers (like #PF does).
1937 *
1938 * Hope for a lonely physical handler first that covers the whole
1939 * write area. This should be a pretty frequent case with MMIO and
1940 * the heavy usage of full page handlers in the page pool.
1941 */
1942 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
1943 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
1944 {
1945 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1946 if (pCur)
1947 {
1948 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1949 Assert(pCur->CTX_SUFF(pfnHandler));
1950
1951 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
1952 if (cbRange > cbWrite)
1953 cbRange = cbWrite;
1954
1955#ifndef IN_RING3
1956 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1957 NOREF(cbRange);
1958 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1959 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1960
1961#else /* IN_RING3 */
1962 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1963 if (!PGM_PAGE_IS_MMIO(pPage))
1964 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1965 else
1966 rc = VINF_SUCCESS;
1967 if (RT_SUCCESS(rc))
1968 {
1969 PFNPGMR3PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
1970 void *pvUser = pCur->CTX_SUFF(pvUser);
1971
1972 STAM_PROFILE_START(&pCur->Stat, h);
1973 Assert(PGMIsLockOwner(pVM));
1974 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1975 pgmUnlock(pVM);
1976 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
1977 pgmLock(pVM);
1978# ifdef VBOX_WITH_STATISTICS
1979 pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1980 if (pCur)
1981 STAM_PROFILE_STOP(&pCur->Stat, h);
1982# else
1983 pCur = NULL; /* might not be valid anymore. */
1984# endif
1985 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1986 memcpy(pvDst, pvBuf, cbRange);
1987 else
1988 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
1989 }
1990 else
1991 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1992 GCPhys, pPage, rc), rc);
1993 if (RT_LIKELY(cbRange == cbWrite))
1994 return VINF_SUCCESS;
1995
1996 /* more fun to be had below */
1997 cbWrite -= cbRange;
1998 GCPhys += cbRange;
1999 pvBuf = (uint8_t *)pvBuf + cbRange;
2000 pvDst = (uint8_t *)pvDst + cbRange;
2001#endif /* IN_RING3 */
2002 }
2003 /* else: the handler is somewhere else in the page, deal with it below. */
2004 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2005 }
2006 /*
2007 * A virtual handler without any interfering physical handlers.
2008 * Hopefully it'll cover the whole write.
2009 */
2010 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2011 {
2012 unsigned iPage;
2013 PPGMVIRTHANDLER pCur;
2014 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
2015 if (RT_SUCCESS(rc))
2016 {
2017 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2018 if (cbRange > cbWrite)
2019 cbRange = cbWrite;
2020
2021#ifndef IN_RING3
2022 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2023 NOREF(cbRange);
2024 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2025 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2026
2027#else /* IN_RING3 */
2028
2029 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2030 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2031 if (RT_SUCCESS(rc))
2032 {
2033 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2034 if (pCur->pfnHandlerR3)
2035 {
2036 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
2037 + (iPage << PAGE_SHIFT)
2038 + (GCPhys & PAGE_OFFSET_MASK);
2039
2040 STAM_PROFILE_START(&pCur->Stat, h);
2041 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2042 STAM_PROFILE_STOP(&pCur->Stat, h);
2043 }
2044 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2045 memcpy(pvDst, pvBuf, cbRange);
2046 else
2047 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2048 }
2049 else
2050 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2051 GCPhys, pPage, rc), rc);
2052 if (RT_LIKELY(cbRange == cbWrite))
2053 return VINF_SUCCESS;
2054
2055 /* more fun to be had below */
2056 cbWrite -= cbRange;
2057 GCPhys += cbRange;
2058 pvBuf = (uint8_t *)pvBuf + cbRange;
2059 pvDst = (uint8_t *)pvDst + cbRange;
2060#endif
2061 }
2062 /* else: the handler is somewhere else in the page, deal with it below. */
2063 }
2064
2065 /*
2066 * Deal with all the odd ends.
2067 */
2068
2069 /* We need a writable destination page. */
2070 if (!pvDst)
2071 {
2072 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2073 AssertLogRelMsgReturn(RT_SUCCESS(rc),
2074 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2075 GCPhys, pPage, rc), rc);
2076 }
2077
2078 /* The loop state (big + ugly). */
2079 unsigned iVirtPage = 0;
2080 PPGMVIRTHANDLER pVirt = NULL;
2081 uint32_t offVirt = PAGE_SIZE;
2082 uint32_t offVirtLast = PAGE_SIZE;
2083 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2084
2085 PPGMPHYSHANDLER pPhys = NULL;
2086 uint32_t offPhys = PAGE_SIZE;
2087 uint32_t offPhysLast = PAGE_SIZE;
2088 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2089
2090 /* The loop. */
2091 for (;;)
2092 {
2093 /*
2094 * Find the closest handler at or above GCPhys.
2095 */
2096 if (fMoreVirt && !pVirt)
2097 {
2098 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
2099 if (RT_SUCCESS(rc))
2100 {
2101 offVirt = 0;
2102 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2103 }
2104 else
2105 {
2106 PPGMPHYS2VIRTHANDLER pVirtPhys;
2107 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2108 GCPhys, true /* fAbove */);
2109 if ( pVirtPhys
2110 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2111 {
2112 /* ASSUME that pVirtPhys only covers one page. */
2113 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2114 Assert(pVirtPhys->Core.Key > GCPhys);
2115
2116 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2117 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2118 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2119 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2120 }
2121 else
2122 {
2123 pVirt = NULL;
2124 fMoreVirt = false;
2125 offVirt = offVirtLast = PAGE_SIZE;
2126 }
2127 }
2128 }
2129
2130 if (fMorePhys && !pPhys)
2131 {
2132 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2133 if (pPhys)
2134 {
2135 offPhys = 0;
2136 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2137 }
2138 else
2139 {
2140 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2141 GCPhys, true /* fAbove */);
2142 if ( pPhys
2143 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2144 {
2145 offPhys = pPhys->Core.Key - GCPhys;
2146 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2147 }
2148 else
2149 {
2150 pPhys = NULL;
2151 fMorePhys = false;
2152 offPhys = offPhysLast = PAGE_SIZE;
2153 }
2154 }
2155 }
2156
2157 /*
2158 * Handle access to space without handlers (that's easy).
2159 */
2160 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2161 uint32_t cbRange = (uint32_t)cbWrite;
2162 if (offPhys && offVirt)
2163 {
2164 if (cbRange > offPhys)
2165 cbRange = offPhys;
2166 if (cbRange > offVirt)
2167 cbRange = offVirt;
2168 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2169 }
2170 /*
2171 * Physical handler.
2172 */
2173 else if (!offPhys && offVirt)
2174 {
2175 if (cbRange > offPhysLast + 1)
2176 cbRange = offPhysLast + 1;
2177 if (cbRange > offVirt)
2178 cbRange = offVirt;
2179#ifdef IN_RING3
2180 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2181 void *pvUser = pPhys->CTX_SUFF(pvUser);
2182
2183 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2184 STAM_PROFILE_START(&pPhys->Stat, h);
2185 Assert(PGMIsLockOwner(pVM));
2186 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2187 pgmUnlock(pVM);
2188 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2189 pgmLock(pVM);
2190# ifdef VBOX_WITH_STATISTICS
2191 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2192 if (pPhys)
2193 STAM_PROFILE_STOP(&pPhys->Stat, h);
2194# else
2195 pPhys = NULL; /* might not be valid anymore. */
2196# endif
2197 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2198#else
2199 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2200 NOREF(cbRange);
2201 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2202 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2203#endif
2204 }
2205 /*
2206 * Virtual handler.
2207 */
2208 else if (offPhys && !offVirt)
2209 {
2210 if (cbRange > offVirtLast + 1)
2211 cbRange = offVirtLast + 1;
2212 if (cbRange > offPhys)
2213 cbRange = offPhys;
2214#ifdef IN_RING3
2215            Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2216 if (pVirt->pfnHandlerR3)
2217 {
2218 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2219 + (iVirtPage << PAGE_SHIFT)
2220 + (GCPhys & PAGE_OFFSET_MASK);
2221 STAM_PROFILE_START(&pVirt->Stat, h);
2222 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2223 STAM_PROFILE_STOP(&pVirt->Stat, h);
2224 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2225 }
2226 pVirt = NULL;
2227#else
2228 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2229 NOREF(cbRange);
2230 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2231 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2232#endif
2233 }
2234 /*
2235 * Both... give the physical one priority.
2236 */
2237 else
2238 {
2239 Assert(!offPhys && !offVirt);
2240 if (cbRange > offVirtLast + 1)
2241 cbRange = offVirtLast + 1;
2242 if (cbRange > offPhysLast + 1)
2243 cbRange = offPhysLast + 1;
2244
2245#ifdef IN_RING3
2246 if (pVirt->pfnHandlerR3)
2247 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2248 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2249
2250 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2251 void *pvUser = pPhys->CTX_SUFF(pvUser);
2252
2253 STAM_PROFILE_START(&pPhys->Stat, h);
2254 Assert(PGMIsLockOwner(pVM));
2255 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2256 pgmUnlock(pVM);
2257 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2258 pgmLock(pVM);
2259# ifdef VBOX_WITH_STATISTICS
2260 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2261 if (pPhys)
2262 STAM_PROFILE_STOP(&pPhys->Stat, h);
2263# else
2264 pPhys = NULL; /* might not be valid anymore. */
2265# endif
2266 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2267 if (pVirt->pfnHandlerR3)
2268 {
2269
2270 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2271 + (iVirtPage << PAGE_SHIFT)
2272 + (GCPhys & PAGE_OFFSET_MASK);
2273 STAM_PROFILE_START(&pVirt->Stat, h2);
2274 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2275 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2276 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2277 rc = VINF_SUCCESS;
2278 else
2279 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2280 }
2281 pPhys = NULL;
2282 pVirt = NULL;
2283#else
2284 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2285 NOREF(cbRange);
2286 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2287 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2288#endif
2289 }
2290 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2291 memcpy(pvDst, pvBuf, cbRange);
2292
2293 /*
2294 * Advance if we've got more stuff to do.
2295 */
2296 if (cbRange >= cbWrite)
2297 return VINF_SUCCESS;
2298
2299 cbWrite -= cbRange;
2300 GCPhys += cbRange;
2301 pvBuf = (uint8_t *)pvBuf + cbRange;
2302 pvDst = (uint8_t *)pvDst + cbRange;
2303
2304 offPhys -= cbRange;
2305 offPhysLast -= cbRange;
2306 offVirt -= cbRange;
2307 offVirtLast -= cbRange;
2308 }
2309}
2310
2311
2312/**
2313 * Write to physical memory.
2314 *
2315 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2316 * want to ignore those.
2317 *
2318 * @returns VBox status code. Can be ignored in ring-3.
2319 * @retval VINF_SUCCESS.
2320 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2321 *
2322 * @param pVM VM Handle.
2323 * @param GCPhys Physical address to write to.
2324 * @param pvBuf What to write.
2325 * @param cbWrite How many bytes to write.
2326 */
2327VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2328{
2329 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2330 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2331 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2332
2333 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWrite));
2334 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2335
2336 pgmLock(pVM);
2337
2338 /*
2339 * Copy loop on ram ranges.
2340 */
2341 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2342 for (;;)
2343 {
2344 /* Find range. */
2345 while (pRam && GCPhys > pRam->GCPhysLast)
2346 pRam = pRam->CTX_SUFF(pNext);
2347 /* Inside range or not? */
2348 if (pRam && GCPhys >= pRam->GCPhys)
2349 {
2350 /*
2351 * Must work our way thru this page by page.
2352 */
2353 RTGCPTR off = GCPhys - pRam->GCPhys;
2354 while (off < pRam->cb)
2355 {
2356 RTGCPTR iPage = off >> PAGE_SHIFT;
2357 PPGMPAGE pPage = &pRam->aPages[iPage];
2358 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2359 if (cb > cbWrite)
2360 cb = cbWrite;
2361
2362 /*
2363 * Any active WRITE or ALL access handlers?
2364 */
2365 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2366 {
2367 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2368 if (RT_FAILURE(rc))
2369 {
2370 pgmUnlock(pVM);
2371 return rc;
2372 }
2373 }
2374 else
2375 {
2376 /*
2377 * Get the pointer to the page.
2378 */
2379 void *pvDst;
2380 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
2381 if (RT_SUCCESS(rc))
2382 memcpy(pvDst, pvBuf, cb);
2383 else
2384 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2385 pRam->GCPhys + off, pPage, rc));
2386 }
2387
2388 /* next page */
2389 if (cb >= cbWrite)
2390 {
2391 pgmUnlock(pVM);
2392 return VINF_SUCCESS;
2393 }
2394
2395 cbWrite -= cb;
2396 off += cb;
2397 pvBuf = (const char *)pvBuf + cb;
2398 } /* walk pages in ram range */
2399
2400 GCPhys = pRam->GCPhysLast + 1;
2401 }
2402 else
2403 {
2404 /*
2405 * Unassigned address space, skip it.
2406 */
2407 if (!pRam)
2408 break;
2409 size_t cb = pRam->GCPhys - GCPhys;
2410 if (cb >= cbWrite)
2411 break;
2412 cbWrite -= cb;
2413 pvBuf = (const char *)pvBuf + cb;
2414 GCPhys += cb;
2415 }
2416 } /* Ram range walk */
2417
2418 pgmUnlock(pVM);
2419 return VINF_SUCCESS;
2420}
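
/*
 * Usage sketch (illustrative only): the write-side counterpart of the read
 * example further up.  The helper name is made up.  The same R0/RC caveat
 * applies: hitting a WRITE or ALL handler outside ring-3 yields
 * VERR_PGM_PHYS_WR_HIT_HANDLER and the access must be retried in ring-3.
 *
 * @code
 *  static int exampleStoreGuestU32(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value)
 *  {
 *      int rc = PGMPhysWrite(pVM, GCPhys, &u32Value, sizeof(u32Value));
 *      /* In ring-3 the status can normally be ignored; writes to unassigned
 *         space are silently dropped. */
 *      return rc;
 *  }
 * @endcode
 */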
2421
2422
2423/**
2424 * Read from guest physical memory by GC physical address, bypassing
2425 * MMIO and access handlers.
2426 *
2427 * @returns VBox status.
2428 * @param pVM VM handle.
2429 * @param pvDst The destination address.
2430 * @param GCPhysSrc The source address (GC physical address).
2431 * @param cb The number of bytes to read.
2432 */
2433VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2434{
2435 /*
2436 * Treat the first page as a special case.
2437 */
2438 if (!cb)
2439 return VINF_SUCCESS;
2440
2441 /* map the 1st page */
2442 void const *pvSrc;
2443 PGMPAGEMAPLOCK Lock;
2444 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2445 if (RT_FAILURE(rc))
2446 return rc;
2447
2448 /* optimize for the case where access is completely within the first page. */
2449 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2450 if (RT_LIKELY(cb <= cbPage))
2451 {
2452 memcpy(pvDst, pvSrc, cb);
2453 PGMPhysReleasePageMappingLock(pVM, &Lock);
2454 return VINF_SUCCESS;
2455 }
2456
2457 /* copy to the end of the page. */
2458 memcpy(pvDst, pvSrc, cbPage);
2459 PGMPhysReleasePageMappingLock(pVM, &Lock);
2460 GCPhysSrc += cbPage;
2461 pvDst = (uint8_t *)pvDst + cbPage;
2462 cb -= cbPage;
2463
2464 /*
2465 * Page by page.
2466 */
2467 for (;;)
2468 {
2469 /* map the page */
2470 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2471 if (RT_FAILURE(rc))
2472 return rc;
2473
2474 /* last page? */
2475 if (cb <= PAGE_SIZE)
2476 {
2477 memcpy(pvDst, pvSrc, cb);
2478 PGMPhysReleasePageMappingLock(pVM, &Lock);
2479 return VINF_SUCCESS;
2480 }
2481
2482 /* copy the entire page and advance */
2483 memcpy(pvDst, pvSrc, PAGE_SIZE);
2484 PGMPhysReleasePageMappingLock(pVM, &Lock);
2485 GCPhysSrc += PAGE_SIZE;
2486 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2487 cb -= PAGE_SIZE;
2488 }
2489 /* won't ever get here. */
2490}
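
/*
 * Usage sketch (illustrative only): a raw copy from guest physical memory
 * that deliberately bypasses MMIO and access handlers, e.g. for internal
 * bookkeeping or debugger-style access.  The range may cross page boundaries
 * freely; the API maps and releases each page internally.  Names are made up.
 *
 * @code
 *  static int exampleRawCopyFromGuest(PVM pVM, RTGCPHYS GCPhysSrc, void *pvDst, size_t cb)
 *  {
 *      /* No handlers are invoked and no dirty/accessed tracking is done. */
 *      return PGMPhysSimpleReadGCPhys(pVM, pvDst, GCPhysSrc, cb);
 *  }
 * @endcode
 */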
2491
2492
2493/**
2494 * Write to guest physical memory by GC physical address.
2496 *
2497 * This will bypass MMIO and access handlers.
2498 *
2499 * @returns VBox status.
2500 * @param pVM VM handle.
2501 * @param GCPhysDst The GC physical address of the destination.
2502 * @param pvSrc The source buffer.
2503 * @param cb The number of bytes to write.
2504 */
2505VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2506{
2507 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2508
2509 /*
2510 * Treat the first page as a special case.
2511 */
2512 if (!cb)
2513 return VINF_SUCCESS;
2514
2515 /* map the 1st page */
2516 void *pvDst;
2517 PGMPAGEMAPLOCK Lock;
2518 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2519 if (RT_FAILURE(rc))
2520 return rc;
2521
2522 /* optimize for the case where access is completely within the first page. */
2523 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2524 if (RT_LIKELY(cb <= cbPage))
2525 {
2526 memcpy(pvDst, pvSrc, cb);
2527 PGMPhysReleasePageMappingLock(pVM, &Lock);
2528 return VINF_SUCCESS;
2529 }
2530
2531 /* copy to the end of the page. */
2532 memcpy(pvDst, pvSrc, cbPage);
2533 PGMPhysReleasePageMappingLock(pVM, &Lock);
2534 GCPhysDst += cbPage;
2535 pvSrc = (const uint8_t *)pvSrc + cbPage;
2536 cb -= cbPage;
2537
2538 /*
2539 * Page by page.
2540 */
2541 for (;;)
2542 {
2543 /* map the page */
2544 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2545 if (RT_FAILURE(rc))
2546 return rc;
2547
2548 /* last page? */
2549 if (cb <= PAGE_SIZE)
2550 {
2551 memcpy(pvDst, pvSrc, cb);
2552 PGMPhysReleasePageMappingLock(pVM, &Lock);
2553 return VINF_SUCCESS;
2554 }
2555
2556 /* copy the entire page and advance */
2557 memcpy(pvDst, pvSrc, PAGE_SIZE);
2558 PGMPhysReleasePageMappingLock(pVM, &Lock);
2559 GCPhysDst += PAGE_SIZE;
2560 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2561 cb -= PAGE_SIZE;
2562 }
2563 /* won't ever get here. */
2564}
2565
2566
2567/**
2568 * Read from guest physical memory referenced by GC pointer.
2569 *
2570 * This function uses the current CR3/CR0/CR4 of the guest and will
2571 * bypass access handlers and not set any accessed bits.
2572 *
2573 * @returns VBox status.
2574 * @param pVCpu The VMCPU handle.
2575 * @param pvDst The destination address.
2576 * @param GCPtrSrc The source address (GC pointer).
2577 * @param cb The number of bytes to read.
2578 */
2579VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2580{
2581 PVM pVM = pVCpu->CTX_SUFF(pVM);
2582
2583 /*
2584 * Treat the first page as a special case.
2585 */
2586 if (!cb)
2587 return VINF_SUCCESS;
2588
2589 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleRead));
2590 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
2591
2592 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
2593 * when many VCPUs are fighting for the lock.
2594 */
2595 pgmLock(pVM);
2596
2597 /* map the 1st page */
2598 void const *pvSrc;
2599 PGMPAGEMAPLOCK Lock;
2600 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2601 if (RT_FAILURE(rc))
2602 {
2603 pgmUnlock(pVM);
2604 return rc;
2605 }
2606
2607 /* optimize for the case where access is completely within the first page. */
2608 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2609 if (RT_LIKELY(cb <= cbPage))
2610 {
2611 memcpy(pvDst, pvSrc, cb);
2612 PGMPhysReleasePageMappingLock(pVM, &Lock);
2613 pgmUnlock(pVM);
2614 return VINF_SUCCESS;
2615 }
2616
2617 /* copy to the end of the page. */
2618 memcpy(pvDst, pvSrc, cbPage);
2619 PGMPhysReleasePageMappingLock(pVM, &Lock);
2620 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
2621 pvDst = (uint8_t *)pvDst + cbPage;
2622 cb -= cbPage;
2623
2624 /*
2625 * Page by page.
2626 */
2627 for (;;)
2628 {
2629 /* map the page */
2630 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2631 if (RT_FAILURE(rc))
2632 {
2633 pgmUnlock(pVM);
2634 return rc;
2635 }
2636
2637 /* last page? */
2638 if (cb <= PAGE_SIZE)
2639 {
2640 memcpy(pvDst, pvSrc, cb);
2641 PGMPhysReleasePageMappingLock(pVM, &Lock);
2642 pgmUnlock(pVM);
2643 return VINF_SUCCESS;
2644 }
2645
2646 /* copy the entire page and advance */
2647 memcpy(pvDst, pvSrc, PAGE_SIZE);
2648 PGMPhysReleasePageMappingLock(pVM, &Lock);
2649 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
2650 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2651 cb -= PAGE_SIZE;
2652 }
2653 /* won't ever get here. */
2654}
2655
2656
2657/**
2658 * Write to guest physical memory referenced by GC pointer.
2659 *
2660 * This function uses the current CR3/CR0/CR4 of the guest and will
2661 * bypass access handlers and not set dirty or accessed bits.
2662 *
2663 * @returns VBox status.
2664 * @param pVCpu The VMCPU handle.
2665 * @param GCPtrDst The destination address (GC pointer).
2666 * @param pvSrc The source address.
2667 * @param cb The number of bytes to write.
2668 */
2669VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2670{
2671 PVM pVM = pVCpu->CTX_SUFF(pVM);
2672
2673 /*
2674 * Treat the first page as a special case.
2675 */
2676 if (!cb)
2677 return VINF_SUCCESS;
2678
2679 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWrite));
2680 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
2681
2682 /* map the 1st page */
2683 void *pvDst;
2684 PGMPAGEMAPLOCK Lock;
2685 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2686 if (RT_FAILURE(rc))
2687 return rc;
2688
2689 /* optimize for the case where access is completely within the first page. */
2690 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2691 if (RT_LIKELY(cb <= cbPage))
2692 {
2693 memcpy(pvDst, pvSrc, cb);
2694 PGMPhysReleasePageMappingLock(pVM, &Lock);
2695 return VINF_SUCCESS;
2696 }
2697
2698 /* copy to the end of the page. */
2699 memcpy(pvDst, pvSrc, cbPage);
2700 PGMPhysReleasePageMappingLock(pVM, &Lock);
2701 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2702 pvSrc = (const uint8_t *)pvSrc + cbPage;
2703 cb -= cbPage;
2704
2705 /*
2706 * Page by page.
2707 */
2708 for (;;)
2709 {
2710 /* map the page */
2711 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2712 if (RT_FAILURE(rc))
2713 return rc;
2714
2715 /* last page? */
2716 if (cb <= PAGE_SIZE)
2717 {
2718 memcpy(pvDst, pvSrc, cb);
2719 PGMPhysReleasePageMappingLock(pVM, &Lock);
2720 return VINF_SUCCESS;
2721 }
2722
2723 /* copy the entire page and advance */
2724 memcpy(pvDst, pvSrc, PAGE_SIZE);
2725 PGMPhysReleasePageMappingLock(pVM, &Lock);
2726 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2727 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2728 cb -= PAGE_SIZE;
2729 }
2730 /* won't ever get here. */
2731}
2732
2733
2734/**
2735 * Write to guest physical memory referenced by GC pointer and update the PTE.
2736 *
2737 * This function uses the current CR3/CR0/CR4 of the guest and will
2738 * bypass access handlers but will set any dirty and accessed bits in the PTE.
2739 *
2740 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
2741 *
2742 * @returns VBox status.
2743 * @param pVCpu The VMCPU handle.
2744 * @param GCPtrDst The destination address (GC pointer).
2745 * @param pvSrc The source address.
2746 * @param cb The number of bytes to write.
2747 */
2748VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2749{
2750 PVM pVM = pVCpu->CTX_SUFF(pVM);
2751
2752 /*
2753 * Treat the first page as a special case.
2754 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage calls.
2755 */
2756 if (!cb)
2757 return VINF_SUCCESS;
2758
2759 /* map the 1st page */
2760 void *pvDst;
2761 PGMPAGEMAPLOCK Lock;
2762 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2763 if (RT_FAILURE(rc))
2764 return rc;
2765
2766 /* optimize for the case where access is completely within the first page. */
2767 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2768 if (RT_LIKELY(cb <= cbPage))
2769 {
2770 memcpy(pvDst, pvSrc, cb);
2771 PGMPhysReleasePageMappingLock(pVM, &Lock);
2772 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2773 return VINF_SUCCESS;
2774 }
2775
2776 /* copy to the end of the page. */
2777 memcpy(pvDst, pvSrc, cbPage);
2778 PGMPhysReleasePageMappingLock(pVM, &Lock);
2779 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2780 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2781 pvSrc = (const uint8_t *)pvSrc + cbPage;
2782 cb -= cbPage;
2783
2784 /*
2785 * Page by page.
2786 */
2787 for (;;)
2788 {
2789 /* map the page */
2790 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2791 if (RT_FAILURE(rc))
2792 return rc;
2793
2794 /* last page? */
2795 if (cb <= PAGE_SIZE)
2796 {
2797 memcpy(pvDst, pvSrc, cb);
2798 PGMPhysReleasePageMappingLock(pVM, &Lock);
2799 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2800 return VINF_SUCCESS;
2801 }
2802
2803 /* copy the entire page and advance */
2804 memcpy(pvDst, pvSrc, PAGE_SIZE);
2805 PGMPhysReleasePageMappingLock(pVM, &Lock);
2806 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2807 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2808 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2809 cb -= PAGE_SIZE;
2810 }
2811 /* won't ever get here. */
2812}
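
/*
 * Usage sketch (illustrative only): contrasting the two "simple" write APIs
 * for guest virtual addresses.  The helper name is made up.  Both bypass
 * access handlers; the dirty variant additionally sets X86_PTE_A and
 * X86_PTE_D in the guest PTEs, which matters when the guest later inspects
 * its own page tables.
 *
 * @code
 *  static int examplePatchGuestMemory(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvPatch, size_t cbPatch, bool fMarkDirty)
 *  {
 *      if (fMarkDirty)
 *          return PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, pvPatch, cbPatch);
 *      return PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrDst, pvPatch, cbPatch);
 *  }
 * @endcode
 */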
2813
2814
2815/**
2816 * Read from guest physical memory referenced by GC pointer.
2817 *
2818 * This function uses the current CR3/CR0/CR4 of the guest and will
2819 * respect access handlers and set accessed bits.
2820 *
2821 * @returns VBox status.
2822 * @param pVCpu The VMCPU handle.
2823 * @param pvDst The destination address.
2824 * @param GCPtrSrc The source address (GC pointer).
2825 * @param cb The number of bytes to read.
2826 * @thread The vCPU EMT.
2827 */
2828VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2829{
2830 RTGCPHYS GCPhys;
2831 uint64_t fFlags;
2832 int rc;
2833 PVM pVM = pVCpu->CTX_SUFF(pVM);
2834
2835 /*
2836 * Anything to do?
2837 */
2838 if (!cb)
2839 return VINF_SUCCESS;
2840
2841 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
2842
2843 /*
2844 * Optimize reads within a single page.
2845 */
2846 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2847 {
2848 /* Convert virtual to physical address + flags */
2849 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2850 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2851 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2852
2853 /* mark the guest page as accessed. */
2854 if (!(fFlags & X86_PTE_A))
2855 {
2856 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2857 AssertRC(rc);
2858 }
2859
2860 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
2861 }
2862
2863 /*
2864 * Page by page.
2865 */
2866 for (;;)
2867 {
2868 /* Convert virtual to physical address + flags */
2869 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2870 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2871 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2872
2873 /* mark the guest page as accessed. */
2874 if (!(fFlags & X86_PTE_A))
2875 {
2876 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2877 AssertRC(rc);
2878 }
2879
2880 /* copy */
2881 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2882 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
2883 if (cbRead >= cb || RT_FAILURE(rc))
2884 return rc;
2885
2886 /* next */
2887 cb -= cbRead;
2888 pvDst = (uint8_t *)pvDst + cbRead;
2889 GCPtrSrc += cbRead;
2890 }
2891}
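
/*
 * Usage sketch (illustrative only): reading a guest structure at a guest
 * virtual address through the handler-aware path.  Unlike
 * PGMPhysSimpleReadGCPtr above, this goes via PGMPhysRead, so MMIO and access
 * handlers are respected and X86_PTE_A is set on the pages touched.  The
 * helper name is made up and assumes EMT context.
 *
 * @code
 *  static int exampleReadGuestStruct(PVMCPU pVCpu, RTGCPTR GCPtrSrc, void *pvDst, size_t cbStruct)
 *  {
 *      int rc = PGMPhysReadGCPtr(pVCpu, pvDst, GCPtrSrc, cbStruct);
 *      /* Failures include translation errors (page not present) and, in
 *         R0/RC, VERR_PGM_PHYS_WR_HIT_HANDLER from the underlying PGMPhysRead. */
 *      return rc;
 *  }
 * @endcode
 */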
2892
2893
2894/**
2895 * Write to guest physical memory referenced by GC pointer.
2896 *
2897 * This function uses the current CR3/CR0/CR4 of the guest and will
2898 * respect access handlers and set dirty and accessed bits.
2899 *
2900 * @returns VBox status.
2901 * @retval VINF_SUCCESS.
2902 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2903 *
2904 * @param pVCpu The VMCPU handle.
2905 * @param GCPtrDst The destination address (GC pointer).
2906 * @param pvSrc The source address.
2907 * @param cb The number of bytes to write.
2908 */
2909VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2910{
2911 RTGCPHYS GCPhys;
2912 uint64_t fFlags;
2913 int rc;
2914 PVM pVM = pVCpu->CTX_SUFF(pVM);
2915
2916 /*
2917 * Anything to do?
2918 */
2919 if (!cb)
2920 return VINF_SUCCESS;
2921
2922 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
2923
2924 /*
2925 * Optimize writes within a single page.
2926 */
2927 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2928 {
2929 /* Convert virtual to physical address + flags */
2930 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2931 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2932 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2933
2934 /* Mention when we ignore X86_PTE_RW... */
2935 if (!(fFlags & X86_PTE_RW))
2936            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2937
2938 /* Mark the guest page as accessed and dirty if necessary. */
2939 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2940 {
2941 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2942 AssertRC(rc);
2943 }
2944
2945 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
2946 }
2947
2948 /*
2949 * Page by page.
2950 */
2951 for (;;)
2952 {
2953 /* Convert virtual to physical address + flags */
2954 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2955 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2956 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2957
2958 /* Mention when we ignore X86_PTE_RW... */
2959 if (!(fFlags & X86_PTE_RW))
2960            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2961
2962 /* Mark the guest page as accessed and dirty if necessary. */
2963 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2964 {
2965 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2966 AssertRC(rc);
2967 }
2968
2969 /* copy */
2970 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2971 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
2972 if (cbWrite >= cb || RT_FAILURE(rc))
2973 return rc;
2974
2975 /* next */
2976 cb -= cbWrite;
2977 pvSrc = (uint8_t *)pvSrc + cbWrite;
2978 GCPtrDst += cbWrite;
2979 }
2980}
2981
2982
2983/**
2984 * Performs a read of guest virtual memory for instruction emulation.
2985 *
2986 * This will check permissions, raise exceptions and update the access bits.
2987 *
2988 * The current implementation will bypass all access handlers. It may later be
2989 * changed to at least respect MMIO.
2990 *
2991 *
2992 * @returns VBox status code suitable to scheduling.
2993 * @retval VINF_SUCCESS if the read was performed successfully.
2994 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2995 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2996 *
2997 * @param pVCpu The VMCPU handle.
2998 * @param pCtxCore The context core.
2999 * @param pvDst Where to put the bytes we've read.
3000 * @param GCPtrSrc The source address.
3001 * @param cb The number of bytes to read. Not more than a page.
3002 *
3003 * @remark This function will dynamically map physical pages in GC. This may unmap
3004 * mappings done by the caller. Be careful!
3005 */
3006VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3007{
3008 PVM pVM = pVCpu->CTX_SUFF(pVM);
3009 Assert(cb <= PAGE_SIZE);
3010
3011/** @todo r=bird: This isn't perfect!
3012 * -# It's not checking for reserved bits being 1.
3013 * -# It's not correctly dealing with the access bit.
3014 * -# It's not respecting MMIO memory or any other access handlers.
3015 */
3016 /*
3017 * 1. Translate virtual to physical. This may fault.
3018 * 2. Map the physical address.
3019 * 3. Do the read operation.
3020 * 4. Set access bits if required.
3021 */
3022 int rc;
3023 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3024 if (cb <= cb1)
3025 {
3026 /*
3027 * Not crossing pages.
3028 */
3029 RTGCPHYS GCPhys;
3030 uint64_t fFlags;
3031 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3032 if (RT_SUCCESS(rc))
3033 {
3034 /** @todo we should check reserved bits ... */
3035 void *pvSrc;
3036 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
3037 switch (rc)
3038 {
3039 case VINF_SUCCESS:
3040 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3041 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3042 break;
3043 case VERR_PGM_PHYS_PAGE_RESERVED:
3044 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3045 memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
3046 break;
3047 default:
3048 return rc;
3049 }
3050
3051 /** @todo access bit emulation isn't 100% correct. */
3052 if (!(fFlags & X86_PTE_A))
3053 {
3054 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3055 AssertRC(rc);
3056 }
3057 return VINF_SUCCESS;
3058 }
3059 }
3060 else
3061 {
3062 /*
3063 * Crosses pages.
3064 */
3065 size_t cb2 = cb - cb1;
3066 uint64_t fFlags1;
3067 RTGCPHYS GCPhys1;
3068 uint64_t fFlags2;
3069 RTGCPHYS GCPhys2;
3070 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3071 if (RT_SUCCESS(rc))
3072 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3073 if (RT_SUCCESS(rc))
3074 {
3075 /** @todo we should check reserved bits ... */
3076 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3077 void *pvSrc1;
3078 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
3079 switch (rc)
3080 {
3081 case VINF_SUCCESS:
3082 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3083 break;
3084 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3085 memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
3086 break;
3087 default:
3088 return rc;
3089 }
3090
3091 void *pvSrc2;
3092 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
3093 switch (rc)
3094 {
3095 case VINF_SUCCESS:
3096 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3097 break;
3098 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3099 memset((uint8_t *)pvDst + cb1, 0, cb2); /** @todo this is wrong, it should be 0xff */
3100 break;
3101 default:
3102 return rc;
3103 }
3104
3105 if (!(fFlags1 & X86_PTE_A))
3106 {
3107 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3108 AssertRC(rc);
3109 }
3110 if (!(fFlags2 & X86_PTE_A))
3111 {
3112 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3113 AssertRC(rc);
3114 }
3115 return VINF_SUCCESS;
3116 }
3117 }
3118
3119 /*
3120 * Raise a #PF.
3121 */
3122 uint32_t uErr;
3123
3124 /* Get the current privilege level. */
3125 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3126 switch (rc)
3127 {
3128 case VINF_SUCCESS:
3129 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3130 break;
3131
3132 case VERR_PAGE_NOT_PRESENT:
3133 case VERR_PAGE_TABLE_NOT_PRESENT:
3134 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3135 break;
3136
3137 default:
3138 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3139 return rc;
3140 }
3141 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3142 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3143}
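
/*
 * Usage sketch (illustrative only): fetching an operand during instruction
 * emulation.  GCPtrOperand is a made-up name for the effective address the
 * emulator computed; the fragment is assumed to live inside an emulation
 * handler that has pVCpu and pCtxCore.  A failed translation is converted
 * into a guest \#PF by the API, so informational statuses must be propagated
 * to the caller/scheduler rather than treated as success.
 *
 * @code
 *  uint64_t u64Operand;
 *  int rc = PGMPhysInterpretedRead(pVCpu, pCtxCore, &u64Operand, GCPtrOperand, sizeof(u64Operand));
 *  if (rc != VINF_SUCCESS)
 *      return rc;  /* VINF_EM_RAW_GUEST_TRAP, VINF_TRPM_XCPT_DISPATCHED or a real error. */
 *  /* ... use u64Operand ... */
 * @endcode
 */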
3144
3145
3146/**
3147 * Performs a read of guest virtual memory for instruction emulation.
3148 *
3149 * This will check permissions, raise exceptions and update the access bits.
3150 *
3151 * The current implementation will bypass all access handlers. It may later be
3152 * changed to at least respect MMIO.
3153 *
3154 *
3155 * @returns VBox status code suitable to scheduling.
3156 * @retval VINF_SUCCESS if the read was performed successfully.
3157 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3158 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3159 *
3160 * @param pVCpu The VMCPU handle.
3161 * @param pCtxCore The context core.
3162 * @param pvDst Where to put the bytes we've read.
3163 * @param GCPtrSrc The source address.
3164 * @param cb The number of bytes to read. Not more than a page.
3165 * @param fRaiseTrap  If set, the trap will be raised as per spec; if clear,
3166 *                    an appropriate error status will be returned (no
3167 *                    informational status at all).
3168 *
3169 *
3170 * @remarks Takes the PGM lock.
3171 * @remarks A page fault on the 2nd page of the access will be raised without
3172 * writing the bits on the first page since we're ASSUMING that the
3173 * caller is emulating an instruction access.
3174 * @remarks This function will dynamically map physical pages in GC. This may
3175 * unmap mappings done by the caller. Be careful!
3176 */
3177VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
3178{
3179 PVM pVM = pVCpu->CTX_SUFF(pVM);
3180 Assert(cb <= PAGE_SIZE);
3181
3182 /*
3183 * 1. Translate virtual to physical. This may fault.
3184 * 2. Map the physical address.
3185 * 3. Do the read operation.
3186 * 4. Set access bits if required.
3187 */
3188 int rc;
3189 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3190 if (cb <= cb1)
3191 {
3192 /*
3193 * Not crossing pages.
3194 */
3195 RTGCPHYS GCPhys;
3196 uint64_t fFlags;
3197 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3198 if (RT_SUCCESS(rc))
3199 {
3200 if (1) /** @todo we should check reserved bits ... */
3201 {
3202 const void *pvSrc;
3203 PGMPAGEMAPLOCK Lock;
3204 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3205 switch (rc)
3206 {
3207 case VINF_SUCCESS:
3208 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3209 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3210 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3211 break;
3212 case VERR_PGM_PHYS_PAGE_RESERVED:
3213 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3214 memset(pvDst, 0xff, cb);
3215 break;
3216 default:
3217 AssertMsgFailed(("%Rrc\n", rc));
3218 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3219 return rc;
3220 }
3221 PGMPhysReleasePageMappingLock(pVM, &Lock);
3222
3223 if (!(fFlags & X86_PTE_A))
3224 {
3225 /** @todo access bit emulation isn't 100% correct. */
3226 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3227 AssertRC(rc);
3228 }
3229 return VINF_SUCCESS;
3230 }
3231 }
3232 }
3233 else
3234 {
3235 /*
3236 * Crosses pages.
3237 */
3238 size_t cb2 = cb - cb1;
3239 uint64_t fFlags1;
3240 RTGCPHYS GCPhys1;
3241 uint64_t fFlags2;
3242 RTGCPHYS GCPhys2;
3243 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3244 if (RT_SUCCESS(rc))
3245 {
3246 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3247 if (RT_SUCCESS(rc))
3248 {
3249 if (1) /** @todo we should check reserved bits ... */
3250 {
3251 const void *pvSrc;
3252 PGMPAGEMAPLOCK Lock;
3253 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3254 switch (rc)
3255 {
3256 case VINF_SUCCESS:
3257 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3258 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3259 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3260 PGMPhysReleasePageMappingLock(pVM, &Lock);
3261 break;
3262 case VERR_PGM_PHYS_PAGE_RESERVED:
3263 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3264 memset(pvDst, 0xff, cb1);
3265 break;
3266 default:
3267 AssertMsgFailed(("%Rrc\n", rc));
3268 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3269 return rc;
3270 }
3271
3272 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3273 switch (rc)
3274 {
3275 case VINF_SUCCESS:
3276 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3277 PGMPhysReleasePageMappingLock(pVM, &Lock);
3278 break;
3279 case VERR_PGM_PHYS_PAGE_RESERVED:
3280 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3281 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3282 break;
3283 default:
3284 AssertMsgFailed(("%Rrc\n", rc));
3285 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3286 return rc;
3287 }
3288
3289 if (!(fFlags1 & X86_PTE_A))
3290 {
3291 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3292 AssertRC(rc);
3293 }
3294 if (!(fFlags2 & X86_PTE_A))
3295 {
3296 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3297 AssertRC(rc);
3298 }
3299 return VINF_SUCCESS;
3300 }
3301 /* sort out which page */
3302 }
3303 else
3304 GCPtrSrc += cb1; /* fault on 2nd page */
3305 }
3306 }
3307
3308 /*
3309 * Raise a #PF if we're allowed to do that.
3310 */
3311 /* Calc the error bits. */
3312 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3313 uint32_t uErr;
3314 switch (rc)
3315 {
3316 case VINF_SUCCESS:
3317 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3318 rc = VERR_ACCESS_DENIED;
3319 break;
3320
3321 case VERR_PAGE_NOT_PRESENT:
3322 case VERR_PAGE_TABLE_NOT_PRESENT:
3323 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3324 break;
3325
3326 default:
3327 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3328 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3329 return rc;
3330 }
3331 if (fRaiseTrap)
3332 {
3333 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3334 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3335 }
3336 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3337 return rc;
3338}
3339
3340
3341/**
3342 * Performs a write to guest virtual memory for instruction emulation.
3343 *
3344 * This will check permissions, raise exceptions and update the dirty and access
3345 * bits.
3346 *
3347 * @returns VBox status code suitable to scheduling.
3348 * @retval VINF_SUCCESS if the write was performed successfully.
3349 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3350 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3351 *
3352 * @param pVCpu The VMCPU handle.
3353 * @param pCtxCore The context core.
3354 * @param GCPtrDst The destination address.
3355 * @param pvSrc What to write.
3356 * @param cb The number of bytes to write. Not more than a page.
3357 * @param fRaiseTrap  If set, the trap will be raised as per spec; if clear,
3358 *                    an appropriate error status will be returned (no
3359 *                    informational status at all).
3360 *
3361 * @remarks Takes the PGM lock.
3362 * @remarks A page fault on the 2nd page of the access will be raised without
3363 * writing the bits on the first page since we're ASSUMING that the
3364 * caller is emulating an instruction access.
3365 * @remarks This function will dynamically map physical pages in GC. This may
3366 * unmap mappings done by the caller. Be careful!
3367 */
3368VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
3369{
3370 Assert(cb <= PAGE_SIZE);
3371 PVM pVM = pVCpu->CTX_SUFF(pVM);
3372
3373 /*
3374 * 1. Translate virtual to physical. This may fault.
3375 * 2. Map the physical address.
3376 * 3. Do the write operation.
3377 * 4. Set access bits if required.
3378 */
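    /* When the destination crosses a page boundary the write is split in two;
     * e.g. with 4 KiB pages, cb=8 bytes at a GCPtrDst whose page offset is
     * 0xffc gives cb1 = PAGE_SIZE - 0xffc = 4 bytes on the first page and
     * cb2 = cb - cb1 = 4 bytes on the second. */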
3379 int rc;
3380 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3381 if (cb <= cb1)
3382 {
3383 /*
3384 * Not crossing pages.
3385 */
3386 RTGCPHYS GCPhys;
3387 uint64_t fFlags;
3388 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3389 if (RT_SUCCESS(rc))
3390 {
3391 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3392 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3393 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3394 {
3395 void *pvDst;
3396 PGMPAGEMAPLOCK Lock;
3397 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3398 switch (rc)
3399 {
3400 case VINF_SUCCESS:
3401 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3402 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3403 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3404 PGMPhysReleasePageMappingLock(pVM, &Lock);
3405 break;
3406 case VERR_PGM_PHYS_PAGE_RESERVED:
3407 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3408 /* bit bucket */
3409 break;
3410 default:
3411 AssertMsgFailed(("%Rrc\n", rc));
3412 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3413 return rc;
3414 }
3415
3416 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3417 {
3418 /** @todo dirty & access bit emulation isn't 100% correct. */
3419 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3420 AssertRC(rc);
3421 }
3422 return VINF_SUCCESS;
3423 }
3424 rc = VERR_ACCESS_DENIED;
3425 }
3426 }
3427 else
3428 {
3429 /*
3430 * Crosses pages.
3431 */
3432 size_t cb2 = cb - cb1;
3433 uint64_t fFlags1;
3434 RTGCPHYS GCPhys1;
3435 uint64_t fFlags2;
3436 RTGCPHYS GCPhys2;
3437 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3438 if (RT_SUCCESS(rc))
3439 {
3440 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3441 if (RT_SUCCESS(rc))
3442 {
3443 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3444 && (fFlags2 & X86_PTE_RW))
3445 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3446 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) )
3447 {
3448 void *pvDst;
3449 PGMPAGEMAPLOCK Lock;
3450 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3451 switch (rc)
3452 {
3453 case VINF_SUCCESS:
3454 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3455 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3456 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3457 PGMPhysReleasePageMappingLock(pVM, &Lock);
3458 break;
3459 case VERR_PGM_PHYS_PAGE_RESERVED:
3460 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3461 /* bit bucket */
3462 break;
3463 default:
3464 AssertMsgFailed(("%Rrc\n", rc));
3465 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3466 return rc;
3467 }
3468
3469 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3470 switch (rc)
3471 {
3472 case VINF_SUCCESS:
3473 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3474 PGMPhysReleasePageMappingLock(pVM, &Lock);
3475 break;
3476 case VERR_PGM_PHYS_PAGE_RESERVED:
3477 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3478 /* bit bucket */
3479 break;
3480 default:
3481 AssertMsgFailed(("%Rrc\n", rc));
3482 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3483 return rc;
3484 }
3485
3486 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3487 {
3488 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3489 AssertRC(rc);
3490 }
3491 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3492 {
3493 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3494 AssertRC(rc);
3495 }
3496 return VINF_SUCCESS;
3497 }
3498 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3499 GCPtrDst += cb1; /* fault on the 2nd page. */
3500 rc = VERR_ACCESS_DENIED;
3501 }
3502 else
3503 GCPtrDst += cb1; /* fault on the 2nd page. */
3504 }
3505 }
3506
3507 /*
3508 * Raise a #PF if we're allowed to do that.
3509 */
3510 /* Calc the error bits. */
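    /* Same #PF error-code composition as in the read path above, except that
     * the VERR_ACCESS_DENIED case additionally sets X86_TRAP_PF_RW to mark
     * the faulting access as a write. */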
3511 uint32_t uErr;
3512 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3513 switch (rc)
3514 {
3515 case VINF_SUCCESS:
3516 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3517 rc = VERR_ACCESS_DENIED;
3518 break;
3519
3520 case VERR_ACCESS_DENIED:
3521 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3522 break;
3523
3524 case VERR_PAGE_NOT_PRESENT:
3525 case VERR_PAGE_TABLE_NOT_PRESENT:
3526 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3527 break;
3528
3529 default:
3530 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3531 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3532 return rc;
3533 }
3534 if (fRaiseTrap)
3535 {
3536 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3537 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3538 }
3539 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3540 return rc;
3541}
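
/*
 * Usage sketch: how an instruction emulator might flush a decoded store to
 * guest memory with this function and let it raise any #PF itself.  This is
 * illustrative only; pRegFrame, GCPtrEffDst and cbInstr are placeholders for
 * whatever context core, destination address and instruction length the
 * caller already has at hand.
 *
 *     uint32_t u32Value = 0xdeadbeef;                   // value the emulated store writes
 *     int rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, pRegFrame, GCPtrEffDst,
 *                                                &u32Value, sizeof(u32Value),
 *                                                true);   // fRaiseTrap: raise the #PF ourselves
 *     if (rc != VINF_SUCCESS)
 *         return rc;                 // #PF raised/dispatched or a genuine failure; pass it up
 *     pRegFrame->rip += cbInstr;     // store done, step past the emulated instruction
 */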
3542