VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 24723

Last change on this file since 24723 was 24723, checked in by vboxsync, 15 years ago

Introducing PGMPhysInvalidatePageMapTLBEntry

1/* $Id: PGMAllPhys.cpp 24723 2009-11-17 14:09:41Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM_PHYS
26#include <VBox/pgm.h>
27#include <VBox/trpm.h>
28#include <VBox/vmm.h>
29#include <VBox/iom.h>
30#include <VBox/em.h>
31#include <VBox/rem.h>
32#include "PGMInternal.h"
33#include <VBox/vm.h>
34#include <VBox/param.h>
35#include <VBox/err.h>
36#include <iprt/assert.h>
37#include <iprt/string.h>
38#include <iprt/asm.h>
39#include <VBox/log.h>
40#ifdef IN_RING3
41# include <iprt/thread.h>
42#endif
43
44
45
46#ifndef IN_RING3
47
48/**
49 * \#PF Handler callback for Guest ROM range write access.
50 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
51 *
52 * @returns VBox status code (appropriate for trap handling and GC return).
53 * @param pVM VM Handle.
54 * @param uErrorCode CPU Error code.
55 * @param pRegFrame Trap register frame.
56 * @param pvFault The fault address (cr2).
57 * @param GCPhysFault The GC physical address corresponding to pvFault.
58 * @param pvUser User argument. Pointer to the ROM range structure.
59 */
60VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
61{
62 int rc;
63 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
64 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
65 PVMCPU pVCpu = VMMGetCpu(pVM);
66
67 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
68 switch (pRom->aPages[iPage].enmProt)
69 {
70 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
71 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
72 {
73 /*
74 * If it's a simple instruction which doesn't change the cpu state
75 * we will simply skip it. Otherwise we'll have to defer it to REM.
76 */
77 uint32_t cbOp;
78 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
79 rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, &cbOp);
80 if ( RT_SUCCESS(rc)
81 && pDis->mode == CPUMODE_32BIT /** @todo why does this matter? */
82 && !(pDis->prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
83 {
84 switch (pDis->opcode)
85 {
86 /** @todo Find other instructions we can safely skip, possibly
87 * adding this kind of detection to DIS or EM. */
88 case OP_MOV:
89 pRegFrame->rip += cbOp;
90 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteHandled);
91 return VINF_SUCCESS;
92 }
93 }
94 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
95 return rc;
96 break;
97 }
98
99 case PGMROMPROT_READ_RAM_WRITE_RAM:
100 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
101 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
102 AssertRC(rc);
103 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
104
105 case PGMROMPROT_READ_ROM_WRITE_RAM:
106 /* Handle it in ring-3 because it's *way* easier there. */
107 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
108 break;
109
110 default:
111 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
112 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
113 VERR_INTERNAL_ERROR);
114 }
115
116 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteUnhandled);
117 return VINF_EM_RAW_EMULATE_INSTR;
118}
119
120#endif /* !IN_RING3 */
121
122/**
123 * Checks if Address Gate 20 is enabled or not.
124 *
125 * @returns true if enabled.
126 * @returns false if disabled.
127 * @param pVCpu VMCPU handle.
128 */
129VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
130{
131 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
132 return pVCpu->pgm.s.fA20Enabled;
133}
134
135
136/**
137 * Validates a GC physical address.
138 *
139 * @returns true if valid.
140 * @returns false if invalid.
141 * @param pVM The VM handle.
142 * @param GCPhys The physical address to validate.
143 */
144VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
145{
146 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
147 return pPage != NULL;
148}
149
150
151/**
152 * Checks if a GC physical address is a normal page,
153 * i.e. not ROM, MMIO or reserved.
154 *
155 * @returns true if normal.
156 * @returns false if invalid, ROM, MMIO or reserved page.
157 * @param pVM The VM handle.
158 * @param GCPhys The physical address to check.
159 */
160VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
161{
162 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
163 return pPage
164 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
165}
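
/*
 * Illustrative usage sketch (not part of the original file): combining the two
 * predicates above to ask whether a guest physical address is plain, backed RAM.
 * Assumes a valid pVM; the helper name is made up for the example.
 */
static bool examplePhysIsPlainRam(PVM pVM, RTGCPHYS GCPhys)
{
    if (!PGMPhysIsGCPhysValid(pVM, GCPhys))
        return false;                       /* outside every registered range */
    /* ROM, MMIO and reserved pages are valid but not "normal" RAM. */
    return PGMPhysIsGCPhysNormal(pVM, GCPhys);
}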
166
167
168/**
169 * Converts a GC physical address to a HC physical address.
170 *
171 * @returns VINF_SUCCESS on success.
172 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
173 * page but has no physical backing.
174 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
175 * GC physical address.
176 *
177 * @param pVM The VM handle.
178 * @param GCPhys The GC physical address to convert.
179 * @param pHCPhys Where to store the HC physical address on success.
180 */
181VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
182{
183 pgmLock(pVM);
184 PPGMPAGE pPage;
185 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
186 if (RT_SUCCESS(rc))
187 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
188 pgmUnlock(pVM);
189 return rc;
190}
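
/*
 * Illustrative usage sketch (not part of the original file): translating a guest
 * physical address to its host physical address and telling the two documented
 * failure modes apart. Assumes a valid pVM; the helper name is made up.
 */
static void exampleLogHCPhys(PVM pVM, RTGCPHYS GCPhys)
{
    RTHCPHYS HCPhys;
    int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
    if (RT_SUCCESS(rc))
        Log(("example: %RGp -> %RHp\n", GCPhys, HCPhys));
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        Log(("example: %RGp is valid but has no physical backing\n", GCPhys));
    else
        Log(("example: %RGp is not a valid guest physical address (%Rrc)\n", GCPhys, rc));
}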
191
192
193/**
194 * Invalidates all page mapping TLBs.
195 *
196 * @param pVM The VM handle.
197 */
198VMMDECL(void) PGMPhysInvalidatePageMapTLB(PVM pVM)
199{
200 pgmLock(pVM);
201 STAM_COUNTER_INC(&pVM->pgm.s.StatPageMapTlbFlushes);
202 /* Clear the shared R0/R3 TLB completely. */
203 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
204 {
205 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
206 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
207 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
208 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
209 }
210 /* @todo clear the RC TLB whenever we add it. */
211 pgmUnlock(pVM);
212}
213
214/**
215 * Invalidates a page mapping TLB entry
216 *
217 * @param pVM The VM handle.
218 * @param GCPhys GCPhys entry to flush
219 */
220VMMDECL(void) PGMPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
221{
222 Assert(PGMIsLocked(pVM));
223
224 STAM_COUNTER_INC(&pVM->pgm.s.StatPageMapTlbFlushEntry);
225 /* Clear the shared R0/R3 TLB entry. */
226#ifdef IN_RC
227 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
228 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
229 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
230 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
231 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
232#else
233 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
234 pTlbe->GCPhys = NIL_RTGCPHYS;
235 pTlbe->pPage = 0;
236 pTlbe->pMap = 0;
237 pTlbe->pv = 0;
238#endif
239 /* @todo clear the RC TLB whenever we add it. */
240}
241
242/**
243 * Makes sure that there is at least one handy page ready for use.
244 *
245 * This will also take the appropriate actions when reaching water-marks.
246 *
247 * @returns VBox status code.
248 * @retval VINF_SUCCESS on success.
249 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
250 *
251 * @param pVM The VM handle.
252 *
253 * @remarks Must be called from within the PGM critical section. It may
254 * nip back to ring-3/0 in some cases.
255 */
256static int pgmPhysEnsureHandyPage(PVM pVM)
257{
258 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
259
260 /*
261 * Do we need to do anything special?
262 */
263#ifdef IN_RING3
264 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
265#else
266 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
267#endif
268 {
269 /*
270 * Allocate pages only if we're out of them, or in ring-3, almost out.
271 */
272#ifdef IN_RING3
273 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
274#else
275 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
276#endif
277 {
278 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
279 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
280#ifdef IN_RING3
281 int rc = PGMR3PhysAllocateHandyPages(pVM);
282#else
283 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
284#endif
285 if (RT_UNLIKELY(rc != VINF_SUCCESS))
286 {
287 if (RT_FAILURE(rc))
288 return rc;
289 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
290 if (!pVM->pgm.s.cHandyPages)
291 {
292 LogRel(("PGM: no more handy pages!\n"));
293 return VERR_EM_NO_MEMORY;
294 }
295 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
296 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
297#ifdef IN_RING3
298 REMR3NotifyFF(pVM);
299#else
300 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
301#endif
302 }
303 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
304 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
305 ("%u\n", pVM->pgm.s.cHandyPages),
306 VERR_INTERNAL_ERROR);
307 }
308 else
309 {
310 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
311 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
312#ifndef IN_RING3
313 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
314 {
315 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
316 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
317 }
318#endif
319 }
320 }
321
322 return VINF_SUCCESS;
323}
324
325
326/**
327 * Replaces a zero or shared page with a new page that we can write to.
328 *
329 * @returns The following VBox status codes.
330 * @retval VINF_SUCCESS on success, pPage is modified.
331 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
332 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
333 *
334 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
335 *
336 * @param pVM The VM address.
337 * @param pPage The physical page tracking structure. This will
338 * be modified on success.
339 * @param GCPhys The address of the page.
340 *
341 * @remarks Must be called from within the PGM critical section. It may
342 * nip back to ring-3/0 in some cases.
343 *
344 * @remarks This function shouldn't really fail, however if it does
345 * it probably means we've screwed up the size of handy pages and/or
346 * the low-water mark. Or, that some device I/O is causing a lot of
347 * pages to be allocated while the host is in a low-memory
348 * condition. This latter should be handled elsewhere and in a more
349 * controlled manner, it's on the @bugref{3170} todo list...
350 */
351int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
352{
353 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
354
355 /*
356 * Prereqs.
357 */
358 Assert(PGMIsLocked(pVM));
359 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
360 Assert(!PGM_PAGE_IS_MMIO(pPage));
361
362
363 /*
364 * Flush any shadow page table mappings of the page.
365 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
366 */
367 bool fFlushTLBs = false;
368 int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
369 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
370
371 /*
372 * Ensure that we've got a page handy, take it and use it.
373 */
374 int rc2 = pgmPhysEnsureHandyPage(pVM);
375 if (RT_FAILURE(rc2))
376 {
377 if (fFlushTLBs)
378 PGM_INVL_ALL_VCPU_TLBS(pVM);
379 Assert(rc2 == VERR_EM_NO_MEMORY);
380 return rc2;
381 }
382 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
383 Assert(PGMIsLocked(pVM));
384 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
385 Assert(!PGM_PAGE_IS_MMIO(pPage));
386
387 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
388 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
389 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
390 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
391 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
392 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
393
394 /*
395 * There are one or two actions to be taken the next time we allocate handy pages:
396 * - Tell the GMM (global memory manager) what the page is being used for.
397 * (Speeds up replacement operations - sharing and defragmenting.)
398 * - If the current backing is shared, it must be freed.
399 */
400 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
401 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
402
403 if (PGM_PAGE_IS_SHARED(pPage))
404 {
405 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
406 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
407 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
408
409 Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
410 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
411 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageReplaceShared));
412 pVM->pgm.s.cSharedPages--;
413 AssertMsgFailed(("TODO: copy shared page content")); /** @todo err.. what about copying the page content? */
414 }
415 else
416 {
417 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
418 STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
419 pVM->pgm.s.cZeroPages--;
420 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
421 }
422
423 /*
424 * Do the PGMPAGE modifications.
425 */
426 pVM->pgm.s.cPrivatePages++;
427 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
428 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
429 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
430 PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
431
432 if ( fFlushTLBs
433 && rc != VINF_PGM_GCPHYS_ALIASED)
434 PGM_INVL_ALL_VCPU_TLBS(pVM);
435 return rc;
436}
437
438
439/**
440 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
441 *
442 * @returns VBox strict status code.
443 * @retval VINF_SUCCESS on success.
444 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
445 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
446 *
447 * @param pVM The VM address.
448 * @param pPage The physical page tracking structure.
449 * @param GCPhys The address of the page.
450 *
451 * @remarks Called from within the PGM critical section.
452 */
453int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
454{
455 switch (PGM_PAGE_GET_STATE(pPage))
456 {
457 case PGM_PAGE_STATE_WRITE_MONITORED:
458 PGM_PAGE_SET_WRITTEN_TO(pPage);
459 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
460 Assert(pVM->pgm.s.cMonitoredPages > 0);
461 pVM->pgm.s.cMonitoredPages--;
462 pVM->pgm.s.cWrittenToPages++;
463 /* fall thru */
464 default: /* to shut up GCC */
465 case PGM_PAGE_STATE_ALLOCATED:
466 return VINF_SUCCESS;
467
468 /*
469 * Zero pages can be dummy pages for MMIO or reserved memory,
470 * so we need to check the flags before joining cause with
471 * shared page replacement.
472 */
473 case PGM_PAGE_STATE_ZERO:
474 if (PGM_PAGE_IS_MMIO(pPage))
475 return VERR_PGM_PHYS_PAGE_RESERVED;
476 /* fall thru */
477 case PGM_PAGE_STATE_SHARED:
478 return pgmPhysAllocPage(pVM, pPage, GCPhys);
479 }
480}
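
/*
 * Illustrative internal-caller sketch (not part of the original file): the
 * typical pattern for code that already owns the PGM lock and wants a page in
 * the ALLOCATED state before writing to it, mirroring what
 * pgmPhysGCPhys2CCPtrInternal does further down. The helper name is made up.
 */
static int exampleMakePageWritableLocked(PVM pVM, RTGCPHYS GCPhys)
{
    Assert(PGMIsLocked(pVM));
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
    if (    RT_SUCCESS(rc)
        &&  PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
        rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
    return rc;
}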
481
482
483/**
484 * Wrapper for pgmPhysPageMakeWritable which enters the critsect.
485 *
486 * @returns VBox strict status code.
487 * @retval VINF_SUCCESS on success.
488 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
489 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
490 *
491 * @param pVM The VM address.
492 * @param pPage The physical page tracking structure.
493 * @param GCPhys The address of the page.
494 */
495int pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
496{
497 int rc = pgmLock(pVM);
498 if (RT_SUCCESS(rc))
499 {
500 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
501 pgmUnlock(pVM);
502 }
503 return rc;
504}
505
506
507/**
508 * Internal usage: Map the page specified by its GMM ID.
509 *
510 * This is similar to pgmPhysPageMap
511 *
512 * @returns VBox status code.
513 *
514 * @param pVM The VM handle.
515 * @param idPage The Page ID.
516 * @param HCPhys The physical address (for RC).
517 * @param ppv Where to store the mapping address.
518 *
519 * @remarks Called from within the PGM critical section. The mapping is only
520 * valid while you're inside this section.
521 */
522int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
523{
524 /*
525 * Validation.
526 */
527 Assert(PGMIsLocked(pVM));
528 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
529 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
530 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
531
532#ifdef IN_RC
533 /*
534 * Map it by HCPhys.
535 */
536 return PGMDynMapHCPage(pVM, HCPhys, ppv);
537
538#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
539 /*
540 * Map it by HCPhys.
541 */
542 return pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
543
544#else
545 /*
546 * Find/make Chunk TLB entry for the mapping chunk.
547 */
548 PPGMCHUNKR3MAP pMap;
549 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
550 if (pTlbe->idChunk == idChunk)
551 {
552 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
553 pMap = pTlbe->pChunk;
554 }
555 else
556 {
557 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
558
559 /*
560 * Find the chunk, map it if necessary.
561 */
562 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
563 if (!pMap)
564 {
565# ifdef IN_RING0
566 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
567 AssertRCReturn(rc, rc);
568 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
569 Assert(pMap);
570# else
571 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
572 if (RT_FAILURE(rc))
573 return rc;
574# endif
575 }
576
577 /*
578 * Enter it into the Chunk TLB.
579 */
580 pTlbe->idChunk = idChunk;
581 pTlbe->pChunk = pMap;
582 pMap->iAge = 0;
583 }
584
585 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
586 return VINF_SUCCESS;
587#endif
588}
589
590
591/**
592 * Maps a page into the current virtual address space so it can be accessed.
593 *
594 * @returns VBox status code.
595 * @retval VINF_SUCCESS on success.
596 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
597 *
598 * @param pVM The VM address.
599 * @param pPage The physical page tracking structure.
600 * @param GCPhys The address of the page.
601 * @param ppMap Where to store the address of the mapping tracking structure.
602 * @param ppv Where to store the mapping address of the page. The page
603 * offset is masked off!
604 *
605 * @remarks Called from within the PGM critical section.
606 */
607static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
608{
609 Assert(PGMIsLocked(pVM));
610
611#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
612 /*
613 * Just some sketchy GC/R0-darwin code.
614 */
615 *ppMap = NULL;
616 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
617 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
618# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
619 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
620# else
621 PGMDynMapHCPage(pVM, HCPhys, ppv);
622# endif
623 return VINF_SUCCESS;
624
625#else /* IN_RING3 || IN_RING0 */
626
627
628 /*
629 * Special case: ZERO and MMIO2 pages.
630 */
631 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
632 if (idChunk == NIL_GMM_CHUNKID)
633 {
634 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
635 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
636 {
637 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
638 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
639 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
640 *ppv = (void *)((uintptr_t)pRam->pvR3 + (GCPhys - pRam->GCPhys));
641 }
642 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
643 {
644 /** @todo deal with aliased MMIO2 pages somehow...
645 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
646 * them, that would also avoid this mess. It would actually be kind of
647 * elegant... */
648 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
649 }
650 else
651 {
652 /** @todo handle MMIO2 */
653 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
654 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
655 ("pPage=%R[pgmpage]\n", pPage),
656 VERR_INTERNAL_ERROR_2);
657 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
658 }
659 *ppMap = NULL;
660 return VINF_SUCCESS;
661 }
662
663 /*
664 * Find/make Chunk TLB entry for the mapping chunk.
665 */
666 PPGMCHUNKR3MAP pMap;
667 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
668 if (pTlbe->idChunk == idChunk)
669 {
670 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
671 pMap = pTlbe->pChunk;
672 }
673 else
674 {
675 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
676
677 /*
678 * Find the chunk, map it if necessary.
679 */
680 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
681 if (!pMap)
682 {
683#ifdef IN_RING0
684 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
685 AssertRCReturn(rc, rc);
686 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
687 Assert(pMap);
688#else
689 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
690 if (RT_FAILURE(rc))
691 return rc;
692#endif
693 }
694
695 /*
696 * Enter it into the Chunk TLB.
697 */
698 pTlbe->idChunk = idChunk;
699 pTlbe->pChunk = pMap;
700 pMap->iAge = 0;
701 }
702
703 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
704 *ppMap = pMap;
705 return VINF_SUCCESS;
706#endif /* IN_RING3 */
707}
708
709
710/**
711 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
712 *
713 * This is typically used in paths where we cannot use the TLB methods (like ROM
714 * pages) or where there is no point in using them since we won't get many hits.
715 *
716 * @returns VBox strict status code.
717 * @retval VINF_SUCCESS on success.
718 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
719 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
720 *
721 * @param pVM The VM address.
722 * @param pPage The physical page tracking structure.
723 * @param GCPhys The address of the page.
724 * @param ppv Where to store the mapping address of the page. The page
725 * offset is masked off!
726 *
727 * @remarks Called from within the PGM critical section. The mapping is only
728 * valid while you're inside this section.
729 */
730int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
731{
732 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
733 if (RT_SUCCESS(rc))
734 {
735 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
736 PPGMPAGEMAP pMapIgnore;
737 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
738 if (RT_FAILURE(rc2)) /* preserve rc */
739 rc = rc2;
740 }
741 return rc;
742}
743
744
745/**
746 * Maps a page into the current virtual address space so it can be accessed for
747 * both writing and reading.
748 *
749 * This is typically used in paths where we cannot use the TLB methods (like ROM
750 * pages) or where there is no point in using them since we won't get many hits.
751 *
752 * @returns VBox status code.
753 * @retval VINF_SUCCESS on success.
754 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
755 *
756 * @param pVM The VM address.
757 * @param pPage The physical page tracking structure. Must be in the
758 * allocated state.
759 * @param GCPhys The address of the page.
760 * @param ppv Where to store the mapping address of the page. The page
761 * offset is masked off!
762 *
763 * @remarks Called from within the PGM critical section. The mapping is only
764 * valid while you're inside this section.
765 */
766int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
767{
768 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
769 PPGMPAGEMAP pMapIgnore;
770 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
771}
772
773
774/**
775 * Maps a page into the current virtual address space so it can be accessed for
776 * reading.
777 *
778 * This is typically used in paths where we cannot use the TLB methods (like ROM
779 * pages) or where there is no point in using them since we won't get many hits.
780 *
781 * @returns VBox status code.
782 * @retval VINF_SUCCESS on success.
783 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
784 *
785 * @param pVM The VM address.
786 * @param pPage The physical page tracking structure.
787 * @param GCPhys The address of the page.
788 * @param ppv Where to store the mapping address of the page. The page
789 * offset is masked off!
790 *
791 * @remarks Called from within the PGM critical section. The mapping is only
792 * valid while you're inside this section.
793 */
794int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
795{
796 PPGMPAGEMAP pMapIgnore;
797 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
798}
799
800
801#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
802/**
803 * Load a guest page into the ring-3 physical TLB.
804 *
805 * @returns VBox status code.
806 * @retval VINF_SUCCESS on success
807 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
808 * @param pPGM The PGM instance pointer.
809 * @param GCPhys The guest physical address in question.
810 */
811int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
812{
813 Assert(PGMIsLocked(PGM2VM(pPGM)));
814 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
815
816 /*
817 * Find the ram range.
818 * 99.8% of requests are expected to be in the first range.
819 */
820 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
821 RTGCPHYS off = GCPhys - pRam->GCPhys;
822 if (RT_UNLIKELY(off >= pRam->cb))
823 {
824 do
825 {
826 pRam = pRam->CTX_SUFF(pNext);
827 if (!pRam)
828 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
829 off = GCPhys - pRam->GCPhys;
830 } while (off >= pRam->cb);
831 }
832
833 /*
834 * Map the page.
835 * Make a special case for the zero page as it is kind of special.
836 */
837 PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
838 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
839 if (!PGM_PAGE_IS_ZERO(pPage))
840 {
841 void *pv;
842 PPGMPAGEMAP pMap;
843 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
844 if (RT_FAILURE(rc))
845 return rc;
846 pTlbe->pMap = pMap;
847 pTlbe->pv = pv;
848 }
849 else
850 {
851 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
852 pTlbe->pMap = NULL;
853 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
854 }
855#if 1 /* Testing */
856 pTlbe->GCPhys = (GCPhys & X86_PTE_PAE_PG_MASK);
857#endif
858 pTlbe->pPage = pPage;
859 return VINF_SUCCESS;
860}
861
862
863/**
864 * Load a guest page into the ring-3 physical TLB.
865 *
866 * @returns VBox status code.
867 * @retval VINF_SUCCESS on success
868 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
869 *
870 * @param pPGM The PGM instance pointer.
871 * @param pPage Pointer to the PGMPAGE structure corresponding to
872 * GCPhys.
873 * @param GCPhys The guest physical address in question.
874 */
875int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys)
876{
877 Assert(PGMIsLocked(PGM2VM(pPGM)));
878 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
879
880 /*
881 * Map the page.
882 * Make a special case for the zero page as it is kind of special.
883 */
884 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
885 if (!PGM_PAGE_IS_ZERO(pPage))
886 {
887 void *pv;
888 PPGMPAGEMAP pMap;
889 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
890 if (RT_FAILURE(rc))
891 return rc;
892 pTlbe->pMap = pMap;
893 pTlbe->pv = pv;
894 }
895 else
896 {
897 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
898 pTlbe->pMap = NULL;
899 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
900 }
901#if 1 /* Testing */
902 pTlbe->GCPhys = (GCPhys & X86_PTE_PAE_PG_MASK);
903#endif
904 pTlbe->pPage = pPage;
905 return VINF_SUCCESS;
906}
907#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
908
909
910/**
911 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
912 * own the PGM lock and therefore not need to lock the mapped page.
913 *
914 * @returns VBox status code.
915 * @retval VINF_SUCCESS on success.
916 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
917 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
918 *
919 * @param pVM The VM handle.
920 * @param GCPhys The guest physical address of the page that should be mapped.
921 * @param pPage Pointer to the PGMPAGE structure for the page.
922 * @param ppv Where to store the address corresponding to GCPhys.
923 *
924 * @internal
925 */
926int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
927{
928 int rc;
929 AssertReturn(pPage, VERR_INTERNAL_ERROR);
930 Assert(PGMIsLocked(pVM));
931
932 /*
933 * Make sure the page is writable.
934 */
935 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
936 {
937 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
938 if (RT_FAILURE(rc))
939 return rc;
940 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
941 }
942 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
943
944 /*
945 * Get the mapping address.
946 */
947#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
948 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
949#else
950 PPGMPAGEMAPTLBE pTlbe;
951 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
952 if (RT_FAILURE(rc))
953 return rc;
954 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
955#endif
956 return VINF_SUCCESS;
957}
958
959
960/**
961 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
962 * own the PGM lock and therefore not need to lock the mapped page.
963 *
964 * @returns VBox status code.
965 * @retval VINF_SUCCESS on success.
966 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
967 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
968 *
969 * @param pVM The VM handle.
970 * @param GCPhys The guest physical address of the page that should be mapped.
971 * @param pPage Pointer to the PGMPAGE structure for the page.
972 * @param ppv Where to store the address corresponding to GCPhys.
973 *
974 * @internal
975 */
976int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
977{
978 AssertReturn(pPage, VERR_INTERNAL_ERROR);
979 Assert(PGMIsLocked(pVM));
980 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
981
982 /*
983 * Get the mapping address.
984 */
985#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
986 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
987#else
988 PPGMPAGEMAPTLBE pTlbe;
989 int rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
990 if (RT_FAILURE(rc))
991 return rc;
992 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
993#endif
994 return VINF_SUCCESS;
995}
996
997
998/**
999 * Requests the mapping of a guest page into the current context.
1000 *
1001 * This API should only be used for a very short time, as it will consume
1002 * scarce resources (R0 and GC) in the mapping cache. When you're done
1003 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1004 *
1005 * This API will assume your intention is to write to the page, and will
1006 * therefore replace shared and zero pages. If you do not intend to modify
1007 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1008 *
1009 * @returns VBox status code.
1010 * @retval VINF_SUCCESS on success.
1011 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1012 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1013 *
1014 * @param pVM The VM handle.
1015 * @param GCPhys The guest physical address of the page that should be mapped.
1016 * @param ppv Where to store the address corresponding to GCPhys.
1017 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1018 *
1019 * @remarks The caller is responsible for dealing with access handlers.
1020 * @todo Add an informational return code for pages with access handlers?
1021 *
1022 * @remark Avoid calling this API from within critical sections (other than the
1023 * PGM one) because of the deadlock risk. External threads may need to
1024 * delegate jobs to the EMTs.
1025 * @thread Any thread.
1026 */
1027VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1028{
1029#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1030
1031 /*
1032 * Find the page and make sure it's writable.
1033 */
1034 PPGMPAGE pPage;
1035 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1036 if (RT_SUCCESS(rc))
1037 {
1038 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1039 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1040 if (RT_SUCCESS(rc))
1041 {
1042 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1043# if 0
1044 pLock->pvMap = 0;
1045 pLock->pvPage = pPage;
1046# else
1047 pLock->u32Dummy = UINT32_MAX;
1048# endif
1049 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1050 rc = VINF_SUCCESS;
1051 }
1052 }
1053
1054#else /* IN_RING3 || IN_RING0 */
1055 int rc = pgmLock(pVM);
1056 AssertRCReturn(rc, rc);
1057
1058 /*
1059 * Query the Physical TLB entry for the page (may fail).
1060 */
1061 PPGMPAGEMAPTLBE pTlbe;
1062 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1063 if (RT_SUCCESS(rc))
1064 {
1065 /*
1066 * If the page is shared, the zero page, or being write monitored,
1067 * it must be converted to a page that's writable if possible.
1068 */
1069 PPGMPAGE pPage = pTlbe->pPage;
1070 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1071 {
1072 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1073 if (RT_SUCCESS(rc))
1074 {
1075 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1076 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1077 }
1078 }
1079 if (RT_SUCCESS(rc))
1080 {
1081 /*
1082 * Now, just perform the locking and calculate the return address.
1083 */
1084 PPGMPAGEMAP pMap = pTlbe->pMap;
1085 if (pMap)
1086 pMap->cRefs++;
1087
1088 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1089 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1090 {
1091 if (cLocks == 0)
1092 pVM->pgm.s.cWriteLockedPages++;
1093 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1094 }
1095 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
1096 {
1097 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1098 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
1099 if (pMap)
1100 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1101 }
1102
1103 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1104 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1105 pLock->pvMap = pMap;
1106 }
1107 }
1108
1109 pgmUnlock(pVM);
1110#endif /* IN_RING3 || IN_RING0 */
1111 return rc;
1112}
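
/*
 * Illustrative usage sketch (not part of the original file): the intended
 * map / modify / release pattern for PGMPhysGCPhys2CCPtr. Assumes a valid pVM,
 * that the write fits within one page and that the caller deals with access
 * handlers itself; the helper name is made up.
 */
static int exampleWriteGuestPage(PVM pVM, RTGCPHYS GCPhys, const void *pvSrc, size_t cb)
{
    AssertReturn(cb <= PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);

    PGMPAGEMAPLOCK Lock;
    void          *pv;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(pv, pvSrc, cb);                      /* keep the access short lived */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release ASAP */
    }
    return rc;
}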
1113
1114
1115/**
1116 * Requests the mapping of a guest page into the current context.
1117 *
1118 * This API should only be used for a very short time, as it will consume
1119 * scarce resources (R0 and GC) in the mapping cache. When you're done
1120 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1121 *
1122 * @returns VBox status code.
1123 * @retval VINF_SUCCESS on success.
1124 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1125 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1126 *
1127 * @param pVM The VM handle.
1128 * @param GCPhys The guest physical address of the page that should be mapped.
1129 * @param ppv Where to store the address corresponding to GCPhys.
1130 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1131 *
1132 * @remarks The caller is responsible for dealing with access handlers.
1133 * @todo Add an informational return code for pages with access handlers?
1134 *
1135 * @remark Avoid calling this API from within critical sections (other than
1136 * the PGM one) because of the deadlock risk.
1137 * @thread Any thread.
1138 */
1139VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1140{
1141#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1142
1143 /*
1144 * Find the page and make sure it's readable.
1145 */
1146 PPGMPAGE pPage;
1147 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1148 if (RT_SUCCESS(rc))
1149 {
1150 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1151 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1152 else
1153 {
1154 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1155# if 0
1156 pLock->pvMap = 0;
1157 pLock->pvPage = pPage;
1158# else
1159 pLock->u32Dummy = UINT32_MAX;
1160# endif
1161 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1162 rc = VINF_SUCCESS;
1163 }
1164 }
1165
1166#else /* IN_RING3 || IN_RING0 */
1167 int rc = pgmLock(pVM);
1168 AssertRCReturn(rc, rc);
1169
1170 /*
1171 * Query the Physical TLB entry for the page (may fail).
1172 */
1173 PPGMPAGEMAPTLBE pTlbe;
1174 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1175 if (RT_SUCCESS(rc))
1176 {
1177 /* MMIO pages don't have any readable backing. */
1178 PPGMPAGE pPage = pTlbe->pPage;
1179 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1180 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1181 else
1182 {
1183 /*
1184 * Now, just perform the locking and calculate the return address.
1185 */
1186 PPGMPAGEMAP pMap = pTlbe->pMap;
1187 if (pMap)
1188 pMap->cRefs++;
1189
1190 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1191 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1192 {
1193 if (cLocks == 0)
1194 pVM->pgm.s.cReadLockedPages++;
1195 PGM_PAGE_INC_READ_LOCKS(pPage);
1196 }
1197 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
1198 {
1199 PGM_PAGE_INC_READ_LOCKS(pPage);
1200 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
1201 if (pMap)
1202 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1203 }
1204
1205 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1206 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1207 pLock->pvMap = pMap;
1208 }
1209 }
1210
1211 pgmUnlock(pVM);
1212#endif /* IN_RING3 || IN_RING0 */
1213 return rc;
1214}
1215
1216
1217/**
1218 * Requests the mapping of a guest page given by virtual address into the current context.
1219 *
1220 * This API should only be used for a very short time, as it will consume
1221 * scarce resources (R0 and GC) in the mapping cache. When you're done
1222 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1223 *
1224 * This API will assume your intention is to write to the page, and will
1225 * therefore replace shared and zero pages. If you do not intend to modify
1226 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1227 *
1228 * @returns VBox status code.
1229 * @retval VINF_SUCCESS on success.
1230 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1231 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1232 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1233 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1234 *
1235 * @param pVCpu VMCPU handle.
1236 * @param GCPtr The guest virtual address of the page that should be mapped.
1237 * @param ppv Where to store the address corresponding to GCPhys.
1238 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1239 *
1240 * @remark Avoid calling this API from within critical sections (other than
1241 * the PGM one) because of the deadlock risk.
1242 * @thread EMT
1243 */
1244VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1245{
1246 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1247 RTGCPHYS GCPhys;
1248 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1249 if (RT_SUCCESS(rc))
1250 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1251 return rc;
1252}
1253
1254
1255/**
1256 * Requests the mapping of a guest page given by virtual address into the current context.
1257 *
1258 * This API should only be used for a very short time, as it will consume
1259 * scarce resources (R0 and GC) in the mapping cache. When you're done
1260 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1261 *
1262 * @returns VBox status code.
1263 * @retval VINF_SUCCESS on success.
1264 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1265 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1266 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1267 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1268 *
1269 * @param pVCpu VMCPU handle.
1270 * @param GCPtr The guest virtual address of the page that should be mapped.
1271 * @param ppv Where to store the address corresponding to GCPhys.
1272 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1273 *
1274 * @remark Avoid calling this API from within critical sections (other than
1275 * the PGM one) because of the deadlock risk.
1276 * @thread EMT
1277 */
1278VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1279{
1280 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1281 RTGCPHYS GCPhys;
1282 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1283 if (RT_SUCCESS(rc))
1284 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1285 return rc;
1286}
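
/*
 * Illustrative usage sketch (not part of the original file): peeking at a byte
 * of guest memory through a guest virtual address using the read-only variant,
 * so shared and zero pages are left untouched. Assumes EMT context and a valid
 * pVCpu; the helper name is made up.
 */
static int examplePeekGuestByte(PVMCPU pVCpu, RTGCPTR GCPtr, uint8_t *pb)
{
    void const    *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtr, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pb = *(uint8_t const *)pv;
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
    }
    return rc;
}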
1287
1288
1289/**
1290 * Release the mapping of a guest page.
1291 *
1292 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1293 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1294 *
1295 * @param pVM The VM handle.
1296 * @param pLock The lock structure initialized by the mapping function.
1297 */
1298VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1299{
1300#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1301 /* currently nothing to do here. */
1302 Assert(pLock->u32Dummy == UINT32_MAX);
1303 pLock->u32Dummy = 0;
1304
1305#else /* IN_RING3 */
1306 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1307 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1308 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1309
1310 pLock->uPageAndType = 0;
1311 pLock->pvMap = NULL;
1312
1313 pgmLock(pVM);
1314 if (fWriteLock)
1315 {
1316 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1317 Assert(cLocks > 0);
1318 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1319 {
1320 if (cLocks == 1)
1321 {
1322 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1323 pVM->pgm.s.cWriteLockedPages--;
1324 }
1325 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1326 }
1327
1328 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1329 {
1330 PGM_PAGE_SET_WRITTEN_TO(pPage);
1331 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1332 Assert(pVM->pgm.s.cMonitoredPages > 0);
1333 pVM->pgm.s.cMonitoredPages--;
1334 pVM->pgm.s.cWrittenToPages++;
1335 }
1336 }
1337 else
1338 {
1339 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1340 Assert(cLocks > 0);
1341 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1342 {
1343 if (cLocks == 1)
1344 {
1345 Assert(pVM->pgm.s.cReadLockedPages > 0);
1346 pVM->pgm.s.cReadLockedPages--;
1347 }
1348 PGM_PAGE_DEC_READ_LOCKS(pPage);
1349 }
1350 }
1351
1352 if (pMap)
1353 {
1354 Assert(pMap->cRefs >= 1);
1355 pMap->cRefs--;
1356 pMap->iAge = 0;
1357 }
1358 pgmUnlock(pVM);
1359#endif /* IN_RING3 */
1360}
1361
1362
1363/**
1364 * Converts a GC physical address to a HC ring-3 pointer.
1365 *
1366 * @returns VINF_SUCCESS on success.
1367 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1368 * page but has no physical backing.
1369 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1370 * GC physical address.
1371 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1372 * a dynamic ram chunk boundary
1373 *
1374 * @param pVM The VM handle.
1375 * @param GCPhys The GC physical address to convert.
1376 * @param cbRange Physical range
1377 * @param pR3Ptr Where to store the R3 pointer on success.
1378 *
1379 * @deprecated Avoid when possible!
1380 */
1381VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
1382{
1383/** @todo this is kind of hacky and needs some more work. */
1384#ifndef DEBUG_sandervl
1385 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1386#endif
1387
1388 Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): dont use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
1389#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1390 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1391#else
1392 pgmLock(pVM);
1393
1394 PPGMRAMRANGE pRam;
1395 PPGMPAGE pPage;
1396 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
1397 if (RT_SUCCESS(rc))
1398 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
1399
1400 pgmUnlock(pVM);
1401 Assert(rc <= VINF_SUCCESS);
1402 return rc;
1403#endif
1404}
1405
1406
1407#ifdef VBOX_STRICT
1408/**
1409 * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
1410 *
1411 * @returns The R3Ptr, NIL_RTR3PTR on failure.
1412 * @param pVM The VM handle.
1413 * @param GCPhys The GC physical address.
1414 * @param cbRange Physical range.
1415 *
1416 * @deprecated Avoid when possible.
1417 */
1418VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
1419{
1420 RTR3PTR R3Ptr;
1421 int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
1422 if (RT_SUCCESS(rc))
1423 return R3Ptr;
1424 return NIL_RTR3PTR;
1425}
1426#endif /* VBOX_STRICT */
1427
1428
1429/**
1430 * Converts a guest pointer to a GC physical address.
1431 *
1432 * This uses the current CR3/CR0/CR4 of the guest.
1433 *
1434 * @returns VBox status code.
1435 * @param pVCpu The VMCPU Handle
1436 * @param GCPtr The guest pointer to convert.
1437 * @param pGCPhys Where to store the GC physical address.
1438 */
1439VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1440{
1441 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1442 if (pGCPhys && RT_SUCCESS(rc))
1443 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1444 return rc;
1445}
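
/*
 * Illustrative usage sketch (not part of the original file): resolving a guest
 * virtual address with the current paging mode and checking whether the backing
 * page is plain RAM. Assumes EMT context; the helper name is made up.
 */
static bool exampleGCPtrIsBackedByRam(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
    if (RT_FAILURE(rc))
        return false;   /* page table or page not present */
    return PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhys);
}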
1446
1447
1448/**
1449 * Converts a guest pointer to a HC physical address.
1450 *
1451 * This uses the current CR3/CR0/CR4 of the guest.
1452 *
1453 * @returns VBox status code.
1454 * @param pVCpu The VMCPU Handle
1455 * @param GCPtr The guest pointer to convert.
1456 * @param pHCPhys Where to store the HC physical address.
1457 */
1458VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
1459{
1460 PVM pVM = pVCpu->CTX_SUFF(pVM);
1461 RTGCPHYS GCPhys;
1462 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1463 if (RT_SUCCESS(rc))
1464 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
1465 return rc;
1466}
1467
1468
1469/**
1470 * Converts a guest pointer to a R3 pointer.
1471 *
1472 * This uses the current CR3/CR0/CR4 of the guest.
1473 *
1474 * @returns VBox status code.
1475 * @param pVCpu The VMCPU Handle
1476 * @param GCPtr The guest pointer to convert.
1477 * @param pR3Ptr Where to store the R3 virtual address.
1478 *
1479 * @deprecated Don't use this.
1480 */
1481VMMDECL(int) PGMPhysGCPtr2R3Ptr(PVMCPU pVCpu, RTGCPTR GCPtr, PRTR3PTR pR3Ptr)
1482{
1483 PVM pVM = pVCpu->CTX_SUFF(pVM);
1484 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1485 RTGCPHYS GCPhys;
1486 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1487 if (RT_SUCCESS(rc))
1488 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pR3Ptr);
1489 return rc;
1490}
1491
1492
1493
1494#undef LOG_GROUP
1495#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
1496
1497
1498#ifdef IN_RING3
1499/**
1500 * Cache PGMPhys memory access
1501 *
1502 * @param pVM VM Handle.
1503 * @param pCache Cache structure pointer
1504 * @param GCPhys GC physical address
1505 * @param pbR3 R3 pointer corresponding to the physical page
1506 *
1507 * @thread EMT.
1508 */
1509static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
1510{
1511 uint32_t iCacheIndex;
1512
1513 Assert(VM_IS_EMT(pVM));
1514
1515 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
1516 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
1517
1518 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
1519
1520 ASMBitSet(&pCache->aEntries, iCacheIndex);
1521
1522 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
1523 pCache->Entry[iCacheIndex].pbR3 = pbR3;
1524}
1525#endif /* IN_RING3 */
1526
1527
1528/**
1529 * Deals with reading from a page with one or more ALL access handlers.
1530 *
1531 * @returns VBox status code. Can be ignored in ring-3.
1532 * @retval VINF_SUCCESS.
1533 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1534 *
1535 * @param pVM The VM handle.
1536 * @param pPage The page descriptor.
1537 * @param GCPhys The physical address to start reading at.
1538 * @param pvBuf Where to put the bits we read.
1539 * @param cb How much to read - less or equal to a page.
1540 */
1541static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
1542{
1543 /*
1544 * The most frequent access here is MMIO and shadowed ROM.
1545 * The current code ASSUMES all these access handlers cover full pages!
1546 */
1547
1548 /*
1549 * Whatever we do we need the source page, map it first.
1550 */
1551 const void *pvSrc = NULL;
1552 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
1553 if (RT_FAILURE(rc))
1554 {
1555 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1556 GCPhys, pPage, rc));
1557 memset(pvBuf, 0xff, cb);
1558 return VINF_SUCCESS;
1559 }
1560 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1561
1562 /*
1563 * Deal with any physical handlers.
1564 */
1565 PPGMPHYSHANDLER pPhys = NULL;
1566 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
1567 {
1568#ifdef IN_RING3
1569 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1570 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1571 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
1572 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
1573 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1574 Assert(pPhys->CTX_SUFF(pfnHandler));
1575
1576 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
1577 void *pvUser = pPhys->CTX_SUFF(pvUser);
1578
1579 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
1580 STAM_PROFILE_START(&pPhys->Stat, h);
1581 Assert(PGMIsLockOwner(pVM));
1582 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1583 pgmUnlock(pVM);
1584 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
1585 pgmLock(pVM);
1586# ifdef VBOX_WITH_STATISTICS
1587 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1588 if (pPhys)
1589 STAM_PROFILE_STOP(&pPhys->Stat, h);
1590# else
1591 pPhys = NULL; /* might not be valid anymore. */
1592# endif
1593 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
1594#else
1595 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1596 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1597 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1598#endif
1599 }
1600
1601 /*
1602 * Deal with any virtual handlers.
1603 */
1604 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
1605 {
1606 unsigned iPage;
1607 PPGMVIRTHANDLER pVirt;
1608
1609 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
1610 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
1611 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
1612 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1613 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
1614
1615#ifdef IN_RING3
1616 if (pVirt->pfnHandlerR3)
1617 {
1618 if (!pPhys)
1619 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1620 else
1621 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
1622 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1623 + (iPage << PAGE_SHIFT)
1624 + (GCPhys & PAGE_OFFSET_MASK);
1625
1626 STAM_PROFILE_START(&pVirt->Stat, h);
1627 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
1628 STAM_PROFILE_STOP(&pVirt->Stat, h);
1629 if (rc2 == VINF_SUCCESS)
1630 rc = VINF_SUCCESS;
1631 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
1632 }
1633 else
1634 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1635#else
1636 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1637 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1638 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1639#endif
1640 }
1641
1642 /*
1643 * Take the default action.
1644 */
1645 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1646 memcpy(pvBuf, pvSrc, cb);
1647 return rc;
1648}
1649
1650
1651/**
1652 * Read physical memory.
1653 *
1654 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
1655 * want to ignore those.
1656 *
1657 * @returns VBox status code. Can be ignored in ring-3.
1658 * @retval VINF_SUCCESS.
1659 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1660 *
1661 * @param pVM VM Handle.
1662 * @param GCPhys Physical address start reading from.
1663 * @param pvBuf Where to put the read bits.
1664 * @param cbRead How many bytes to read.
1665 */
1666VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1667{
1668 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
1669 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
1670
1671 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysRead));
1672 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
1673
1674 pgmLock(pVM);
1675
1676 /*
1677 * Copy loop on ram ranges.
1678 */
1679 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1680 for (;;)
1681 {
1682 /* Find range. */
1683 while (pRam && GCPhys > pRam->GCPhysLast)
1684 pRam = pRam->CTX_SUFF(pNext);
1685 /* Inside range or not? */
1686 if (pRam && GCPhys >= pRam->GCPhys)
1687 {
1688 /*
1689 * Must work our way thru this page by page.
1690 */
1691 RTGCPHYS off = GCPhys - pRam->GCPhys;
1692 while (off < pRam->cb)
1693 {
1694 unsigned iPage = off >> PAGE_SHIFT;
1695 PPGMPAGE pPage = &pRam->aPages[iPage];
1696 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1697 if (cb > cbRead)
1698 cb = cbRead;
1699
1700 /*
1701 * Any ALL access handlers?
1702 */
1703 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
1704 {
1705 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
1706 if (RT_FAILURE(rc))
1707 {
1708 pgmUnlock(pVM);
1709 return rc;
1710 }
1711 }
1712 else
1713 {
1714 /*
1715 * Get the pointer to the page.
1716 */
1717 const void *pvSrc;
1718 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
1719 if (RT_SUCCESS(rc))
1720 memcpy(pvBuf, pvSrc, cb);
1721 else
1722 {
1723 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1724 pRam->GCPhys + off, pPage, rc));
1725 memset(pvBuf, 0xff, cb);
1726 }
1727 }
1728
1729 /* next page */
1730 if (cb >= cbRead)
1731 {
1732 pgmUnlock(pVM);
1733 return VINF_SUCCESS;
1734 }
1735 cbRead -= cb;
1736 off += cb;
1737 pvBuf = (char *)pvBuf + cb;
1738 } /* walk pages in ram range. */
1739
1740 GCPhys = pRam->GCPhysLast + 1;
1741 }
1742 else
1743 {
1744 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
1745
1746 /*
1747 * Unassigned address space.
1748 */
1749 if (!pRam)
1750 break;
1751 size_t cb = pRam->GCPhys - GCPhys;
1752 if (cb >= cbRead)
1753 {
1754 memset(pvBuf, 0xff, cbRead);
1755 break;
1756 }
1757 memset(pvBuf, 0xff, cb);
1758
1759 cbRead -= cb;
1760 pvBuf = (char *)pvBuf + cb;
1761 GCPhys += cb;
1762 }
1763 } /* Ram range walk */
1764
1765 pgmUnlock(pVM);
1766 return VINF_SUCCESS;
1767}
1768
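/*
 * Illustrative usage sketch (not part of the original file): reading a small
 * block of guest physical memory while respecting access handlers and MMIO.
 * The address and the buffer are made-up example values.
 *
 *     uint8_t abBuf[64];
 *     int rcRead = PGMPhysRead(pVM, UINT64_C(0x000a0000), abBuf, sizeof(abBuf));
 *     if (RT_FAILURE(rcRead))
 *         return rcRead;   (R0/RC only: VERR_PGM_PHYS_WR_HIT_HANDLER, retry in ring-3)
 */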
1769
1770/**
1771 * Deals with writing to a page with one or more WRITE or ALL access handlers.
1772 *
1773 * @returns VBox status code. Can be ignored in ring-3.
1774 * @retval VINF_SUCCESS.
1775 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1776 *
1777 * @param pVM The VM handle.
1778 * @param pPage The page descriptor.
1779 * @param GCPhys The physical address to start writing at.
1780 * @param pvBuf What to write.
1781 * @param cbWrite How much to write - less or equal to a page.
1782 */
1783static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
1784{
1785 void *pvDst = NULL;
1786 int rc;
1787
1788 /*
1789 * Give priority to physical handlers (like #PF does).
1790 *
1791 * Hope for a lonely physical handler first that covers the whole
1792 * write area. This should be a pretty frequent case with MMIO and
1793 * the heavy usage of full page handlers in the page pool.
1794 */
1795 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
1796 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
1797 {
1798 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1799 if (pCur)
1800 {
1801 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1802 Assert(pCur->CTX_SUFF(pfnHandler));
1803
1804 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
1805 if (cbRange > cbWrite)
1806 cbRange = cbWrite;
1807
1808#ifndef IN_RING3
1809 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1810 NOREF(cbRange);
1811 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1812 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1813
1814#else /* IN_RING3 */
1815 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1816 if (!PGM_PAGE_IS_MMIO(pPage))
1817 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1818 else
1819 rc = VINF_SUCCESS;
1820 if (RT_SUCCESS(rc))
1821 {
1822 PFNPGMR3PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
1823 void *pvUser = pCur->CTX_SUFF(pvUser);
1824
1825 STAM_PROFILE_START(&pCur->Stat, h);
1826 Assert(PGMIsLockOwner(pVM));
1827 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1828 pgmUnlock(pVM);
1829 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
1830 pgmLock(pVM);
1831# ifdef VBOX_WITH_STATISTICS
1832 pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1833 if (pCur)
1834 STAM_PROFILE_STOP(&pCur->Stat, h);
1835# else
1836 pCur = NULL; /* might not be valid anymore. */
1837# endif
1838 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1839 memcpy(pvDst, pvBuf, cbRange);
1840 else
1841 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
1842 }
1843 else
1844 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1845 GCPhys, pPage, rc), rc);
1846 if (RT_LIKELY(cbRange == cbWrite))
1847 return VINF_SUCCESS;
1848
1849 /* more fun to be had below */
1850 cbWrite -= cbRange;
1851 GCPhys += cbRange;
1852 pvBuf = (uint8_t *)pvBuf + cbRange;
1853 pvDst = (uint8_t *)pvDst + cbRange;
1854#endif /* IN_RING3 */
1855 }
1856 /* else: the handler is somewhere else in the page, deal with it below. */
1857 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
1858 }
1859 /*
1860 * A virtual handler without any interfering physical handlers.
1861     * Hopefully it'll cover the whole write.
1862 */
1863 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
1864 {
1865 unsigned iPage;
1866 PPGMVIRTHANDLER pCur;
1867 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
1868 if (RT_SUCCESS(rc))
1869 {
1870 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
1871 if (cbRange > cbWrite)
1872 cbRange = cbWrite;
1873
1874#ifndef IN_RING3
1875 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1876 NOREF(cbRange);
1877 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1878 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1879
1880#else /* IN_RING3 */
1881
1882 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1883 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1884 if (RT_SUCCESS(rc))
1885 {
1886 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1887 if (pCur->pfnHandlerR3)
1888 {
1889 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
1890 + (iPage << PAGE_SHIFT)
1891 + (GCPhys & PAGE_OFFSET_MASK);
1892
1893 STAM_PROFILE_START(&pCur->Stat, h);
1894 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
1895 STAM_PROFILE_STOP(&pCur->Stat, h);
1896 }
1897 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1898 memcpy(pvDst, pvBuf, cbRange);
1899 else
1900 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
1901 }
1902 else
1903 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1904 GCPhys, pPage, rc), rc);
1905 if (RT_LIKELY(cbRange == cbWrite))
1906 return VINF_SUCCESS;
1907
1908 /* more fun to be had below */
1909 cbWrite -= cbRange;
1910 GCPhys += cbRange;
1911 pvBuf = (uint8_t *)pvBuf + cbRange;
1912 pvDst = (uint8_t *)pvDst + cbRange;
1913#endif
1914 }
1915 /* else: the handler is somewhere else in the page, deal with it below. */
1916 }
1917
1918 /*
1919 * Deal with all the odd ends.
1920 */
1921
1922 /* We need a writable destination page. */
1923 if (!pvDst)
1924 {
1925 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1926 AssertLogRelMsgReturn(RT_SUCCESS(rc),
1927 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1928 GCPhys, pPage, rc), rc);
1929 }
1930
1931 /* The loop state (big + ugly). */
1932 unsigned iVirtPage = 0;
1933 PPGMVIRTHANDLER pVirt = NULL;
1934 uint32_t offVirt = PAGE_SIZE;
1935 uint32_t offVirtLast = PAGE_SIZE;
1936 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
1937
1938 PPGMPHYSHANDLER pPhys = NULL;
1939 uint32_t offPhys = PAGE_SIZE;
1940 uint32_t offPhysLast = PAGE_SIZE;
1941 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
1942
1943 /* The loop. */
1944 for (;;)
1945 {
1946 /*
1947 * Find the closest handler at or above GCPhys.
1948 */
1949 if (fMoreVirt && !pVirt)
1950 {
1951 int rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
1952 if (RT_SUCCESS(rc))
1953 {
1954 offVirt = 0;
1955 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1956 }
1957 else
1958 {
1959 PPGMPHYS2VIRTHANDLER pVirtPhys;
1960 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
1961 GCPhys, true /* fAbove */);
1962 if ( pVirtPhys
1963 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
1964 {
1965 /* ASSUME that pVirtPhys only covers one page. */
1966 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
1967 Assert(pVirtPhys->Core.Key > GCPhys);
1968
1969 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
1970 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
1971 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1972 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1973 }
1974 else
1975 {
1976 pVirt = NULL;
1977 fMoreVirt = false;
1978 offVirt = offVirtLast = PAGE_SIZE;
1979 }
1980 }
1981 }
1982
1983 if (fMorePhys && !pPhys)
1984 {
1985 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1986 if (pPhys)
1987 {
1988 offPhys = 0;
1989 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
1990 }
1991 else
1992 {
1993 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
1994 GCPhys, true /* fAbove */);
1995 if ( pPhys
1996 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
1997 {
1998 offPhys = pPhys->Core.Key - GCPhys;
1999 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2000 }
2001 else
2002 {
2003 pPhys = NULL;
2004 fMorePhys = false;
2005 offPhys = offPhysLast = PAGE_SIZE;
2006 }
2007 }
2008 }
2009
2010 /*
2011 * Handle access to space without handlers (that's easy).
2012 */
2013 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2014 uint32_t cbRange = (uint32_t)cbWrite;
2015 if (offPhys && offVirt)
2016 {
2017 if (cbRange > offPhys)
2018 cbRange = offPhys;
2019 if (cbRange > offVirt)
2020 cbRange = offVirt;
2021 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2022 }
2023 /*
2024 * Physical handler.
2025 */
2026 else if (!offPhys && offVirt)
2027 {
2028 if (cbRange > offPhysLast + 1)
2029 cbRange = offPhysLast + 1;
2030 if (cbRange > offVirt)
2031 cbRange = offVirt;
2032#ifdef IN_RING3
2033 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2034 void *pvUser = pPhys->CTX_SUFF(pvUser);
2035
2036 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2037 STAM_PROFILE_START(&pPhys->Stat, h);
2038 Assert(PGMIsLockOwner(pVM));
2039 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2040 pgmUnlock(pVM);
2041 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2042 pgmLock(pVM);
2043# ifdef VBOX_WITH_STATISTICS
2044 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2045 if (pPhys)
2046 STAM_PROFILE_STOP(&pPhys->Stat, h);
2047# else
2048 pPhys = NULL; /* might not be valid anymore. */
2049# endif
2050 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2051#else
2052 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2053 NOREF(cbRange);
2054 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2055 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2056#endif
2057 }
2058 /*
2059 * Virtual handler.
2060 */
2061 else if (offPhys && !offVirt)
2062 {
2063 if (cbRange > offVirtLast + 1)
2064 cbRange = offVirtLast + 1;
2065 if (cbRange > offPhys)
2066 cbRange = offPhys;
2067#ifdef IN_RING3
2068            Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2069 if (pVirt->pfnHandlerR3)
2070 {
2071 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2072 + (iVirtPage << PAGE_SHIFT)
2073 + (GCPhys & PAGE_OFFSET_MASK);
2074 STAM_PROFILE_START(&pVirt->Stat, h);
2075 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2076 STAM_PROFILE_STOP(&pVirt->Stat, h);
2077 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2078 }
2079 pVirt = NULL;
2080#else
2081 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2082 NOREF(cbRange);
2083 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2084 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2085#endif
2086 }
2087 /*
2088 * Both... give the physical one priority.
2089 */
2090 else
2091 {
2092 Assert(!offPhys && !offVirt);
2093 if (cbRange > offVirtLast + 1)
2094 cbRange = offVirtLast + 1;
2095 if (cbRange > offPhysLast + 1)
2096 cbRange = offPhysLast + 1;
2097
2098#ifdef IN_RING3
2099 if (pVirt->pfnHandlerR3)
2100 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2101 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2102
2103 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2104 void *pvUser = pPhys->CTX_SUFF(pvUser);
2105
2106 STAM_PROFILE_START(&pPhys->Stat, h);
2107 Assert(PGMIsLockOwner(pVM));
2108 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2109 pgmUnlock(pVM);
2110 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2111 pgmLock(pVM);
2112# ifdef VBOX_WITH_STATISTICS
2113 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2114 if (pPhys)
2115 STAM_PROFILE_STOP(&pPhys->Stat, h);
2116# else
2117 pPhys = NULL; /* might not be valid anymore. */
2118# endif
2119 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2120 if (pVirt->pfnHandlerR3)
2121 {
2122
2123 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2124 + (iVirtPage << PAGE_SHIFT)
2125 + (GCPhys & PAGE_OFFSET_MASK);
2126 STAM_PROFILE_START(&pVirt->Stat, h);
2127 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2128 STAM_PROFILE_STOP(&pVirt->Stat, h);
2129 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2130 rc = VINF_SUCCESS;
2131 else
2132                    AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
2133 }
2134 pPhys = NULL;
2135 pVirt = NULL;
2136#else
2137 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2138 NOREF(cbRange);
2139 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2140 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2141#endif
2142 }
2143 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2144 memcpy(pvDst, pvBuf, cbRange);
2145
2146 /*
2147 * Advance if we've got more stuff to do.
2148 */
2149 if (cbRange >= cbWrite)
2150 return VINF_SUCCESS;
2151
2152 cbWrite -= cbRange;
2153 GCPhys += cbRange;
2154 pvBuf = (uint8_t *)pvBuf + cbRange;
2155 pvDst = (uint8_t *)pvDst + cbRange;
2156
2157 offPhys -= cbRange;
2158 offPhysLast -= cbRange;
2159 offVirt -= cbRange;
2160 offVirtLast -= cbRange;
2161 }
2162}
2163
2164
2165/**
2166 * Write to physical memory.
2167 *
2168 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2169 * want to ignore those.
2170 *
2171 * @returns VBox status code. Can be ignored in ring-3.
2172 * @retval VINF_SUCCESS.
2173 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2174 *
2175 * @param pVM VM Handle.
2176 * @param GCPhys Physical address to write to.
2177 * @param pvBuf What to write.
2178 * @param cbWrite How many bytes to write.
2179 */
2180VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2181{
2182 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2183 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2184 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2185
2186 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWrite));
2187 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2188
2189 pgmLock(pVM);
2190
2191 /*
2192 * Copy loop on ram ranges.
2193 */
2194 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2195 for (;;)
2196 {
2197 /* Find range. */
2198 while (pRam && GCPhys > pRam->GCPhysLast)
2199 pRam = pRam->CTX_SUFF(pNext);
2200 /* Inside range or not? */
2201 if (pRam && GCPhys >= pRam->GCPhys)
2202 {
2203 /*
2204 * Must work our way thru this page by page.
2205 */
2206            RTGCPHYS off = GCPhys - pRam->GCPhys;
2207 while (off < pRam->cb)
2208 {
2209                unsigned iPage = off >> PAGE_SHIFT;
2210 PPGMPAGE pPage = &pRam->aPages[iPage];
2211 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2212 if (cb > cbWrite)
2213 cb = cbWrite;
2214
2215 /*
2216 * Any active WRITE or ALL access handlers?
2217 */
2218 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2219 {
2220 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2221 if (RT_FAILURE(rc))
2222 {
2223 pgmUnlock(pVM);
2224 return rc;
2225 }
2226 }
2227 else
2228 {
2229 /*
2230 * Get the pointer to the page.
2231 */
2232 void *pvDst;
2233 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
2234 if (RT_SUCCESS(rc))
2235 memcpy(pvDst, pvBuf, cb);
2236 else
2237 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2238 pRam->GCPhys + off, pPage, rc));
2239 }
2240
2241 /* next page */
2242 if (cb >= cbWrite)
2243 {
2244 pgmUnlock(pVM);
2245 return VINF_SUCCESS;
2246 }
2247
2248 cbWrite -= cb;
2249 off += cb;
2250 pvBuf = (const char *)pvBuf + cb;
2251 } /* walk pages in ram range */
2252
2253 GCPhys = pRam->GCPhysLast + 1;
2254 }
2255 else
2256 {
2257 /*
2258 * Unassigned address space, skip it.
2259 */
2260 if (!pRam)
2261 break;
2262 size_t cb = pRam->GCPhys - GCPhys;
2263 if (cb >= cbWrite)
2264 break;
2265 cbWrite -= cb;
2266 pvBuf = (const char *)pvBuf + cb;
2267 GCPhys += cb;
2268 }
2269 } /* Ram range walk */
2270
2271 pgmUnlock(pVM);
2272 return VINF_SUCCESS;
2273}
2274
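/*
 * Illustrative usage sketch (not part of the original file): writing guest
 * physical memory with handler/MMIO semantics, e.g. from a ring-3 device
 * model doing a DMA-style transfer. The address and data are made up.
 *
 *     static const uint8_t s_abData[4] = { 0xde, 0xad, 0xbe, 0xef };
 *     int rcWrite = PGMPhysWrite(pVM, UINT64_C(0x00100000), s_abData, sizeof(s_abData));
 *     AssertRC(rcWrite);   (the status can normally be ignored in ring-3)
 */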
2275
2276/**
2277 * Read from guest physical memory by GC physical address, bypassing
2278 * MMIO and access handlers.
2279 *
2280 * @returns VBox status.
2281 * @param pVM VM handle.
2282 * @param pvDst The destination address.
2283 * @param GCPhysSrc The source address (GC physical address).
2284 * @param cb The number of bytes to read.
2285 */
2286VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2287{
2288 /*
2289 * Treat the first page as a special case.
2290 */
2291 if (!cb)
2292 return VINF_SUCCESS;
2293
2294 /* map the 1st page */
2295 void const *pvSrc;
2296 PGMPAGEMAPLOCK Lock;
2297 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2298 if (RT_FAILURE(rc))
2299 return rc;
2300
2301 /* optimize for the case where access is completely within the first page. */
2302 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2303 if (RT_LIKELY(cb <= cbPage))
2304 {
2305 memcpy(pvDst, pvSrc, cb);
2306 PGMPhysReleasePageMappingLock(pVM, &Lock);
2307 return VINF_SUCCESS;
2308 }
2309
2310 /* copy to the end of the page. */
2311 memcpy(pvDst, pvSrc, cbPage);
2312 PGMPhysReleasePageMappingLock(pVM, &Lock);
2313 GCPhysSrc += cbPage;
2314 pvDst = (uint8_t *)pvDst + cbPage;
2315 cb -= cbPage;
2316
2317 /*
2318 * Page by page.
2319 */
2320 for (;;)
2321 {
2322 /* map the page */
2323 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2324 if (RT_FAILURE(rc))
2325 return rc;
2326
2327 /* last page? */
2328 if (cb <= PAGE_SIZE)
2329 {
2330 memcpy(pvDst, pvSrc, cb);
2331 PGMPhysReleasePageMappingLock(pVM, &Lock);
2332 return VINF_SUCCESS;
2333 }
2334
2335 /* copy the entire page and advance */
2336 memcpy(pvDst, pvSrc, PAGE_SIZE);
2337 PGMPhysReleasePageMappingLock(pVM, &Lock);
2338 GCPhysSrc += PAGE_SIZE;
2339 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2340 cb -= PAGE_SIZE;
2341 }
2342 /* won't ever get here. */
2343}
2344
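/*
 * Illustrative usage sketch (not part of the original file): peeking at guest
 * RAM without triggering any access handlers, e.g. for debugger-style
 * inspection. The address is a made-up example value.
 *
 *     uint32_t u32;
 *     int rcPeek = PGMPhysSimpleReadGCPhys(pVM, &u32, UINT64_C(0x1000), sizeof(u32));
 *     if (RT_SUCCESS(rcPeek))
 *         Log(("dword at 0x1000: %#x\n", u32));
 */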
2345
2346/**
2347 * Write to guest physical memory by GC physical address. This is the
2348 * write counterpart of PGMPhysSimpleReadGCPhys().
2349 *
2350 * This will bypass MMIO and access handlers.
2351 *
2352 * @returns VBox status.
2353 * @param pVM VM handle.
2354 * @param GCPhysDst The GC physical address of the destination.
2355 * @param pvSrc The source buffer.
2356 * @param cb The number of bytes to write.
2357 */
2358VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2359{
2360 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2361
2362 /*
2363 * Treat the first page as a special case.
2364 */
2365 if (!cb)
2366 return VINF_SUCCESS;
2367
2368 /* map the 1st page */
2369 void *pvDst;
2370 PGMPAGEMAPLOCK Lock;
2371 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2372 if (RT_FAILURE(rc))
2373 return rc;
2374
2375 /* optimize for the case where access is completely within the first page. */
2376 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2377 if (RT_LIKELY(cb <= cbPage))
2378 {
2379 memcpy(pvDst, pvSrc, cb);
2380 PGMPhysReleasePageMappingLock(pVM, &Lock);
2381 return VINF_SUCCESS;
2382 }
2383
2384 /* copy to the end of the page. */
2385 memcpy(pvDst, pvSrc, cbPage);
2386 PGMPhysReleasePageMappingLock(pVM, &Lock);
2387 GCPhysDst += cbPage;
2388 pvSrc = (const uint8_t *)pvSrc + cbPage;
2389 cb -= cbPage;
2390
2391 /*
2392 * Page by page.
2393 */
2394 for (;;)
2395 {
2396 /* map the page */
2397 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2398 if (RT_FAILURE(rc))
2399 return rc;
2400
2401 /* last page? */
2402 if (cb <= PAGE_SIZE)
2403 {
2404 memcpy(pvDst, pvSrc, cb);
2405 PGMPhysReleasePageMappingLock(pVM, &Lock);
2406 return VINF_SUCCESS;
2407 }
2408
2409 /* copy the entire page and advance */
2410 memcpy(pvDst, pvSrc, PAGE_SIZE);
2411 PGMPhysReleasePageMappingLock(pVM, &Lock);
2412 GCPhysDst += PAGE_SIZE;
2413 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2414 cb -= PAGE_SIZE;
2415 }
2416 /* won't ever get here. */
2417}
2418
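/*
 * Illustrative usage sketch (not part of the original file): poking a value
 * straight into guest RAM, bypassing MMIO and access handlers. The address
 * and value are made-up example values.
 *
 *     uint16_t const u16 = 0x1234;
 *     int rcPoke = PGMPhysSimpleWriteGCPhys(pVM, UINT64_C(0x0400), &u16, sizeof(u16));
 *     AssertRC(rcPoke);
 */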
2419
2420/**
2421 * Read from guest physical memory referenced by GC pointer.
2422 *
2423 * This function uses the current CR3/CR0/CR4 of the guest and will
2424 * bypass access handlers and not set any accessed bits.
2425 *
2426 * @returns VBox status.
2427 * @param pVCpu The VMCPU handle.
2428 * @param pvDst The destination address.
2429 * @param GCPtrSrc The source address (GC pointer).
2430 * @param cb The number of bytes to read.
2431 */
2432VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2433{
2434 PVM pVM = pVCpu->CTX_SUFF(pVM);
2435
2436 /*
2437 * Treat the first page as a special case.
2438 */
2439 if (!cb)
2440 return VINF_SUCCESS;
2441
2442 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleRead));
2443 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
2444
2445 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
2446 * when many VCPUs are fighting for the lock.
2447 */
2448 pgmLock(pVM);
2449
2450 /* map the 1st page */
2451 void const *pvSrc;
2452 PGMPAGEMAPLOCK Lock;
2453 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2454 if (RT_FAILURE(rc))
2455 {
2456 pgmUnlock(pVM);
2457 return rc;
2458 }
2459
2460 /* optimize for the case where access is completely within the first page. */
2461 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2462 if (RT_LIKELY(cb <= cbPage))
2463 {
2464 memcpy(pvDst, pvSrc, cb);
2465 PGMPhysReleasePageMappingLock(pVM, &Lock);
2466 pgmUnlock(pVM);
2467 return VINF_SUCCESS;
2468 }
2469
2470 /* copy to the end of the page. */
2471 memcpy(pvDst, pvSrc, cbPage);
2472 PGMPhysReleasePageMappingLock(pVM, &Lock);
2473 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
2474 pvDst = (uint8_t *)pvDst + cbPage;
2475 cb -= cbPage;
2476
2477 /*
2478 * Page by page.
2479 */
2480 for (;;)
2481 {
2482 /* map the page */
2483 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2484 if (RT_FAILURE(rc))
2485 {
2486 pgmUnlock(pVM);
2487 return rc;
2488 }
2489
2490 /* last page? */
2491 if (cb <= PAGE_SIZE)
2492 {
2493 memcpy(pvDst, pvSrc, cb);
2494 PGMPhysReleasePageMappingLock(pVM, &Lock);
2495 pgmUnlock(pVM);
2496 return VINF_SUCCESS;
2497 }
2498
2499 /* copy the entire page and advance */
2500 memcpy(pvDst, pvSrc, PAGE_SIZE);
2501 PGMPhysReleasePageMappingLock(pVM, &Lock);
2502 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
2503 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2504 cb -= PAGE_SIZE;
2505 }
2506 /* won't ever get here. */
2507}
2508
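/*
 * Illustrative usage sketch (not part of the original file): reading guest
 * memory through a guest virtual address using the current CR3, without
 * invoking access handlers or setting the accessed bit. GCPtrStack is an
 * assumed caller-supplied guest pointer.
 *
 *     uint64_t au64Frame[2];
 *     int rcFrm = PGMPhysSimpleReadGCPtr(pVCpu, au64Frame, GCPtrStack, sizeof(au64Frame));
 *     if (RT_FAILURE(rcFrm))
 *         return rcFrm;   (e.g. the page isn't present in the guest page tables)
 */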
2509
2510/**
2511 * Write to guest physical memory referenced by GC pointer.
2512 *
2513 * This function uses the current CR3/CR0/CR4 of the guest and will
2514 * bypass access handlers and not set dirty or accessed bits.
2515 *
2516 * @returns VBox status.
2517 * @param pVCpu The VMCPU handle.
2518 * @param GCPtrDst The destination address (GC pointer).
2519 * @param pvSrc The source address.
2520 * @param cb The number of bytes to write.
2521 */
2522VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2523{
2524 PVM pVM = pVCpu->CTX_SUFF(pVM);
2525
2526 /*
2527 * Treat the first page as a special case.
2528 */
2529 if (!cb)
2530 return VINF_SUCCESS;
2531
2532 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWrite));
2533 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
2534
2535 /* map the 1st page */
2536 void *pvDst;
2537 PGMPAGEMAPLOCK Lock;
2538 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2539 if (RT_FAILURE(rc))
2540 return rc;
2541
2542 /* optimize for the case where access is completely within the first page. */
2543 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2544 if (RT_LIKELY(cb <= cbPage))
2545 {
2546 memcpy(pvDst, pvSrc, cb);
2547 PGMPhysReleasePageMappingLock(pVM, &Lock);
2548 return VINF_SUCCESS;
2549 }
2550
2551 /* copy to the end of the page. */
2552 memcpy(pvDst, pvSrc, cbPage);
2553 PGMPhysReleasePageMappingLock(pVM, &Lock);
2554 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2555 pvSrc = (const uint8_t *)pvSrc + cbPage;
2556 cb -= cbPage;
2557
2558 /*
2559 * Page by page.
2560 */
2561 for (;;)
2562 {
2563 /* map the page */
2564 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2565 if (RT_FAILURE(rc))
2566 return rc;
2567
2568 /* last page? */
2569 if (cb <= PAGE_SIZE)
2570 {
2571 memcpy(pvDst, pvSrc, cb);
2572 PGMPhysReleasePageMappingLock(pVM, &Lock);
2573 return VINF_SUCCESS;
2574 }
2575
2576 /* copy the entire page and advance */
2577 memcpy(pvDst, pvSrc, PAGE_SIZE);
2578 PGMPhysReleasePageMappingLock(pVM, &Lock);
2579 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2580 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2581 cb -= PAGE_SIZE;
2582 }
2583 /* won't ever get here. */
2584}
2585
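/*
 * Illustrative usage sketch (not part of the original file): writing through
 * a guest virtual address without setting dirty/accessed bits or invoking
 * handlers. GCPtrDst and u32Value are assumed to come from the caller.
 *
 *     int rcWr = PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrDst, &u32Value, sizeof(u32Value));
 *     AssertRC(rcWr);
 */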
2586
2587/**
2588 * Write to guest physical memory referenced by GC pointer and update the PTE.
2589 *
2590 * This function uses the current CR3/CR0/CR4 of the guest and will
2591 * bypass access handlers but will set any dirty and accessed bits in the PTE.
2592 *
2593 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
2594 *
2595 * @returns VBox status.
2596 * @param pVCpu The VMCPU handle.
2597 * @param GCPtrDst The destination address (GC pointer).
2598 * @param pvSrc The source address.
2599 * @param cb The number of bytes to write.
2600 */
2601VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2602{
2603 PVM pVM = pVCpu->CTX_SUFF(pVM);
2604
2605 /*
2606 * Treat the first page as a special case.
2607 * Btw. this is the same code as in PGMPhyssimpleWriteGCPtr excep for the PGMGstModifyPage.
2608     * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
2609 if (!cb)
2610 return VINF_SUCCESS;
2611
2612 /* map the 1st page */
2613 void *pvDst;
2614 PGMPAGEMAPLOCK Lock;
2615 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2616 if (RT_FAILURE(rc))
2617 return rc;
2618
2619 /* optimize for the case where access is completely within the first page. */
2620 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2621 if (RT_LIKELY(cb <= cbPage))
2622 {
2623 memcpy(pvDst, pvSrc, cb);
2624 PGMPhysReleasePageMappingLock(pVM, &Lock);
2625 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2626 return VINF_SUCCESS;
2627 }
2628
2629 /* copy to the end of the page. */
2630 memcpy(pvDst, pvSrc, cbPage);
2631 PGMPhysReleasePageMappingLock(pVM, &Lock);
2632 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2633 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2634 pvSrc = (const uint8_t *)pvSrc + cbPage;
2635 cb -= cbPage;
2636
2637 /*
2638 * Page by page.
2639 */
2640 for (;;)
2641 {
2642 /* map the page */
2643 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2644 if (RT_FAILURE(rc))
2645 return rc;
2646
2647 /* last page? */
2648 if (cb <= PAGE_SIZE)
2649 {
2650 memcpy(pvDst, pvSrc, cb);
2651 PGMPhysReleasePageMappingLock(pVM, &Lock);
2652 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2653 return VINF_SUCCESS;
2654 }
2655
2656 /* copy the entire page and advance */
2657 memcpy(pvDst, pvSrc, PAGE_SIZE);
2658 PGMPhysReleasePageMappingLock(pVM, &Lock);
2659 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2660 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2661 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2662 cb -= PAGE_SIZE;
2663 }
2664 /* won't ever get here. */
2665}
2666
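/*
 * Illustrative usage sketch (not part of the original file): like the plain
 * simple write above, but also marking the guest PTE accessed and dirty,
 * which is what instruction emulation typically wants. GCPtrDst, pvData and
 * cbData are assumed to be caller-supplied.
 *
 *     int rcDirty = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, pvData, cbData);
 *     if (RT_FAILURE(rcDirty))
 *         return rcDirty;
 */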
2667
2668/**
2669 * Read from guest physical memory referenced by GC pointer.
2670 *
2671 * This function uses the current CR3/CR0/CR4 of the guest and will
2672 * respect access handlers and set accessed bits.
2673 *
2674 * @returns VBox status.
2675 * @param pVCpu The VMCPU handle.
2676 * @param pvDst The destination address.
2677 * @param GCPtrSrc The source address (GC pointer).
2678 * @param cb The number of bytes to read.
2679 * @thread The vCPU EMT.
2680 */
2681VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2682{
2683 RTGCPHYS GCPhys;
2684 uint64_t fFlags;
2685 int rc;
2686 PVM pVM = pVCpu->CTX_SUFF(pVM);
2687
2688 /*
2689 * Anything to do?
2690 */
2691 if (!cb)
2692 return VINF_SUCCESS;
2693
2694 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
2695
2696 /*
2697 * Optimize reads within a single page.
2698 */
2699 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2700 {
2701 /* Convert virtual to physical address + flags */
2702 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2703 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2704 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2705
2706 /* mark the guest page as accessed. */
2707 if (!(fFlags & X86_PTE_A))
2708 {
2709 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2710 AssertRC(rc);
2711 }
2712
2713 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
2714 }
2715
2716 /*
2717 * Page by page.
2718 */
2719 for (;;)
2720 {
2721 /* Convert virtual to physical address + flags */
2722 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2723 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2724 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2725
2726 /* mark the guest page as accessed. */
2727 if (!(fFlags & X86_PTE_A))
2728 {
2729 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2730 AssertRC(rc);
2731 }
2732
2733 /* copy */
2734 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2735 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
2736 if (cbRead >= cb || RT_FAILURE(rc))
2737 return rc;
2738
2739 /* next */
2740 cb -= cbRead;
2741 pvDst = (uint8_t *)pvDst + cbRead;
2742 GCPtrSrc += cbRead;
2743 }
2744}
2745
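/*
 * Illustrative usage sketch (not part of the original file): a handler
 * respecting read through a guest virtual address; the accessed bit is set
 * for the pages touched. GCPtrSrc is assumed to be caller-supplied.
 *
 *     char szBuf[128];
 *     int rcStr = PGMPhysReadGCPtr(pVCpu, szBuf, GCPtrSrc, sizeof(szBuf));
 *     if (rcStr == VERR_PGM_PHYS_WR_HIT_HANDLER)
 *         return rcStr;   (R0/RC only: fall back to ring-3)
 */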
2746
2747/**
2748 * Write to guest physical memory referenced by GC pointer.
2749 *
2750 * This function uses the current CR3/CR0/CR4 of the guest and will
2751 * respect access handlers and set dirty and accessed bits.
2752 *
2753 * @returns VBox status.
2754 * @retval VINF_SUCCESS.
2755 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2756 *
2757 * @param pVCpu The VMCPU handle.
2758 * @param GCPtrDst The destination address (GC pointer).
2759 * @param pvSrc The source address.
2760 * @param cb The number of bytes to write.
2761 */
2762VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2763{
2764 RTGCPHYS GCPhys;
2765 uint64_t fFlags;
2766 int rc;
2767 PVM pVM = pVCpu->CTX_SUFF(pVM);
2768
2769 /*
2770 * Anything to do?
2771 */
2772 if (!cb)
2773 return VINF_SUCCESS;
2774
2775 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
2776
2777 /*
2778 * Optimize writes within a single page.
2779 */
2780 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2781 {
2782 /* Convert virtual to physical address + flags */
2783 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2784 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2785 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2786
2787 /* Mention when we ignore X86_PTE_RW... */
2788 if (!(fFlags & X86_PTE_RW))
2789            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2790
2791 /* Mark the guest page as accessed and dirty if necessary. */
2792 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2793 {
2794 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2795 AssertRC(rc);
2796 }
2797
2798 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
2799 }
2800
2801 /*
2802 * Page by page.
2803 */
2804 for (;;)
2805 {
2806 /* Convert virtual to physical address + flags */
2807 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2808 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2809 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2810
2811 /* Mention when we ignore X86_PTE_RW... */
2812 if (!(fFlags & X86_PTE_RW))
2813            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2814
2815 /* Mark the guest page as accessed and dirty if necessary. */
2816 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2817 {
2818 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2819 AssertRC(rc);
2820 }
2821
2822 /* copy */
2823 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2824        rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
2825 if (cbWrite >= cb || RT_FAILURE(rc))
2826 return rc;
2827
2828 /* next */
2829 cb -= cbWrite;
2830 pvSrc = (uint8_t *)pvSrc + cbWrite;
2831 GCPtrDst += cbWrite;
2832 }
2833}
2834
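/*
 * Illustrative usage sketch (not part of the original file): a handler
 * respecting write through a guest virtual address, setting both the
 * accessed and dirty bits. GCPtrDst and the value are assumed caller-supplied.
 *
 *     uint8_t const bVal = 0x5a;
 *     int rcWrGC = PGMPhysWriteGCPtr(pVCpu, GCPtrDst, &bVal, sizeof(bVal));
 *     AssertRC(rcWrGC);
 */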
2835
2836/**
2837 * Performs a read of guest virtual memory for instruction emulation.
2838 *
2839 * This will check permissions, raise exceptions and update the access bits.
2840 *
2841 * The current implementation will bypass all access handlers. It may later be
2842 * changed to at least respect MMIO.
2843 *
2844 *
2845 * @returns VBox status code suitable to scheduling.
2846 * @retval VINF_SUCCESS if the read was performed successfully.
2847 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2848 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2849 *
2850 * @param pVCpu The VMCPU handle.
2851 * @param pCtxCore The context core.
2852 * @param pvDst Where to put the bytes we've read.
2853 * @param GCPtrSrc The source address.
2854 * @param cb The number of bytes to read. Not more than a page.
2855 *
2856 * @remark This function will dynamically map physical pages in GC. This may unmap
2857 * mappings done by the caller. Be careful!
2858 */
2859VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
2860{
2861 PVM pVM = pVCpu->CTX_SUFF(pVM);
2862 Assert(cb <= PAGE_SIZE);
2863
2864/** @todo r=bird: This isn't perfect!
2865 * -# It's not checking for reserved bits being 1.
2866 * -# It's not correctly dealing with the access bit.
2867 * -# It's not respecting MMIO memory or any other access handlers.
2868 */
2869 /*
2870 * 1. Translate virtual to physical. This may fault.
2871 * 2. Map the physical address.
2872 * 3. Do the read operation.
2873 * 4. Set access bits if required.
2874 */
2875 int rc;
2876 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
2877 if (cb <= cb1)
2878 {
2879 /*
2880 * Not crossing pages.
2881 */
2882 RTGCPHYS GCPhys;
2883 uint64_t fFlags;
2884 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
2885 if (RT_SUCCESS(rc))
2886 {
2887 /** @todo we should check reserved bits ... */
2888 void *pvSrc;
2889 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
2890 switch (rc)
2891 {
2892 case VINF_SUCCESS:
2893 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
2894 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
2895 break;
2896 case VERR_PGM_PHYS_PAGE_RESERVED:
2897 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2898 memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
2899 break;
2900 default:
2901 return rc;
2902 }
2903
2904 /** @todo access bit emulation isn't 100% correct. */
2905 if (!(fFlags & X86_PTE_A))
2906 {
2907 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2908 AssertRC(rc);
2909 }
2910 return VINF_SUCCESS;
2911 }
2912 }
2913 else
2914 {
2915 /*
2916 * Crosses pages.
2917 */
2918 size_t cb2 = cb - cb1;
2919 uint64_t fFlags1;
2920 RTGCPHYS GCPhys1;
2921 uint64_t fFlags2;
2922 RTGCPHYS GCPhys2;
2923 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
2924 if (RT_SUCCESS(rc))
2925 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
2926 if (RT_SUCCESS(rc))
2927 {
2928 /** @todo we should check reserved bits ... */
2929 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
2930 void *pvSrc1;
2931 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
2932 switch (rc)
2933 {
2934 case VINF_SUCCESS:
2935 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
2936 break;
2937 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2938 memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
2939 break;
2940 default:
2941 return rc;
2942 }
2943
2944 void *pvSrc2;
2945 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
2946 switch (rc)
2947 {
2948 case VINF_SUCCESS:
2949 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
2950 break;
2951 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2952 memset((uint8_t *)pvDst + cb1, 0, cb2); /** @todo this is wrong, it should be 0xff */
2953 break;
2954 default:
2955 return rc;
2956 }
2957
2958 if (!(fFlags1 & X86_PTE_A))
2959 {
2960 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2961 AssertRC(rc);
2962 }
2963 if (!(fFlags2 & X86_PTE_A))
2964 {
2965 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2966 AssertRC(rc);
2967 }
2968 return VINF_SUCCESS;
2969 }
2970 }
2971
2972 /*
2973 * Raise a #PF.
2974 */
2975 uint32_t uErr;
2976
2977 /* Get the current privilege level. */
2978 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
2979 switch (rc)
2980 {
2981 case VINF_SUCCESS:
2982 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
2983 break;
2984
2985 case VERR_PAGE_NOT_PRESENT:
2986 case VERR_PAGE_TABLE_NOT_PRESENT:
2987 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
2988 break;
2989
2990 default:
2991 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
2992 return rc;
2993 }
2994 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
2995 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
2996}
2997
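/*
 * Illustrative usage sketch (not part of the original file): fetching an
 * operand during instruction emulation; on a translation failure this raises
 * \#PF in the guest and returns a scheduling status instead. pRegFrame and
 * GCPtrPar are assumed to come from the emulation context.
 *
 *     uint16_t u16Operand;
 *     int rcEm = PGMPhysInterpretedRead(pVCpu, pRegFrame, &u16Operand, GCPtrPar, sizeof(u16Operand));
 *     if (rcEm != VINF_SUCCESS)
 *         return rcEm;   (e.g. VINF_EM_RAW_GUEST_TRAP or VINF_TRPM_XCPT_DISPATCHED)
 */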
2998
2999/**
3000 * Performs a read of guest virtual memory for instruction emulation.
3001 *
3002 * This will check permissions, raise exceptions and update the access bits.
3003 *
3004 * The current implementation will bypass all access handlers. It may later be
3005 * changed to at least respect MMIO.
3006 *
3007 *
3008 * @returns VBox status code suitable to scheduling.
3009 * @retval VINF_SUCCESS if the read was performed successfully.
3010 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3011 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3012 *
3013 * @param pVCpu The VMCPU handle.
3014 * @param pCtxCore The context core.
3015 * @param pvDst Where to put the bytes we've read.
3016 * @param GCPtrSrc The source address.
3017 * @param cb The number of bytes to read. Not more than a page.
3018 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3019 *                      an appropriate error status will be returned (no
3020 *                      informational status at all).
3021 *
3022 *
3023 * @remarks Takes the PGM lock.
3024 * @remarks A page fault on the 2nd page of the access will be raised without
3025 * writing the bits on the first page since we're ASSUMING that the
3026 * caller is emulating an instruction access.
3027 * @remarks This function will dynamically map physical pages in GC. This may
3028 * unmap mappings done by the caller. Be careful!
3029 */
3030VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
3031{
3032 PVM pVM = pVCpu->CTX_SUFF(pVM);
3033 Assert(cb <= PAGE_SIZE);
3034
3035 /*
3036 * 1. Translate virtual to physical. This may fault.
3037 * 2. Map the physical address.
3038 * 3. Do the read operation.
3039 * 4. Set access bits if required.
3040 */
3041 int rc;
3042 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3043 if (cb <= cb1)
3044 {
3045 /*
3046 * Not crossing pages.
3047 */
3048 RTGCPHYS GCPhys;
3049 uint64_t fFlags;
3050 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3051 if (RT_SUCCESS(rc))
3052 {
3053 if (1) /** @todo we should check reserved bits ... */
3054 {
3055 const void *pvSrc;
3056 PGMPAGEMAPLOCK Lock;
3057 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3058 switch (rc)
3059 {
3060 case VINF_SUCCESS:
3061 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3062 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3063 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3064 break;
3065 case VERR_PGM_PHYS_PAGE_RESERVED:
3066 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3067 memset(pvDst, 0xff, cb);
3068 break;
3069 default:
3070 AssertMsgFailed(("%Rrc\n", rc));
3071 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3072 return rc;
3073 }
3074 PGMPhysReleasePageMappingLock(pVM, &Lock);
3075
3076 if (!(fFlags & X86_PTE_A))
3077 {
3078 /** @todo access bit emulation isn't 100% correct. */
3079 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3080 AssertRC(rc);
3081 }
3082 return VINF_SUCCESS;
3083 }
3084 }
3085 }
3086 else
3087 {
3088 /*
3089 * Crosses pages.
3090 */
3091 size_t cb2 = cb - cb1;
3092 uint64_t fFlags1;
3093 RTGCPHYS GCPhys1;
3094 uint64_t fFlags2;
3095 RTGCPHYS GCPhys2;
3096 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3097 if (RT_SUCCESS(rc))
3098 {
3099 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3100 if (RT_SUCCESS(rc))
3101 {
3102 if (1) /** @todo we should check reserved bits ... */
3103 {
3104 const void *pvSrc;
3105 PGMPAGEMAPLOCK Lock;
3106 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3107 switch (rc)
3108 {
3109 case VINF_SUCCESS:
3110 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3111 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3112 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3113 PGMPhysReleasePageMappingLock(pVM, &Lock);
3114 break;
3115 case VERR_PGM_PHYS_PAGE_RESERVED:
3116 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3117 memset(pvDst, 0xff, cb1);
3118 break;
3119 default:
3120 AssertMsgFailed(("%Rrc\n", rc));
3121 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3122 return rc;
3123 }
3124
3125 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3126 switch (rc)
3127 {
3128 case VINF_SUCCESS:
3129 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3130 PGMPhysReleasePageMappingLock(pVM, &Lock);
3131 break;
3132 case VERR_PGM_PHYS_PAGE_RESERVED:
3133 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3134 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3135 break;
3136 default:
3137 AssertMsgFailed(("%Rrc\n", rc));
3138 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3139 return rc;
3140 }
3141
3142 if (!(fFlags1 & X86_PTE_A))
3143 {
3144 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3145 AssertRC(rc);
3146 }
3147 if (!(fFlags2 & X86_PTE_A))
3148 {
3149 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3150 AssertRC(rc);
3151 }
3152 return VINF_SUCCESS;
3153 }
3154 /* sort out which page */
3155 }
3156 else
3157 GCPtrSrc += cb1; /* fault on 2nd page */
3158 }
3159 }
3160
3161 /*
3162 * Raise a #PF if we're allowed to do that.
3163 */
3164 /* Calc the error bits. */
3165 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3166 uint32_t uErr;
3167 switch (rc)
3168 {
3169 case VINF_SUCCESS:
3170 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3171 rc = VERR_ACCESS_DENIED;
3172 break;
3173
3174 case VERR_PAGE_NOT_PRESENT:
3175 case VERR_PAGE_TABLE_NOT_PRESENT:
3176 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3177 break;
3178
3179 default:
3180 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3181 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3182 return rc;
3183 }
3184 if (fRaiseTrap)
3185 {
3186 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3187 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3188 }
3189 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3190 return rc;
3191}
3192
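/*
 * Illustrative usage sketch (not part of the original file): like the above,
 * but with control over whether the \#PF is actually injected. Passing false
 * for fRaiseTrap turns the trap into a plain error status the caller can
 * inspect. pRegFrame and GCPtrMem are assumed to be caller-supplied.
 *
 *     uint32_t u32;
 *     int rcNh = PGMPhysInterpretedReadNoHandlers(pVCpu, pRegFrame, &u32, GCPtrMem, sizeof(u32), false);
 *     if (rcNh == VERR_PAGE_NOT_PRESENT || rcNh == VERR_PAGE_TABLE_NOT_PRESENT)
 *         return rcNh;   (handle the miss without faulting the guest)
 */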
3193
3194/**
3195 * Performs a write to guest virtual memory for instruction emulation.
3196 *
3197 * This will check permissions, raise exceptions and update the dirty and access
3198 * bits.
3199 *
3200 * @returns VBox status code suitable to scheduling.
3201 * @retval VINF_SUCCESS if the write was performed successfully.
3202 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3203 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3204 *
3205 * @param pVCpu The VMCPU handle.
3206 * @param pCtxCore The context core.
3207 * @param GCPtrDst The destination address.
3208 * @param pvSrc What to write.
3209 * @param cb The number of bytes to write. Not more than a page.
3210 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3211 *                      an appropriate error status will be returned (no
3212 *                      informational status at all).
3213 *
3214 * @remarks Takes the PGM lock.
3215 * @remarks A page fault on the 2nd page of the access will be raised without
3216 * writing the bits on the first page since we're ASSUMING that the
3217 * caller is emulating an instruction access.
3218 * @remarks This function will dynamically map physical pages in GC. This may
3219 * unmap mappings done by the caller. Be careful!
3220 */
3221VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
3222{
3223 Assert(cb <= PAGE_SIZE);
3224 PVM pVM = pVCpu->CTX_SUFF(pVM);
3225
3226 /*
3227 * 1. Translate virtual to physical. This may fault.
3228 * 2. Map the physical address.
3229 * 3. Do the write operation.
3230 * 4. Set access bits if required.
3231 */
3232 int rc;
3233 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3234 if (cb <= cb1)
3235 {
3236 /*
3237 * Not crossing pages.
3238 */
3239 RTGCPHYS GCPhys;
3240 uint64_t fFlags;
3241 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3242 if (RT_SUCCESS(rc))
3243 {
3244 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3245 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3246 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3247 {
3248 void *pvDst;
3249 PGMPAGEMAPLOCK Lock;
3250 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3251 switch (rc)
3252 {
3253 case VINF_SUCCESS:
3254 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3255 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3256 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3257 PGMPhysReleasePageMappingLock(pVM, &Lock);
3258 break;
3259 case VERR_PGM_PHYS_PAGE_RESERVED:
3260 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3261 /* bit bucket */
3262 break;
3263 default:
3264 AssertMsgFailed(("%Rrc\n", rc));
3265 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3266 return rc;
3267 }
3268
3269 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3270 {
3271 /** @todo dirty & access bit emulation isn't 100% correct. */
3272 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3273 AssertRC(rc);
3274 }
3275 return VINF_SUCCESS;
3276 }
3277 rc = VERR_ACCESS_DENIED;
3278 }
3279 }
3280 else
3281 {
3282 /*
3283 * Crosses pages.
3284 */
3285 size_t cb2 = cb - cb1;
3286 uint64_t fFlags1;
3287 RTGCPHYS GCPhys1;
3288 uint64_t fFlags2;
3289 RTGCPHYS GCPhys2;
3290 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3291 if (RT_SUCCESS(rc))
3292 {
3293 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3294 if (RT_SUCCESS(rc))
3295 {
3296 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3297 && (fFlags2 & X86_PTE_RW))
3298 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3299 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) )
3300 {
3301 void *pvDst;
3302 PGMPAGEMAPLOCK Lock;
3303 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3304 switch (rc)
3305 {
3306 case VINF_SUCCESS:
3307 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3308 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3309 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3310 PGMPhysReleasePageMappingLock(pVM, &Lock);
3311 break;
3312 case VERR_PGM_PHYS_PAGE_RESERVED:
3313 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3314 /* bit bucket */
3315 break;
3316 default:
3317 AssertMsgFailed(("%Rrc\n", rc));
3318 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3319 return rc;
3320 }
3321
3322 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3323 switch (rc)
3324 {
3325 case VINF_SUCCESS:
3326 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3327 PGMPhysReleasePageMappingLock(pVM, &Lock);
3328 break;
3329 case VERR_PGM_PHYS_PAGE_RESERVED:
3330 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3331 /* bit bucket */
3332 break;
3333 default:
3334 AssertMsgFailed(("%Rrc\n", rc));
3335 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3336 return rc;
3337 }
3338
3339 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3340 {
3341 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3342 AssertRC(rc);
3343 }
3344 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3345 {
3346 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3347 AssertRC(rc);
3348 }
3349 return VINF_SUCCESS;
3350 }
3351 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3352 GCPtrDst += cb1; /* fault on the 2nd page. */
3353 rc = VERR_ACCESS_DENIED;
3354 }
3355 else
3356 GCPtrDst += cb1; /* fault on the 2nd page. */
3357 }
3358 }
3359
3360 /*
3361 * Raise a #PF if we're allowed to do that.
3362 */
3363 /* Calc the error bits. */
3364 uint32_t uErr;
3365 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3366 switch (rc)
3367 {
3368 case VINF_SUCCESS:
3369 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3370 rc = VERR_ACCESS_DENIED;
3371 break;
3372
3373 case VERR_ACCESS_DENIED:
3374 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3375 break;
3376
3377 case VERR_PAGE_NOT_PRESENT:
3378 case VERR_PAGE_TABLE_NOT_PRESENT:
3379 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3380 break;
3381
3382 default:
3383 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3384 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3385 return rc;
3386 }
3387 if (fRaiseTrap)
3388 {
3389 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3390 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3391 }
3392 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3393 return rc;
3394}
3395
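/*
 * Illustrative usage sketch (not part of the original file): committing an
 * emulated store; write permissions and CR0.WP are checked, the accessed and
 * dirty bits are updated, and the \#PF is raised for us when fRaiseTrap is
 * true. pRegFrame, GCPtrMem and u64Val are assumed to come from the caller.
 *
 *     int rcSt = PGMPhysInterpretedWriteNoHandlers(pVCpu, pRegFrame, GCPtrMem, &u64Val, sizeof(u64Val), true);
 *     if (rcSt != VINF_SUCCESS)
 *         return rcSt;   (includes VINF_EM_RAW_GUEST_TRAP when the trap was queued)
 */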
3396