VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 25585

Last change on this file since 25585 was 25585, checked in by vboxsync, 15 years ago

Better make absolutely sure the TLB entry is invalid

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 120.9 KB
1/* $Id: PGMAllPhys.cpp 25585 2009-12-26 11:22:13Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM_PHYS
26#include <VBox/pgm.h>
27#include <VBox/trpm.h>
28#include <VBox/vmm.h>
29#include <VBox/iom.h>
30#include <VBox/em.h>
31#include <VBox/rem.h>
32#include "PGMInternal.h"
33#include <VBox/vm.h>
34#include <VBox/param.h>
35#include <VBox/err.h>
36#include <iprt/assert.h>
37#include <iprt/string.h>
38#include <iprt/asm.h>
39#include <VBox/log.h>
40#ifdef IN_RING3
41# include <iprt/thread.h>
42#endif
43
44
45
46#ifndef IN_RING3
47
48/**
49 * \#PF Handler callback for Guest ROM range write access.
50 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
51 *
52 * @returns VBox status code (appropriate for trap handling and GC return).
53 * @param pVM VM Handle.
54 * @param uErrorCode CPU Error code.
55 * @param pRegFrame Trap register frame.
56 * @param pvFault The fault address (cr2).
57 * @param GCPhysFault The GC physical address corresponding to pvFault.
58 * @param pvUser User argument. Pointer to the ROM range structure.
59 */
60VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
61{
62 int rc;
63 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
64 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
65 PVMCPU pVCpu = VMMGetCpu(pVM);
66
67 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
68 switch (pRom->aPages[iPage].enmProt)
69 {
70 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
71 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
72 {
73 /*
74 * If it's a simple instruction which doesn't change the cpu state
75 * we will simply skip it. Otherwise we'll have to defer it to REM.
76 */
77 uint32_t cbOp;
78 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
79 rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, &cbOp);
80 if ( RT_SUCCESS(rc)
81 && pDis->mode == CPUMODE_32BIT /** @todo why does this matter? */
82 && !(pDis->prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
83 {
84 switch (pDis->opcode)
85 {
86 /** @todo Find other instructions we can safely skip, possibly
87 * adding this kind of detection to DIS or EM. */
88 case OP_MOV:
89 pRegFrame->rip += cbOp;
90 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteHandled);
91 return VINF_SUCCESS;
92 }
93 }
94 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
95 return rc;
96 break;
97 }
98
99 case PGMROMPROT_READ_RAM_WRITE_RAM:
100 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
101 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
102 AssertRC(rc);
103 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
104
105 case PGMROMPROT_READ_ROM_WRITE_RAM:
106 /* Handle it in ring-3 because it's *way* easier there. */
107 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
108 break;
109
110 default:
111 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
112 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
113 VERR_INTERNAL_ERROR);
114 }
115
116 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteUnhandled);
117 return VINF_EM_RAW_EMULATE_INSTR;
118}
119
120#endif /* !IN_RING3 */
121
122/**
123 * Checks if Address Gate 20 is enabled or not.
124 *
125 * @returns true if enabled.
126 * @returns false if disabled.
127 * @param pVCpu VMCPU handle.
128 */
129VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
130{
131 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
132 return pVCpu->pgm.s.fA20Enabled;
133}
134
135
136/**
137 * Validates a GC physical address.
138 *
139 * @returns true if valid.
140 * @returns false if invalid.
141 * @param pVM The VM handle.
142 * @param GCPhys The physical address to validate.
143 */
144VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
145{
146 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
147 return pPage != NULL;
148}
149
150
151/**
152 * Checks if a GC physical address is a normal page,
153 * i.e. not ROM, MMIO or reserved.
154 *
155 * @returns true if normal.
156 * @returns false if invalid, ROM, MMIO or reserved page.
157 * @param pVM The VM handle.
158 * @param GCPhys The physical address to check.
159 */
160VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
161{
162 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
163 return pPage
164 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
165}
166
167
168/**
169 * Converts a GC physical address to a HC physical address.
170 *
171 * @returns VINF_SUCCESS on success.
172 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
173 * page but has no physical backing.
174 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
175 * GC physical address.
176 *
177 * @param pVM The VM handle.
178 * @param GCPhys The GC physical address to convert.
179 * @param pHCPhys Where to store the HC physical address on success.
180 */
181VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
182{
183 pgmLock(pVM);
184 PPGMPAGE pPage;
185 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
186 if (RT_SUCCESS(rc))
187 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
188 pgmUnlock(pVM);
189 return rc;
190}
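/*
 * Usage sketch: a ring-3 caller wanting the host physical address backing a guest
 * physical address might look like the snippet below. The helper name and the log
 * text are illustrative assumptions, not VBox APIs.
 */
#if 0 /* illustrative sketch */
static int exampleGetHostPhys(PVM pVM, RTGCPHYS GCPhysGuest, PRTHCPHYS pHCPhysOut)
{
    /* Fails with VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS or VERR_PGM_PHYS_PAGE_RESERVED
       when the guest address has no usable backing. */
    int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysGuest, pHCPhysOut);
    if (RT_FAILURE(rc))
        Log(("example: GCPhys %RGp has no usable backing (rc=%Rrc)\n", GCPhysGuest, rc));
    return rc;
}
#endif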
191
192
193/**
194 * Invalidates all page mapping TLBs.
195 *
196 * @param pVM The VM handle.
197 */
198VMMDECL(void) PGMPhysInvalidatePageMapTLB(PVM pVM)
199{
200 pgmLock(pVM);
201 STAM_COUNTER_INC(&pVM->pgm.s.StatPageMapTlbFlushes);
202 /* Clear the shared R0/R3 TLB completely. */
203 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
204 {
205 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
206 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
207 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
208 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
209 }
210 /** @todo clear the RC TLB whenever we add it. */
211 pgmUnlock(pVM);
212}
213
214/**
215 * Invalidates a page mapping TLB entry
216 *
217 * @param pVM The VM handle.
218 * @param GCPhys GCPhys entry to flush
219 */
220VMMDECL(void) PGMPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
221{
222 Assert(PGMIsLocked(pVM));
223
224 STAM_COUNTER_INC(&pVM->pgm.s.StatPageMapTlbFlushEntry);
225 /* Clear the shared R0/R3 TLB entry. */
226#ifdef IN_RC
227 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
228 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
229 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
230 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
231 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
232#else
233 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
234 pTlbe->GCPhys = NIL_RTGCPHYS;
235 pTlbe->pPage = 0;
236 pTlbe->pMap = 0;
237 pTlbe->pv = 0;
238#endif
239 /** @todo clear the RC TLB whenever we add it. */
240}
241
242/**
243 * Makes sure that there is at least one handy page ready for use.
244 *
245 * This will also take the appropriate actions when reaching water-marks.
246 *
247 * @returns VBox status code.
248 * @retval VINF_SUCCESS on success.
249 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
250 *
251 * @param pVM The VM handle.
252 *
253 * @remarks Must be called from within the PGM critical section. It may
254 * nip back to ring-3/0 in some cases.
255 */
256static int pgmPhysEnsureHandyPage(PVM pVM)
257{
258 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
259
260 /*
261 * Do we need to do anything special?
262 */
263#ifdef IN_RING3
264 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
265#else
266 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
267#endif
268 {
269 /*
270 * Allocate pages only if we're out of them, or in ring-3, almost out.
271 */
272#ifdef IN_RING3
273 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
274#else
275 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
276#endif
277 {
278 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
279 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
280#ifdef IN_RING3
281 int rc = PGMR3PhysAllocateHandyPages(pVM);
282#else
283 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
284#endif
285 if (RT_UNLIKELY(rc != VINF_SUCCESS))
286 {
287 if (RT_FAILURE(rc))
288 return rc;
289 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
290 if (!pVM->pgm.s.cHandyPages)
291 {
292 LogRel(("PGM: no more handy pages!\n"));
293 return VERR_EM_NO_MEMORY;
294 }
295 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
296 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
297#ifdef IN_RING3
298 REMR3NotifyFF(pVM);
299#else
300 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
301#endif
302 }
303 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
304 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
305 ("%u\n", pVM->pgm.s.cHandyPages),
306 VERR_INTERNAL_ERROR);
307 }
308 else
309 {
310 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
311 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
312#ifndef IN_RING3
313 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
314 {
315 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
316 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
317 }
318#endif
319 }
320 }
321
322 return VINF_SUCCESS;
323}
324
325
326/**
327 * Replace a zero or shared page with a new page that we can write to.
328 *
329 * @returns The following VBox status codes.
330 * @retval VINF_SUCCESS on success, pPage is modified.
331 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
332 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
333 *
334 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
335 *
336 * @param pVM The VM address.
337 * @param pPage The physical page tracking structure. This will
338 * be modified on success.
339 * @param GCPhys The address of the page.
340 *
341 * @remarks Must be called from within the PGM critical section. It may
342 * nip back to ring-3/0 in some cases.
343 *
344 * @remarks This function shouldn't really fail, however if it does
345 * it probably means we've screwed up the size of handy pages and/or
346 * the low-water mark. Or, that some device I/O is causing a lot of
347 * pages to be allocated while the host is in a low-memory
348 * condition. This latter should be handled elsewhere and in a more
349 * controlled manner, it's on the @bugref{3170} todo list...
350 */
351int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
352{
353 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
354
355 /*
356 * Prereqs.
357 */
358 Assert(PGMIsLocked(pVM));
359 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
360 Assert(!PGM_PAGE_IS_MMIO(pPage));
361
362
363 /*
364 * Flush any shadow page table mappings of the page.
365 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
366 */
367 bool fFlushTLBs = false;
368 int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
369 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
370
371 /*
372 * Ensure that we've got a page handy, take it and use it.
373 */
374 int rc2 = pgmPhysEnsureHandyPage(pVM);
375 if (RT_FAILURE(rc2))
376 {
377 if (fFlushTLBs)
378 PGM_INVL_ALL_VCPU_TLBS(pVM);
379 Assert(rc2 == VERR_EM_NO_MEMORY);
380 return rc2;
381 }
382 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
383 Assert(PGMIsLocked(pVM));
384 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
385 Assert(!PGM_PAGE_IS_MMIO(pPage));
386
387 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
388 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
389 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
390 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
391 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
392 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
393
394 /*
395 * There are one or two actions to be taken the next time we allocate handy pages:
396 * - Tell the GMM (global memory manager) what the page is being used for.
397 * (Speeds up replacement operations - sharing and defragmenting.)
398 * - If the current backing is shared, it must be freed.
399 */
400 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
401 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
402
403 if (PGM_PAGE_IS_SHARED(pPage))
404 {
405 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
406 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
407 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
408
409 Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
410 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
411 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageReplaceShared));
412 pVM->pgm.s.cSharedPages--;
413 AssertMsgFailed(("TODO: copy shared page content")); /** @todo err.. what about copying the page content? */
414 }
415 else
416 {
417 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
418 STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
419 pVM->pgm.s.cZeroPages--;
420 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
421 }
422
423 /*
424 * Do the PGMPAGE modifications.
425 */
426 pVM->pgm.s.cPrivatePages++;
427 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
428 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
429 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
430 PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
431
432 if ( fFlushTLBs
433 && rc != VINF_PGM_GCPHYS_ALIASED)
434 PGM_INVL_ALL_VCPU_TLBS(pVM);
435 return rc;
436}
437
438
439/**
440 * Deal with a write monitored page.
441 *
442 * @returns VBox strict status code.
443 *
444 * @param pVM The VM address.
445 * @param pPage The physical page tracking structure.
446 *
447 * @remarks Called from within the PGM critical section.
448 */
449void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
450{
451 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
452 PGM_PAGE_SET_WRITTEN_TO(pPage);
453 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
454 Assert(pVM->pgm.s.cMonitoredPages > 0);
455 pVM->pgm.s.cMonitoredPages--;
456 pVM->pgm.s.cWrittenToPages++;
457}
458
459
460/**
461 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
462 *
463 * @returns VBox strict status code.
464 * @retval VINF_SUCCESS on success.
465 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
466 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
467 *
468 * @param pVM The VM address.
469 * @param pPage The physical page tracking structure.
470 * @param GCPhys The address of the page.
471 *
472 * @remarks Called from within the PGM critical section.
473 */
474int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
475{
476 switch (PGM_PAGE_GET_STATE(pPage))
477 {
478 case PGM_PAGE_STATE_WRITE_MONITORED:
479 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
480 /* fall thru */
481 default: /* to shut up GCC */
482 case PGM_PAGE_STATE_ALLOCATED:
483 return VINF_SUCCESS;
484
485 /*
486 * Zero pages can be dummy pages for MMIO or reserved memory,
487 * so we need to check the flags before joining cause with
488 * shared page replacement.
489 */
490 case PGM_PAGE_STATE_ZERO:
491 if (PGM_PAGE_IS_MMIO(pPage))
492 return VERR_PGM_PHYS_PAGE_RESERVED;
493 /* fall thru */
494 case PGM_PAGE_STATE_SHARED:
495 return pgmPhysAllocPage(pVM, pPage, GCPhys);
496 }
497}
498
499
500/**
501 * Wrapper for pgmPhysPageMakeWritable which enters the critsect.
502 *
503 * @returns VBox strict status code.
504 * @retval VINF_SUCCESS on success.
505 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
506 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
507 *
508 * @param pVM The VM address.
509 * @param pPage The physical page tracking structure.
510 * @param GCPhys The address of the page.
511 */
512int pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
513{
514 int rc = pgmLock(pVM);
515 if (RT_SUCCESS(rc))
516 {
517 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
518 pgmUnlock(pVM);
519 }
520 return rc;
521}
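/*
 * Sketch of typical internal use (the local variable names are illustrative and the
 * caller is assumed not to hold the PGM lock): resolve the page and make it writable
 * only when it is not already in the ALLOCATED state.
 */
#if 0 /* illustrative sketch */
    PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
    if (   pPage
        && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
    {
        /* May replace a zero or shared page; VINF_PGM_SYNC_CR3 signals a pending pool flush. */
        int rc = pgmPhysPageMakeWritableUnlocked(pVM, pPage, GCPhys);
        AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
    }
#endif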
522
523
524/**
525 * Internal usage: Map the page specified by its GMM ID.
526 *
527 * This is similar to pgmPhysPageMap.
528 *
529 * @returns VBox status code.
530 *
531 * @param pVM The VM handle.
532 * @param idPage The Page ID.
533 * @param HCPhys The physical address (for RC).
534 * @param ppv Where to store the mapping address.
535 *
536 * @remarks Called from within the PGM critical section. The mapping is only
537 * valid while you're inside this section.
538 */
539int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
540{
541 /*
542 * Validation.
543 */
544 Assert(PGMIsLocked(pVM));
545 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
546 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
547 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
548
549#ifdef IN_RC
550 /*
551 * Map it by HCPhys.
552 */
553 return PGMDynMapHCPage(pVM, HCPhys, ppv);
554
555#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
556 /*
557 * Map it by HCPhys.
558 */
559 return pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
560
561#else
562 /*
563 * Find/make Chunk TLB entry for the mapping chunk.
564 */
565 PPGMCHUNKR3MAP pMap;
566 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
567 if (pTlbe->idChunk == idChunk)
568 {
569 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
570 pMap = pTlbe->pChunk;
571 }
572 else
573 {
574 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
575
576 /*
577 * Find the chunk, map it if necessary.
578 */
579 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
580 if (!pMap)
581 {
582# ifdef IN_RING0
583 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
584 AssertRCReturn(rc, rc);
585 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
586 Assert(pMap);
587# else
588 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
589 if (RT_FAILURE(rc))
590 return rc;
591# endif
592 }
593
594 /*
595 * Enter it into the Chunk TLB.
596 */
597 pTlbe->idChunk = idChunk;
598 pTlbe->pChunk = pMap;
599 pMap->iAge = 0;
600 }
601
602 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
603 return VINF_SUCCESS;
604#endif
605}
606
607
608/**
609 * Maps a page into the current virtual address space so it can be accessed.
610 *
611 * @returns VBox status code.
612 * @retval VINF_SUCCESS on success.
613 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
614 *
615 * @param pVM The VM address.
616 * @param pPage The physical page tracking structure.
617 * @param GCPhys The address of the page.
618 * @param ppMap Where to store the address of the mapping tracking structure.
619 * @param ppv Where to store the mapping address of the page. The page
620 * offset is masked off!
621 *
622 * @remarks Called from within the PGM critical section.
623 */
624static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
625{
626 Assert(PGMIsLocked(pVM));
627
628#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
629 /*
630 * Just some sketchy GC/R0-darwin code.
631 */
632 *ppMap = NULL;
633 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
634 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
635# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
636 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
637# else
638 PGMDynMapHCPage(pVM, HCPhys, ppv);
639# endif
640 return VINF_SUCCESS;
641
642#else /* IN_RING3 || IN_RING0 */
643
644
645 /*
646 * Special case: ZERO and MMIO2 pages.
647 */
648 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
649 if (idChunk == NIL_GMM_CHUNKID)
650 {
651 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
652 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
653 {
654 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
655 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
656 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
657 *ppv = (void *)((uintptr_t)pRam->pvR3 + (GCPhys - pRam->GCPhys));
658 }
659 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
660 {
661 /** @todo deal with aliased MMIO2 pages somehow...
662 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
663 * them, that would also avoid this mess. It would actually be kind of
664 * elegant... */
665 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
666 }
667 else
668 {
669 /** @todo handle MMIO2 */
670 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
671 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
672 ("pPage=%R[pgmpage]\n", pPage),
673 VERR_INTERNAL_ERROR_2);
674 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
675 }
676 *ppMap = NULL;
677 return VINF_SUCCESS;
678 }
679
680 /*
681 * Find/make Chunk TLB entry for the mapping chunk.
682 */
683 PPGMCHUNKR3MAP pMap;
684 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
685 if (pTlbe->idChunk == idChunk)
686 {
687 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
688 pMap = pTlbe->pChunk;
689 }
690 else
691 {
692 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
693
694 /*
695 * Find the chunk, map it if necessary.
696 */
697 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
698 if (!pMap)
699 {
700#ifdef IN_RING0
701 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
702 AssertRCReturn(rc, rc);
703 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
704 Assert(pMap);
705#else
706 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
707 if (RT_FAILURE(rc))
708 return rc;
709#endif
710 }
711
712 /*
713 * Enter it into the Chunk TLB.
714 */
715 pTlbe->idChunk = idChunk;
716 pTlbe->pChunk = pMap;
717 pMap->iAge = 0;
718 }
719
720 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
721 *ppMap = pMap;
722 return VINF_SUCCESS;
723#endif /* IN_RING3 */
724}
725
726
727/**
728 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
729 *
730 * This is typically used in paths where we cannot use the TLB methods (like ROM
731 * pages) or where there is no point in using them since we won't get many hits.
732 *
733 * @returns VBox strict status code.
734 * @retval VINF_SUCCESS on success.
735 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
736 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
737 *
738 * @param pVM The VM address.
739 * @param pPage The physical page tracking structure.
740 * @param GCPhys The address of the page.
741 * @param ppv Where to store the mapping address of the page. The page
742 * offset is masked off!
743 *
744 * @remarks Called from within the PGM critical section. The mapping is only
745 * valid while you're inside this section.
746 */
747int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
748{
749 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
750 if (RT_SUCCESS(rc))
751 {
752 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
753 PPGMPAGEMAP pMapIgnore;
754 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
755 if (RT_FAILURE(rc2)) /* preserve rc */
756 rc = rc2;
757 }
758 return rc;
759}
760
761
762/**
763 * Maps a page into the current virtual address space so it can be accessed for
764 * both writing and reading.
765 *
766 * This is typically used in paths where we cannot use the TLB methods (like ROM
767 * pages) or where there is no point in using them since we won't get many hits.
768 *
769 * @returns VBox status code.
770 * @retval VINF_SUCCESS on success.
771 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
772 *
773 * @param pVM The VM address.
774 * @param pPage The physical page tracking structure. Must be in the
775 * allocated state.
776 * @param GCPhys The address of the page.
777 * @param ppv Where to store the mapping address of the page. The page
778 * offset is masked off!
779 *
780 * @remarks Called from within the PGM critical section. The mapping is only
781 * valid while you're inside this section.
782 */
783int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
784{
785 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
786 PPGMPAGEMAP pMapIgnore;
787 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
788}
789
790
791/**
792 * Maps a page into the current virtual address space so it can be accessed for
793 * reading.
794 *
795 * This is typically used in paths where we cannot use the TLB methods (like ROM
796 * pages) or where there is no point in using them since we won't get many hits.
797 *
798 * @returns VBox status code.
799 * @retval VINF_SUCCESS on success.
800 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
801 *
802 * @param pVM The VM address.
803 * @param pPage The physical page tracking structure.
804 * @param GCPhys The address of the page.
805 * @param ppv Where to store the mapping address of the page. The page
806 * offset is masked off!
807 *
808 * @remarks Called from within the PGM critical section. The mapping is only
809 * valid while you're inside this section.
810 */
811int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
812{
813 PPGMPAGEMAP pMapIgnore;
814 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
815}
816
817
818#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
819/**
820 * Load a guest page into the ring-3 physical TLB.
821 *
822 * @returns VBox status code.
823 * @retval VINF_SUCCESS on success
824 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
825 * @param pPGM The PGM instance pointer.
826 * @param GCPhys The guest physical address in question.
827 */
828int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
829{
830 Assert(PGMIsLocked(PGM2VM(pPGM)));
831 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
832
833 /*
834 * Find the ram range.
835 * 99.8% of requests are expected to be in the first range.
836 */
837 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
838 RTGCPHYS off = GCPhys - pRam->GCPhys;
839 if (RT_UNLIKELY(off >= pRam->cb))
840 {
841 do
842 {
843 pRam = pRam->CTX_SUFF(pNext);
844 if (!pRam)
845 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
846 off = GCPhys - pRam->GCPhys;
847 } while (off >= pRam->cb);
848 }
849
850 /*
851 * Map the page.
852 * Make a special case for the zero page as it is kind of special.
853 */
854 PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
855 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
856 if (!PGM_PAGE_IS_ZERO(pPage))
857 {
858 void *pv;
859 PPGMPAGEMAP pMap;
860 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
861 if (RT_FAILURE(rc))
862 return rc;
863 pTlbe->pMap = pMap;
864 pTlbe->pv = pv;
865 }
866 else
867 {
868 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
869 pTlbe->pMap = NULL;
870 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
871 }
872#ifdef IN_RING0
873 pTlbe->GCPhys = (GCPhys & X86_PTE_PAE_PG_MASK);
874#else
875 /* REM already has a TLB of its own; no point in having two
876 * and keeping both in sync will eliminate any benefit there might be.
877 */
878 pTlbe->GCPhys = NIL_RTGCPHYS;
879#endif
880 pTlbe->pPage = pPage;
881 return VINF_SUCCESS;
882}
883
884
885/**
886 * Load a guest page into the ring-3 physical TLB.
887 *
888 * @returns VBox status code.
889 * @retval VINF_SUCCESS on success
890 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
891 *
892 * @param pPGM The PGM instance pointer.
893 * @param pPage Pointer to the PGMPAGE structure corresponding to
894 * GCPhys.
895 * @param GCPhys The guest physical address in question.
896 */
897int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys)
898{
899 Assert(PGMIsLocked(PGM2VM(pPGM)));
900 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
901
902 /*
903 * Map the page.
904 * Make a special case for the zero page as it is kind of special.
905 */
906 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
907 if (!PGM_PAGE_IS_ZERO(pPage))
908 {
909 void *pv;
910 PPGMPAGEMAP pMap;
911 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
912 if (RT_FAILURE(rc))
913 return rc;
914 pTlbe->pMap = pMap;
915 pTlbe->pv = pv;
916 }
917 else
918 {
919 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
920 pTlbe->pMap = NULL;
921 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
922 }
923#ifdef IN_RING0
924 pTlbe->GCPhys = (GCPhys & X86_PTE_PAE_PG_MASK);
925#else
926 /* REM already has a TLB of its own; no point in having two
927 * and keeping both in sync will eliminate any benefit there might be.
928 */
929 pTlbe->GCPhys = NIL_RTGCPHYS;
930#endif
931 pTlbe->pPage = pPage;
932 return VINF_SUCCESS;
933}
934#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
935
936
937/**
938 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
939 * own the PGM lock and therefore does not need to lock the mapped page.
940 *
941 * @returns VBox status code.
942 * @retval VINF_SUCCESS on success.
943 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
944 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
945 *
946 * @param pVM The VM handle.
947 * @param GCPhys The guest physical address of the page that should be mapped.
948 * @param pPage Pointer to the PGMPAGE structure for the page.
949 * @param ppv Where to store the address corresponding to GCPhys.
950 *
951 * @internal
952 */
953int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
954{
955 int rc;
956 AssertReturn(pPage, VERR_INTERNAL_ERROR);
957 Assert(PGMIsLocked(pVM));
958
959 /*
960 * Make sure the page is writable.
961 */
962 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
963 {
964 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
965 if (RT_FAILURE(rc))
966 return rc;
967 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
968 }
969 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
970
971 /*
972 * Get the mapping address.
973 */
974#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
975 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
976#else
977 PPGMPAGEMAPTLBE pTlbe;
978 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
979 if (RT_FAILURE(rc))
980 return rc;
981 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
982#endif
983 return VINF_SUCCESS;
984}
985
986
987/**
988 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
989 * own the PGM lock and therefore does not need to lock the mapped page.
990 *
991 * @returns VBox status code.
992 * @retval VINF_SUCCESS on success.
993 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
994 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
995 *
996 * @param pVM The VM handle.
997 * @param GCPhys The guest physical address of the page that should be mapped.
998 * @param pPage Pointer to the PGMPAGE structure for the page.
999 * @param ppv Where to store the address corresponding to GCPhys.
1000 *
1001 * @internal
1002 */
1003int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
1004{
1005 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1006 Assert(PGMIsLocked(pVM));
1007 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1008
1009 /*
1010 * Get the mapping address.
1011 */
1012#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1013 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1014#else
1015 PPGMPAGEMAPTLBE pTlbe;
1016 int rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1017 if (RT_FAILURE(rc))
1018 return rc;
1019 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1020#endif
1021 return VINF_SUCCESS;
1022}
1023
1024
1025/**
1026 * Requests the mapping of a guest page into the current context.
1027 *
1028 * This API should only be used for a very short time, as it will consume
1029 * scarce resources (R0 and GC) in the mapping cache. When you're done
1030 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1031 *
1032 * This API will assume your intention is to write to the page, and will
1033 * therefore replace shared and zero pages. If you do not intend to modify
1034 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1035 *
1036 * @returns VBox status code.
1037 * @retval VINF_SUCCESS on success.
1038 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1039 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1040 *
1041 * @param pVM The VM handle.
1042 * @param GCPhys The guest physical address of the page that should be mapped.
1043 * @param ppv Where to store the address corresponding to GCPhys.
1044 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1045 *
1046 * @remarks The caller is responsible for dealing with access handlers.
1047 * @todo Add an informational return code for pages with access handlers?
1048 *
1049 * @remark Avoid calling this API from within critical sections (other than the
1050 * PGM one) because of the deadlock risk. External threads may need to
1051 * delegate jobs to the EMTs.
1052 * @thread Any thread.
1053 */
1054VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1055{
1056#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1057
1058 /*
1059 * Find the page and make sure it's writable.
1060 */
1061 PPGMPAGE pPage;
1062 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1063 if (RT_SUCCESS(rc))
1064 {
1065 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1066 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1067 if (RT_SUCCESS(rc))
1068 {
1069 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1070# if 0
1071 pLock->pvMap = 0;
1072 pLock->pvPage = pPage;
1073# else
1074 pLock->u32Dummy = UINT32_MAX;
1075# endif
1076 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1077 rc = VINF_SUCCESS;
1078 }
1079 }
1080
1081#else /* IN_RING3 || IN_RING0 */
1082 int rc = pgmLock(pVM);
1083 AssertRCReturn(rc, rc);
1084
1085 /*
1086 * Query the Physical TLB entry for the page (may fail).
1087 */
1088 PPGMPAGEMAPTLBE pTlbe;
1089 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1090 if (RT_SUCCESS(rc))
1091 {
1092 /*
1093 * If the page is shared, the zero page, or being write monitored,
1094 * it must be converted to a page that's writable if possible.
1095 */
1096 PPGMPAGE pPage = pTlbe->pPage;
1097 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1098 {
1099 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1100 if (RT_SUCCESS(rc))
1101 {
1102 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1103 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1104 }
1105 }
1106 if (RT_SUCCESS(rc))
1107 {
1108 /*
1109 * Now, just perform the locking and calculate the return address.
1110 */
1111 PPGMPAGEMAP pMap = pTlbe->pMap;
1112 if (pMap)
1113 pMap->cRefs++;
1114
1115 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1116 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1117 {
1118 if (cLocks == 0)
1119 pVM->pgm.s.cWriteLockedPages++;
1120 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1121 }
1122 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
1123 {
1124 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1125 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
1126 if (pMap)
1127 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1128 }
1129
1130 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1131 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1132 pLock->pvMap = pMap;
1133 }
1134 }
1135
1136 pgmUnlock(pVM);
1137#endif /* IN_RING3 || IN_RING0 */
1138 return rc;
1139}
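/*
 * Usage sketch for the lock/map/release pattern described above. The buffer and
 * size names are illustrative assumptions; the essential points are the short-lived
 * mapping and the matching PGMPhysReleasePageMappingLock call.
 */
#if 0 /* illustrative sketch */
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(pv, abSrc, cbToWrite);               /* stay within this one page */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release ASAP - scarce resource */
    }
#endif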
1140
1141
1142/**
1143 * Requests the mapping of a guest page into the current context.
1144 *
1145 * This API should only be used for a very short time, as it will consume
1146 * scarce resources (R0 and GC) in the mapping cache. When you're done
1147 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1148 *
1149 * @returns VBox status code.
1150 * @retval VINF_SUCCESS on success.
1151 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1152 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1153 *
1154 * @param pVM The VM handle.
1155 * @param GCPhys The guest physical address of the page that should be mapped.
1156 * @param ppv Where to store the address corresponding to GCPhys.
1157 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1158 *
1159 * @remarks The caller is responsible for dealing with access handlers.
1160 * @todo Add an informational return code for pages with access handlers?
1161 *
1162 * @remark Avoid calling this API from within critical sections (other than
1163 * the PGM one) because of the deadlock risk.
1164 * @thread Any thread.
1165 */
1166VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1167{
1168#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1169
1170 /*
1171 * Find the page and make sure it's readable.
1172 */
1173 PPGMPAGE pPage;
1174 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1175 if (RT_SUCCESS(rc))
1176 {
1177 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1178 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1179 else
1180 {
1181 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1182# if 0
1183 pLock->pvMap = 0;
1184 pLock->pvPage = pPage;
1185# else
1186 pLock->u32Dummy = UINT32_MAX;
1187# endif
1188 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1189 rc = VINF_SUCCESS;
1190 }
1191 }
1192
1193#else /* IN_RING3 || IN_RING0 */
1194 int rc = pgmLock(pVM);
1195 AssertRCReturn(rc, rc);
1196
1197 /*
1198 * Query the Physical TLB entry for the page (may fail).
1199 */
1200 PPGMPAGEMAPTLBE pTlbe;
1201 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1202 if (RT_SUCCESS(rc))
1203 {
1204 /* MMIO pages don't have any readable backing. */
1205 PPGMPAGE pPage = pTlbe->pPage;
1206 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1207 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1208 else
1209 {
1210 /*
1211 * Now, just perform the locking and calculate the return address.
1212 */
1213 PPGMPAGEMAP pMap = pTlbe->pMap;
1214 if (pMap)
1215 pMap->cRefs++;
1216
1217 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1218 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1219 {
1220 if (cLocks == 0)
1221 pVM->pgm.s.cReadLockedPages++;
1222 PGM_PAGE_INC_READ_LOCKS(pPage);
1223 }
1224 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
1225 {
1226 PGM_PAGE_INC_READ_LOCKS(pPage);
1227 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
1228 if (pMap)
1229 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1230 }
1231
1232 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1233 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1234 pLock->pvMap = pMap;
1235 }
1236 }
1237
1238 pgmUnlock(pVM);
1239#endif /* IN_RING3 || IN_RING0 */
1240 return rc;
1241}
1242
1243
1244/**
1245 * Requests the mapping of a guest page given by virtual address into the current context.
1246 *
1247 * This API should only be used for a very short time, as it will consume
1248 * scarce resources (R0 and GC) in the mapping cache. When you're done
1249 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1250 *
1251 * This API will assume your intention is to write to the page, and will
1252 * therefore replace shared and zero pages. If you do not intend to modify
1253 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1254 *
1255 * @returns VBox status code.
1256 * @retval VINF_SUCCESS on success.
1257 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1258 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1259 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1260 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1261 *
1262 * @param pVCpu VMCPU handle.
1263 * @param GCPtr The guest virtual address of the page that should be mapped.
1264 * @param ppv Where to store the address corresponding to GCPtr.
1265 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1266 *
1267 * @remark Avoid calling this API from within critical sections (other than
1268 * the PGM one) because of the deadlock risk.
1269 * @thread EMT
1270 */
1271VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1272{
1273 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1274 RTGCPHYS GCPhys;
1275 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1276 if (RT_SUCCESS(rc))
1277 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1278 return rc;
1279}
1280
1281
1282/**
1283 * Requests the mapping of a guest page given by virtual address into the current context.
1284 *
1285 * This API should only be used for a very short time, as it will consume
1286 * scarce resources (R0 and GC) in the mapping cache. When you're done
1287 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1288 *
1289 * @returns VBox status code.
1290 * @retval VINF_SUCCESS on success.
1291 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1292 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1293 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1294 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1295 *
1296 * @param pVCpu VMCPU handle.
1297 * @param GCPtr The guest virtual address of the page that should be mapped.
1298 * @param ppv Where to store the address corresponding to GCPtr.
1299 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1300 *
1301 * @remark Avoid calling this API from within critical sections (other than
1302 * the PGM one) because of the deadlock risk.
1303 * @thread EMT
1304 */
1305VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1306{
1307 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1308 RTGCPHYS GCPhys;
1309 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1310 if (RT_SUCCESS(rc))
1311 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1312 return rc;
1313}
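/*
 * Read-only variant, sketched under the same assumptions as the write example above
 * (destination buffer and size names are illustrative): translate a guest virtual
 * address on the EMT, read from the mapping, then release the lock promptly.
 */
#if 0 /* illustrative sketch */
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtr, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(abDst, pv, cbToRead);                /* never write through pv */
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
    }
#endif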
1314
1315
1316/**
1317 * Release the mapping of a guest page.
1318 *
1319 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1320 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1321 *
1322 * @param pVM The VM handle.
1323 * @param pLock The lock structure initialized by the mapping function.
1324 */
1325VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1326{
1327#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1328 /* currently nothing to do here. */
1329 Assert(pLock->u32Dummy == UINT32_MAX);
1330 pLock->u32Dummy = 0;
1331
1332#else /* IN_RING3 */
1333 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1334 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1335 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1336
1337 pLock->uPageAndType = 0;
1338 pLock->pvMap = NULL;
1339
1340 pgmLock(pVM);
1341 if (fWriteLock)
1342 {
1343 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1344 Assert(cLocks > 0);
1345 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1346 {
1347 if (cLocks == 1)
1348 {
1349 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1350 pVM->pgm.s.cWriteLockedPages--;
1351 }
1352 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1353 }
1354
1355 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1356 {
1357 PGM_PAGE_SET_WRITTEN_TO(pPage);
1358 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1359 Assert(pVM->pgm.s.cMonitoredPages > 0);
1360 pVM->pgm.s.cMonitoredPages--;
1361 pVM->pgm.s.cWrittenToPages++;
1362 }
1363 }
1364 else
1365 {
1366 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1367 Assert(cLocks > 0);
1368 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1369 {
1370 if (cLocks == 1)
1371 {
1372 Assert(pVM->pgm.s.cReadLockedPages > 0);
1373 pVM->pgm.s.cReadLockedPages--;
1374 }
1375 PGM_PAGE_DEC_READ_LOCKS(pPage);
1376 }
1377 }
1378
1379 if (pMap)
1380 {
1381 Assert(pMap->cRefs >= 1);
1382 pMap->cRefs--;
1383 pMap->iAge = 0;
1384 }
1385 pgmUnlock(pVM);
1386#endif /* IN_RING3 */
1387}
1388
1389
1390/**
1391 * Converts a GC physical address to a HC ring-3 pointer.
1392 *
1393 * @returns VINF_SUCCESS on success.
1394 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1395 * page but has no physical backing.
1396 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1397 * GC physical address.
1398 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1399 * a dynamic ram chunk boundary
1400 *
1401 * @param pVM The VM handle.
1402 * @param GCPhys The GC physical address to convert.
1403 * @param cbRange Physical range
1404 * @param pR3Ptr Where to store the R3 pointer on success.
1405 *
1406 * @deprecated Avoid when possible!
1407 */
1408VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
1409{
1410/** @todo this is kind of hacky and needs some more work. */
1411#ifndef DEBUG_sandervl
1412 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1413#endif
1414
1415 Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): don't use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
1416#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1417 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1418#else
1419 pgmLock(pVM);
1420
1421 PPGMRAMRANGE pRam;
1422 PPGMPAGE pPage;
1423 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
1424 if (RT_SUCCESS(rc))
1425 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
1426
1427 pgmUnlock(pVM);
1428 Assert(rc <= VINF_SUCCESS);
1429 return rc;
1430#endif
1431}
1432
1433
1434#ifdef VBOX_STRICT
1435/**
1436 * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
1437 *
1438 * @returns The R3Ptr, NIL_RTR3PTR on failure.
1439 * @param pVM The VM handle.
1440 * @param GCPhys The GC physical address.
1441 * @param cbRange Physical range.
1442 *
1443 * @deprecated Avoid when possible.
1444 */
1445VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
1446{
1447 RTR3PTR R3Ptr;
1448 int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
1449 if (RT_SUCCESS(rc))
1450 return R3Ptr;
1451 return NIL_RTR3PTR;
1452}
1453#endif /* VBOX_STRICT */
1454
1455
1456/**
1457 * Converts a guest pointer to a GC physical address.
1458 *
1459 * This uses the current CR3/CR0/CR4 of the guest.
1460 *
1461 * @returns VBox status code.
1462 * @param pVCpu The VMCPU Handle
1463 * @param GCPtr The guest pointer to convert.
1464 * @param pGCPhys Where to store the GC physical address.
1465 */
1466VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1467{
1468 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1469 if (pGCPhys && RT_SUCCESS(rc))
1470 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1471 return rc;
1472}
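/*
 * Sketch: translating a guest virtual address under the current paging mode. The low
 * PAGE_OFFSET_MASK bits of GCPtr are preserved in the result, so no separate offset
 * fix-up is needed (the variable names are illustrative).
 */
#if 0 /* illustrative sketch */
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
    if (RT_SUCCESS(rc))
        Log(("translated %RGv -> %RGp\n", GCPtr, GCPhys));
#endif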
1473
1474
1475/**
1476 * Converts a guest pointer to a HC physical address.
1477 *
1478 * This uses the current CR3/CR0/CR4 of the guest.
1479 *
1480 * @returns VBox status code.
1481 * @param pVCpu The VMCPU Handle
1482 * @param GCPtr The guest pointer to convert.
1483 * @param pHCPhys Where to store the HC physical address.
1484 */
1485VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
1486{
1487 PVM pVM = pVCpu->CTX_SUFF(pVM);
1488 RTGCPHYS GCPhys;
1489 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1490 if (RT_SUCCESS(rc))
1491 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
1492 return rc;
1493}
1494
1495
1496/**
1497 * Converts a guest pointer to a R3 pointer.
1498 *
1499 * This uses the current CR3/CR0/CR4 of the guest.
1500 *
1501 * @returns VBox status code.
1502 * @param pVCpu The VMCPU Handle
1503 * @param GCPtr The guest pointer to convert.
1504 * @param pR3Ptr Where to store the R3 virtual address.
1505 *
1506 * @deprecated Don't use this.
1507 */
1508VMMDECL(int) PGMPhysGCPtr2R3Ptr(PVMCPU pVCpu, RTGCPTR GCPtr, PRTR3PTR pR3Ptr)
1509{
1510 PVM pVM = pVCpu->CTX_SUFF(pVM);
1511 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1512 RTGCPHYS GCPhys;
1513 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1514 if (RT_SUCCESS(rc))
1515 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pR3Ptr);
1516 return rc;
1517}
1518
1519
1520
1521#undef LOG_GROUP
1522#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
1523
1524
1525#ifdef IN_RING3
1526/**
1527 * Cache PGMPhys memory access
1528 *
1529 * @param pVM VM Handle.
1530 * @param pCache Cache structure pointer
1531 * @param GCPhys GC physical address
1532 * @param pbR3 R3 pointer corresponding to the physical page
1533 *
1534 * @thread EMT.
1535 */
1536static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
1537{
1538 uint32_t iCacheIndex;
1539
1540 Assert(VM_IS_EMT(pVM));
1541
1542 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
1543 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
1544
1545 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
1546
1547 ASMBitSet(&pCache->aEntries, iCacheIndex);
1548
1549 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
1550 pCache->Entry[iCacheIndex].pbR3 = pbR3;
1551}
1552#endif /* IN_RING3 */
1553
1554
1555/**
1556 * Deals with reading from a page with one or more ALL access handlers.
1557 *
1558 * @returns VBox status code. Can be ignored in ring-3.
1559 * @retval VINF_SUCCESS.
1560 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1561 *
1562 * @param pVM The VM handle.
1563 * @param pPage The page descriptor.
1564 * @param GCPhys The physical address to start reading at.
1565 * @param pvBuf Where to put the bits we read.
1566 * @param cb How much to read - less or equal to a page.
1567 */
1568static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
1569{
1570 /*
1571 * The most frequent access here is MMIO and shadowed ROM.
1572 * The current code ASSUMES all these access handlers cover full pages!
1573 */
1574
1575 /*
1576 * Whatever we do we need the source page, map it first.
1577 */
1578 const void *pvSrc = NULL;
1579 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
1580 if (RT_FAILURE(rc))
1581 {
1582 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1583 GCPhys, pPage, rc));
1584 memset(pvBuf, 0xff, cb);
1585 return VINF_SUCCESS;
1586 }
1587 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1588
1589 /*
1590 * Deal with any physical handlers.
1591 */
1592 PPGMPHYSHANDLER pPhys = NULL;
1593 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
1594 {
1595#ifdef IN_RING3
1596 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1597 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1598 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
1599 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
1600 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1601 Assert(pPhys->CTX_SUFF(pfnHandler));
1602
1603 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
1604 void *pvUser = pPhys->CTX_SUFF(pvUser);
1605
1606 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
1607 STAM_PROFILE_START(&pPhys->Stat, h);
1608 Assert(PGMIsLockOwner(pVM));
1609 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1610 pgmUnlock(pVM);
1611 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
1612 pgmLock(pVM);
1613# ifdef VBOX_WITH_STATISTICS
1614 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1615 if (pPhys)
1616 STAM_PROFILE_STOP(&pPhys->Stat, h);
1617# else
1618 pPhys = NULL; /* might not be valid anymore. */
1619# endif
1620 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
1621#else
1622 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1623 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1624 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1625#endif
1626 }
1627
1628 /*
1629 * Deal with any virtual handlers.
1630 */
1631 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
1632 {
1633 unsigned iPage;
1634 PPGMVIRTHANDLER pVirt;
1635
1636 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
1637 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
1638 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
1639 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1640 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
1641
1642#ifdef IN_RING3
1643 if (pVirt->pfnHandlerR3)
1644 {
1645 if (!pPhys)
1646 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1647 else
1648 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
1649 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1650 + (iPage << PAGE_SHIFT)
1651 + (GCPhys & PAGE_OFFSET_MASK);
1652
1653 STAM_PROFILE_START(&pVirt->Stat, h);
1654 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
1655 STAM_PROFILE_STOP(&pVirt->Stat, h);
1656 if (rc2 == VINF_SUCCESS)
1657 rc = VINF_SUCCESS;
1658 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
1659 }
1660 else
1661 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1662#else
1663 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1664 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1665 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1666#endif
1667 }
1668
1669 /*
1670 * Take the default action.
1671 */
1672 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1673 memcpy(pvBuf, pvSrc, cb);
1674 return rc;
1675}
1676
1677
1678/**
1679 * Read physical memory.
1680 *
1681 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
1682 * want to ignore those.
1683 *
1684 * @returns VBox status code. Can be ignored in ring-3.
1685 * @retval VINF_SUCCESS.
1686 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1687 *
1688 * @param pVM VM Handle.
1689 * @param GCPhys Physical address start reading from.
1690 * @param pvBuf Where to put the read bits.
1691 * @param cbRead How many bytes to read.
1692 */
1693VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1694{
1695 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
1696 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
1697
1698 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysRead));
1699 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
1700
1701 pgmLock(pVM);
1702
1703 /*
1704 * Copy loop on ram ranges.
1705 */
1706 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1707 for (;;)
1708 {
1709 /* Find range. */
1710 while (pRam && GCPhys > pRam->GCPhysLast)
1711 pRam = pRam->CTX_SUFF(pNext);
1712 /* Inside range or not? */
1713 if (pRam && GCPhys >= pRam->GCPhys)
1714 {
1715 /*
1716 * Must work our way thru this page by page.
1717 */
1718 RTGCPHYS off = GCPhys - pRam->GCPhys;
1719 while (off < pRam->cb)
1720 {
1721 unsigned iPage = off >> PAGE_SHIFT;
1722 PPGMPAGE pPage = &pRam->aPages[iPage];
1723 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1724 if (cb > cbRead)
1725 cb = cbRead;
1726
1727 /*
1728 * Any ALL access handlers?
1729 */
1730 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
1731 {
1732 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
1733 if (RT_FAILURE(rc))
1734 {
1735 pgmUnlock(pVM);
1736 return rc;
1737 }
1738 }
1739 else
1740 {
1741 /*
1742 * Get the pointer to the page.
1743 */
1744 const void *pvSrc;
1745 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
1746 if (RT_SUCCESS(rc))
1747 memcpy(pvBuf, pvSrc, cb);
1748 else
1749 {
1750 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1751 pRam->GCPhys + off, pPage, rc));
1752 memset(pvBuf, 0xff, cb);
1753 }
1754 }
1755
1756 /* next page */
1757 if (cb >= cbRead)
1758 {
1759 pgmUnlock(pVM);
1760 return VINF_SUCCESS;
1761 }
1762 cbRead -= cb;
1763 off += cb;
1764 pvBuf = (char *)pvBuf + cb;
1765 } /* walk pages in ram range. */
1766
1767 GCPhys = pRam->GCPhysLast + 1;
1768 }
1769 else
1770 {
1771 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
1772
1773 /*
1774 * Unassigned address space.
1775 */
1776 if (!pRam)
1777 break;
1778 size_t cb = pRam->GCPhys - GCPhys;
1779 if (cb >= cbRead)
1780 {
1781 memset(pvBuf, 0xff, cbRead);
1782 break;
1783 }
1784 memset(pvBuf, 0xff, cb);
1785
1786 cbRead -= cb;
1787 pvBuf = (char *)pvBuf + cb;
1788 GCPhys += cb;
1789 }
1790 } /* Ram range walk */
1791
1792 pgmUnlock(pVM);
1793 return VINF_SUCCESS;
1794}
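
/*
 * Usage sketch for PGMPhysRead (hypothetical caller; the helper name and the
 * idea of peeking at an arbitrary guest physical address are illustrative,
 * not taken from this file).  In R0/RC the call can return
 * VERR_PGM_PHYS_WR_HIT_HANDLER, which a real caller treats as "redo in R3".
 */
static int examplePeekGuestPhys(PVM pVM, RTGCPHYS GCPhys)
{
    uint8_t abBuf[16];
    int rc = PGMPhysRead(pVM, GCPhys, abBuf, sizeof(abBuf));
    if (RT_SUCCESS(rc))
        Log(("examplePeekGuestPhys: %RGp starts with %#x\n", GCPhys, abBuf[0]));
    return rc;
}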
1795
1796
1797/**
1798 * Deals with writing to a page with one or more WRITE or ALL access handlers.
1799 *
1800 * @returns VBox status code. Can be ignored in ring-3.
1801 * @retval VINF_SUCCESS.
1802 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1803 *
1804 * @param pVM The VM handle.
1805 * @param pPage The page descriptor.
1806 * @param GCPhys The physical address to start writing at.
1807 * @param pvBuf What to write.
1808 * @param cbWrite How much to write - less or equal to a page.
1809 */
1810static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
1811{
1812 void *pvDst = NULL;
1813 int rc;
1814
1815 /*
1816 * Give priority to physical handlers (like #PF does).
1817 *
1818 * Hope for a lonely physical handler first that covers the whole
1819 * write area. This should be a pretty frequent case with MMIO and
1820 * the heavy usage of full page handlers in the page pool.
1821 */
1822 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
1823 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
1824 {
1825 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1826 if (pCur)
1827 {
1828 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1829 Assert(pCur->CTX_SUFF(pfnHandler));
1830
1831 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
1832 if (cbRange > cbWrite)
1833 cbRange = cbWrite;
1834
1835#ifndef IN_RING3
1836 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1837 NOREF(cbRange);
1838 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1839 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1840
1841#else /* IN_RING3 */
1842 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1843 if (!PGM_PAGE_IS_MMIO(pPage))
1844 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1845 else
1846 rc = VINF_SUCCESS;
1847 if (RT_SUCCESS(rc))
1848 {
1849 PFNPGMR3PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
1850 void *pvUser = pCur->CTX_SUFF(pvUser);
1851
1852 STAM_PROFILE_START(&pCur->Stat, h);
1853 Assert(PGMIsLockOwner(pVM));
1854 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1855 pgmUnlock(pVM);
1856 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
1857 pgmLock(pVM);
1858# ifdef VBOX_WITH_STATISTICS
1859 pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1860 if (pCur)
1861 STAM_PROFILE_STOP(&pCur->Stat, h);
1862# else
1863 pCur = NULL; /* might not be valid anymore. */
1864# endif
1865 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1866 memcpy(pvDst, pvBuf, cbRange);
1867 else
1868 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
1869 }
1870 else
1871 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1872 GCPhys, pPage, rc), rc);
1873 if (RT_LIKELY(cbRange == cbWrite))
1874 return VINF_SUCCESS;
1875
1876 /* more fun to be had below */
1877 cbWrite -= cbRange;
1878 GCPhys += cbRange;
1879 pvBuf = (uint8_t *)pvBuf + cbRange;
1880 pvDst = (uint8_t *)pvDst + cbRange;
1881#endif /* IN_RING3 */
1882 }
1883 /* else: the handler is somewhere else in the page, deal with it below. */
1884 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
1885 }
1886 /*
1887 * A virtual handler without any interfering physical handlers.
1888 * Hopefully it'll cover the whole write.
1889 */
1890 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
1891 {
1892 unsigned iPage;
1893 PPGMVIRTHANDLER pCur;
1894 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
1895 if (RT_SUCCESS(rc))
1896 {
1897 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
1898 if (cbRange > cbWrite)
1899 cbRange = cbWrite;
1900
1901#ifndef IN_RING3
1902 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1903 NOREF(cbRange);
1904 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1905 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1906
1907#else /* IN_RING3 */
1908
1909 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1910 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1911 if (RT_SUCCESS(rc))
1912 {
1913 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1914 if (pCur->pfnHandlerR3)
1915 {
1916 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
1917 + (iPage << PAGE_SHIFT)
1918 + (GCPhys & PAGE_OFFSET_MASK);
1919
1920 STAM_PROFILE_START(&pCur->Stat, h);
1921 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
1922 STAM_PROFILE_STOP(&pCur->Stat, h);
1923 }
1924 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1925 memcpy(pvDst, pvBuf, cbRange);
1926 else
1927 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
1928 }
1929 else
1930 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1931 GCPhys, pPage, rc), rc);
1932 if (RT_LIKELY(cbRange == cbWrite))
1933 return VINF_SUCCESS;
1934
1935 /* more fun to be had below */
1936 cbWrite -= cbRange;
1937 GCPhys += cbRange;
1938 pvBuf = (uint8_t *)pvBuf + cbRange;
1939 pvDst = (uint8_t *)pvDst + cbRange;
1940#endif
1941 }
1942 /* else: the handler is somewhere else in the page, deal with it below. */
1943 }
1944
1945 /*
1946 * Deal with all the odd ends.
1947 */
1948
1949 /* We need a writable destination page. */
1950 if (!pvDst)
1951 {
1952 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1953 AssertLogRelMsgReturn(RT_SUCCESS(rc),
1954 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1955 GCPhys, pPage, rc), rc);
1956 }
1957
1958 /* The loop state (big + ugly). */
1959 unsigned iVirtPage = 0;
1960 PPGMVIRTHANDLER pVirt = NULL;
1961 uint32_t offVirt = PAGE_SIZE;
1962 uint32_t offVirtLast = PAGE_SIZE;
1963 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
1964
1965 PPGMPHYSHANDLER pPhys = NULL;
1966 uint32_t offPhys = PAGE_SIZE;
1967 uint32_t offPhysLast = PAGE_SIZE;
1968 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
1969
1970 /* The loop. */
1971 for (;;)
1972 {
1973 /*
1974 * Find the closest handler at or above GCPhys.
1975 */
1976 if (fMoreVirt && !pVirt)
1977 {
1978 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
1979 if (RT_SUCCESS(rc))
1980 {
1981 offVirt = 0;
1982 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1983 }
1984 else
1985 {
1986 PPGMPHYS2VIRTHANDLER pVirtPhys;
1987 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
1988 GCPhys, true /* fAbove */);
1989 if ( pVirtPhys
1990 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
1991 {
1992 /* ASSUME that pVirtPhys only covers one page. */
1993 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
1994 Assert(pVirtPhys->Core.Key > GCPhys);
1995
1996 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
1997 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
1998 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1999 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2000 }
2001 else
2002 {
2003 pVirt = NULL;
2004 fMoreVirt = false;
2005 offVirt = offVirtLast = PAGE_SIZE;
2006 }
2007 }
2008 }
2009
2010 if (fMorePhys && !pPhys)
2011 {
2012 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2013 if (pPhys)
2014 {
2015 offPhys = 0;
2016 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2017 }
2018 else
2019 {
2020 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2021 GCPhys, true /* fAbove */);
2022 if ( pPhys
2023 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2024 {
2025 offPhys = pPhys->Core.Key - GCPhys;
2026 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2027 }
2028 else
2029 {
2030 pPhys = NULL;
2031 fMorePhys = false;
2032 offPhys = offPhysLast = PAGE_SIZE;
2033 }
2034 }
2035 }
2036
2037 /*
2038 * Handle access to space without handlers (that's easy).
2039 */
2040 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2041 uint32_t cbRange = (uint32_t)cbWrite;
2042 if (offPhys && offVirt)
2043 {
2044 if (cbRange > offPhys)
2045 cbRange = offPhys;
2046 if (cbRange > offVirt)
2047 cbRange = offVirt;
2048 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2049 }
2050 /*
2051 * Physical handler.
2052 */
2053 else if (!offPhys && offVirt)
2054 {
2055 if (cbRange > offPhysLast + 1)
2056 cbRange = offPhysLast + 1;
2057 if (cbRange > offVirt)
2058 cbRange = offVirt;
2059#ifdef IN_RING3
2060 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2061 void *pvUser = pPhys->CTX_SUFF(pvUser);
2062
2063 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2064 STAM_PROFILE_START(&pPhys->Stat, h);
2065 Assert(PGMIsLockOwner(pVM));
2066 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2067 pgmUnlock(pVM);
2068 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2069 pgmLock(pVM);
2070# ifdef VBOX_WITH_STATISTICS
2071 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2072 if (pPhys)
2073 STAM_PROFILE_STOP(&pPhys->Stat, h);
2074# else
2075 pPhys = NULL; /* might not be valid anymore. */
2076# endif
2077 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2078#else
2079 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2080 NOREF(cbRange);
2081 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2082 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2083#endif
2084 }
2085 /*
2086 * Virtual handler.
2087 */
2088 else if (offPhys && !offVirt)
2089 {
2090 if (cbRange > offVirtLast + 1)
2091 cbRange = offVirtLast + 1;
2092 if (cbRange > offPhys)
2093 cbRange = offPhys;
2094#ifdef IN_RING3
2095 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2096 if (pVirt->pfnHandlerR3)
2097 {
2098 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2099 + (iVirtPage << PAGE_SHIFT)
2100 + (GCPhys & PAGE_OFFSET_MASK);
2101 STAM_PROFILE_START(&pVirt->Stat, h);
2102 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2103 STAM_PROFILE_STOP(&pVirt->Stat, h);
2104 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2105 }
2106 pVirt = NULL;
2107#else
2108 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2109 NOREF(cbRange);
2110 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2111 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2112#endif
2113 }
2114 /*
2115 * Both... give the physical one priority.
2116 */
2117 else
2118 {
2119 Assert(!offPhys && !offVirt);
2120 if (cbRange > offVirtLast + 1)
2121 cbRange = offVirtLast + 1;
2122 if (cbRange > offPhysLast + 1)
2123 cbRange = offPhysLast + 1;
2124
2125#ifdef IN_RING3
2126 if (pVirt->pfnHandlerR3)
2127 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2128 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2129
2130 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2131 void *pvUser = pPhys->CTX_SUFF(pvUser);
2132
2133 STAM_PROFILE_START(&pPhys->Stat, h);
2134 Assert(PGMIsLockOwner(pVM));
2135 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2136 pgmUnlock(pVM);
2137 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2138 pgmLock(pVM);
2139# ifdef VBOX_WITH_STATISTICS
2140 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2141 if (pPhys)
2142 STAM_PROFILE_STOP(&pPhys->Stat, h);
2143# else
2144 pPhys = NULL; /* might not be valid anymore. */
2145# endif
2146 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2147 if (pVirt->pfnHandlerR3)
2148 {
2149
2150 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2151 + (iVirtPage << PAGE_SHIFT)
2152 + (GCPhys & PAGE_OFFSET_MASK);
2153 STAM_PROFILE_START(&pVirt->Stat, h2);
2154 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2155 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2156 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2157 rc = VINF_SUCCESS;
2158 else
2159 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2160 }
2161 pPhys = NULL;
2162 pVirt = NULL;
2163#else
2164 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2165 NOREF(cbRange);
2166 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2167 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2168#endif
2169 }
2170 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2171 memcpy(pvDst, pvBuf, cbRange);
2172
2173 /*
2174 * Advance if we've got more stuff to do.
2175 */
2176 if (cbRange >= cbWrite)
2177 return VINF_SUCCESS;
2178
2179 cbWrite -= cbRange;
2180 GCPhys += cbRange;
2181 pvBuf = (uint8_t *)pvBuf + cbRange;
2182 pvDst = (uint8_t *)pvDst + cbRange;
2183
2184 offPhys -= cbRange;
2185 offPhysLast -= cbRange;
2186 offVirt -= cbRange;
2187 offVirtLast -= cbRange;
2188 }
2189}
2190
2191
2192/**
2193 * Write to physical memory.
2194 *
2195 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2196 * want to ignore those.
2197 *
2198 * @returns VBox status code. Can be ignored in ring-3.
2199 * @retval VINF_SUCCESS.
2200 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2201 *
2202 * @param pVM VM Handle.
2203 * @param GCPhys Physical address to write to.
2204 * @param pvBuf What to write.
2205 * @param cbWrite How many bytes to write.
2206 */
2207VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2208{
2209 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2210 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2211 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2212
2213 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWrite));
2214 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2215
2216 pgmLock(pVM);
2217
2218 /*
2219 * Copy loop on ram ranges.
2220 */
2221 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2222 for (;;)
2223 {
2224 /* Find range. */
2225 while (pRam && GCPhys > pRam->GCPhysLast)
2226 pRam = pRam->CTX_SUFF(pNext);
2227 /* Inside range or not? */
2228 if (pRam && GCPhys >= pRam->GCPhys)
2229 {
2230 /*
2231 * Must work our way thru this page by page.
2232 */
2233            RTGCPHYS off = GCPhys - pRam->GCPhys;
2234            while (off < pRam->cb)
2235            {
2236                unsigned iPage = off >> PAGE_SHIFT;
2237 PPGMPAGE pPage = &pRam->aPages[iPage];
2238 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2239 if (cb > cbWrite)
2240 cb = cbWrite;
2241
2242 /*
2243 * Any active WRITE or ALL access handlers?
2244 */
2245 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2246 {
2247 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2248 if (RT_FAILURE(rc))
2249 {
2250 pgmUnlock(pVM);
2251 return rc;
2252 }
2253 }
2254 else
2255 {
2256 /*
2257 * Get the pointer to the page.
2258 */
2259 void *pvDst;
2260 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
2261 if (RT_SUCCESS(rc))
2262 memcpy(pvDst, pvBuf, cb);
2263 else
2264 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2265 pRam->GCPhys + off, pPage, rc));
2266 }
2267
2268 /* next page */
2269 if (cb >= cbWrite)
2270 {
2271 pgmUnlock(pVM);
2272 return VINF_SUCCESS;
2273 }
2274
2275 cbWrite -= cb;
2276 off += cb;
2277 pvBuf = (const char *)pvBuf + cb;
2278 } /* walk pages in ram range */
2279
2280 GCPhys = pRam->GCPhysLast + 1;
2281 }
2282 else
2283 {
2284 /*
2285 * Unassigned address space, skip it.
2286 */
2287 if (!pRam)
2288 break;
2289 size_t cb = pRam->GCPhys - GCPhys;
2290 if (cb >= cbWrite)
2291 break;
2292 cbWrite -= cb;
2293 pvBuf = (const char *)pvBuf + cb;
2294 GCPhys += cb;
2295 }
2296 } /* Ram range walk */
2297
2298 pgmUnlock(pVM);
2299 return VINF_SUCCESS;
2300}
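
/*
 * Usage sketch for PGMPhysWrite (hypothetical helper; names are illustrative).
 * It shows the status handling a context-agnostic caller needs: in R0 and RC
 * an access handler hit cannot be serviced here, so the write must be
 * deferred to ring-3 by the caller.
 */
static int examplePokeGuestPhys(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value)
{
    int rc = PGMPhysWrite(pVM, GCPhys, &u32Value, sizeof(u32Value));
#ifndef IN_RING3
    if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)
        return rc; /* the caller reschedules the write to ring-3 */
#endif
    return rc;
}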
2301
2302
2303/**
2304 * Read from guest physical memory by GC physical address, bypassing
2305 * MMIO and access handlers.
2306 *
2307 * @returns VBox status.
2308 * @param pVM VM handle.
2309 * @param pvDst The destination address.
2310 * @param GCPhysSrc The source address (GC physical address).
2311 * @param cb The number of bytes to read.
2312 */
2313VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2314{
2315 /*
2316 * Treat the first page as a special case.
2317 */
2318 if (!cb)
2319 return VINF_SUCCESS;
2320
2321 /* map the 1st page */
2322 void const *pvSrc;
2323 PGMPAGEMAPLOCK Lock;
2324 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2325 if (RT_FAILURE(rc))
2326 return rc;
2327
2328 /* optimize for the case where access is completely within the first page. */
2329 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2330 if (RT_LIKELY(cb <= cbPage))
2331 {
2332 memcpy(pvDst, pvSrc, cb);
2333 PGMPhysReleasePageMappingLock(pVM, &Lock);
2334 return VINF_SUCCESS;
2335 }
2336
2337 /* copy to the end of the page. */
2338 memcpy(pvDst, pvSrc, cbPage);
2339 PGMPhysReleasePageMappingLock(pVM, &Lock);
2340 GCPhysSrc += cbPage;
2341 pvDst = (uint8_t *)pvDst + cbPage;
2342 cb -= cbPage;
2343
2344 /*
2345 * Page by page.
2346 */
2347 for (;;)
2348 {
2349 /* map the page */
2350 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2351 if (RT_FAILURE(rc))
2352 return rc;
2353
2354 /* last page? */
2355 if (cb <= PAGE_SIZE)
2356 {
2357 memcpy(pvDst, pvSrc, cb);
2358 PGMPhysReleasePageMappingLock(pVM, &Lock);
2359 return VINF_SUCCESS;
2360 }
2361
2362 /* copy the entire page and advance */
2363 memcpy(pvDst, pvSrc, PAGE_SIZE);
2364 PGMPhysReleasePageMappingLock(pVM, &Lock);
2365 GCPhysSrc += PAGE_SIZE;
2366 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2367 cb -= PAGE_SIZE;
2368 }
2369 /* won't ever get here. */
2370}
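
/*
 * Usage sketch for PGMPhysSimpleReadGCPhys (hypothetical; EXAMPLEGUESTHDR is a
 * made-up structure).  Convenient for pulling a small guest-physical structure
 * into host memory without triggering any access handlers.
 */
typedef struct EXAMPLEGUESTHDR
{
    uint32_t u32Magic;
    uint32_t cbSize;
} EXAMPLEGUESTHDR;

static int exampleReadGuestHeader(PVM pVM, RTGCPHYS GCPhysHdr, EXAMPLEGUESTHDR *pHdr)
{
    return PGMPhysSimpleReadGCPhys(pVM, pHdr, GCPhysHdr, sizeof(*pHdr));
}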
2371
2372
2373/**
2374 * Write to guest physical memory referenced by GC physical address, i.e.
2375 * write memory to a GC physical address in guest physical memory.
2376 *
2377 * This will bypass MMIO and access handlers.
2378 *
2379 * @returns VBox status.
2380 * @param pVM VM handle.
2381 * @param GCPhysDst The GC physical address of the destination.
2382 * @param pvSrc The source buffer.
2383 * @param cb The number of bytes to write.
2384 */
2385VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2386{
2387 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2388
2389 /*
2390 * Treat the first page as a special case.
2391 */
2392 if (!cb)
2393 return VINF_SUCCESS;
2394
2395 /* map the 1st page */
2396 void *pvDst;
2397 PGMPAGEMAPLOCK Lock;
2398 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2399 if (RT_FAILURE(rc))
2400 return rc;
2401
2402 /* optimize for the case where access is completely within the first page. */
2403 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2404 if (RT_LIKELY(cb <= cbPage))
2405 {
2406 memcpy(pvDst, pvSrc, cb);
2407 PGMPhysReleasePageMappingLock(pVM, &Lock);
2408 return VINF_SUCCESS;
2409 }
2410
2411 /* copy to the end of the page. */
2412 memcpy(pvDst, pvSrc, cbPage);
2413 PGMPhysReleasePageMappingLock(pVM, &Lock);
2414 GCPhysDst += cbPage;
2415 pvSrc = (const uint8_t *)pvSrc + cbPage;
2416 cb -= cbPage;
2417
2418 /*
2419 * Page by page.
2420 */
2421 for (;;)
2422 {
2423 /* map the page */
2424 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2425 if (RT_FAILURE(rc))
2426 return rc;
2427
2428 /* last page? */
2429 if (cb <= PAGE_SIZE)
2430 {
2431 memcpy(pvDst, pvSrc, cb);
2432 PGMPhysReleasePageMappingLock(pVM, &Lock);
2433 return VINF_SUCCESS;
2434 }
2435
2436 /* copy the entire page and advance */
2437 memcpy(pvDst, pvSrc, PAGE_SIZE);
2438 PGMPhysReleasePageMappingLock(pVM, &Lock);
2439 GCPhysDst += PAGE_SIZE;
2440 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2441 cb -= PAGE_SIZE;
2442 }
2443 /* won't ever get here. */
2444}
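
/*
 * Sketch of the map/modify/release pattern the two simple GCPhys functions
 * above are built on, for a hypothetical caller that wants a short-lived host
 * mapping of one guest page instead of a buffered copy.  Names illustrative.
 */
static int exampleSetGuestByte(PVM pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    void          *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue; /* the pointer already includes the page offset of GCPhys */
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}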
2445
2446
2447/**
2448 * Read from guest physical memory referenced by GC pointer.
2449 *
2450 * This function uses the current CR3/CR0/CR4 of the guest and will
2451 * bypass access handlers and not set any accessed bits.
2452 *
2453 * @returns VBox status.
2454 * @param pVCpu The VMCPU handle.
2455 * @param pvDst The destination address.
2456 * @param GCPtrSrc The source address (GC pointer).
2457 * @param cb The number of bytes to read.
2458 */
2459VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2460{
2461 PVM pVM = pVCpu->CTX_SUFF(pVM);
2462
2463 /*
2464 * Treat the first page as a special case.
2465 */
2466 if (!cb)
2467 return VINF_SUCCESS;
2468
2469 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleRead));
2470 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
2471
2472 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
2473 * when many VCPUs are fighting for the lock.
2474 */
2475 pgmLock(pVM);
2476
2477 /* map the 1st page */
2478 void const *pvSrc;
2479 PGMPAGEMAPLOCK Lock;
2480 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2481 if (RT_FAILURE(rc))
2482 {
2483 pgmUnlock(pVM);
2484 return rc;
2485 }
2486
2487 /* optimize for the case where access is completely within the first page. */
2488 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2489 if (RT_LIKELY(cb <= cbPage))
2490 {
2491 memcpy(pvDst, pvSrc, cb);
2492 PGMPhysReleasePageMappingLock(pVM, &Lock);
2493 pgmUnlock(pVM);
2494 return VINF_SUCCESS;
2495 }
2496
2497 /* copy to the end of the page. */
2498 memcpy(pvDst, pvSrc, cbPage);
2499 PGMPhysReleasePageMappingLock(pVM, &Lock);
2500 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
2501 pvDst = (uint8_t *)pvDst + cbPage;
2502 cb -= cbPage;
2503
2504 /*
2505 * Page by page.
2506 */
2507 for (;;)
2508 {
2509 /* map the page */
2510 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2511 if (RT_FAILURE(rc))
2512 {
2513 pgmUnlock(pVM);
2514 return rc;
2515 }
2516
2517 /* last page? */
2518 if (cb <= PAGE_SIZE)
2519 {
2520 memcpy(pvDst, pvSrc, cb);
2521 PGMPhysReleasePageMappingLock(pVM, &Lock);
2522 pgmUnlock(pVM);
2523 return VINF_SUCCESS;
2524 }
2525
2526 /* copy the entire page and advance */
2527 memcpy(pvDst, pvSrc, PAGE_SIZE);
2528 PGMPhysReleasePageMappingLock(pVM, &Lock);
2529 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
2530 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2531 cb -= PAGE_SIZE;
2532 }
2533 /* won't ever get here. */
2534}
2535
2536
2537/**
2538 * Write to guest physical memory referenced by GC pointer.
2539 *
2540 * This function uses the current CR3/CR0/CR4 of the guest and will
2541 * bypass access handlers and not set dirty or accessed bits.
2542 *
2543 * @returns VBox status.
2544 * @param pVCpu The VMCPU handle.
2545 * @param GCPtrDst The destination address (GC pointer).
2546 * @param pvSrc The source address.
2547 * @param cb The number of bytes to write.
2548 */
2549VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2550{
2551 PVM pVM = pVCpu->CTX_SUFF(pVM);
2552
2553 /*
2554 * Treat the first page as a special case.
2555 */
2556 if (!cb)
2557 return VINF_SUCCESS;
2558
2559 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWrite));
2560 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
2561
2562 /* map the 1st page */
2563 void *pvDst;
2564 PGMPAGEMAPLOCK Lock;
2565 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2566 if (RT_FAILURE(rc))
2567 return rc;
2568
2569 /* optimize for the case where access is completely within the first page. */
2570 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2571 if (RT_LIKELY(cb <= cbPage))
2572 {
2573 memcpy(pvDst, pvSrc, cb);
2574 PGMPhysReleasePageMappingLock(pVM, &Lock);
2575 return VINF_SUCCESS;
2576 }
2577
2578 /* copy to the end of the page. */
2579 memcpy(pvDst, pvSrc, cbPage);
2580 PGMPhysReleasePageMappingLock(pVM, &Lock);
2581 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2582 pvSrc = (const uint8_t *)pvSrc + cbPage;
2583 cb -= cbPage;
2584
2585 /*
2586 * Page by page.
2587 */
2588 for (;;)
2589 {
2590 /* map the page */
2591 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2592 if (RT_FAILURE(rc))
2593 return rc;
2594
2595 /* last page? */
2596 if (cb <= PAGE_SIZE)
2597 {
2598 memcpy(pvDst, pvSrc, cb);
2599 PGMPhysReleasePageMappingLock(pVM, &Lock);
2600 return VINF_SUCCESS;
2601 }
2602
2603 /* copy the entire page and advance */
2604 memcpy(pvDst, pvSrc, PAGE_SIZE);
2605 PGMPhysReleasePageMappingLock(pVM, &Lock);
2606 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2607 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2608 cb -= PAGE_SIZE;
2609 }
2610 /* won't ever get here. */
2611}
2612
2613
2614/**
2615 * Write to guest physical memory referenced by GC pointer and update the PTE.
2616 *
2617 * This function uses the current CR3/CR0/CR4 of the guest and will
2618 * bypass access handlers but will set any dirty and accessed bits in the PTE.
2619 *
2620 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
2621 *
2622 * @returns VBox status.
2623 * @param pVCpu The VMCPU handle.
2624 * @param GCPtrDst The destination address (GC pointer).
2625 * @param pvSrc The source address.
2626 * @param cb The number of bytes to write.
2627 */
2628VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2629{
2630 PVM pVM = pVCpu->CTX_SUFF(pVM);
2631
2632 /*
2633 * Treat the first page as a special case.
2634 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
2635 */
2636 if (!cb)
2637 return VINF_SUCCESS;
2638
2639 /* map the 1st page */
2640 void *pvDst;
2641 PGMPAGEMAPLOCK Lock;
2642 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2643 if (RT_FAILURE(rc))
2644 return rc;
2645
2646 /* optimize for the case where access is completely within the first page. */
2647 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2648 if (RT_LIKELY(cb <= cbPage))
2649 {
2650 memcpy(pvDst, pvSrc, cb);
2651 PGMPhysReleasePageMappingLock(pVM, &Lock);
2652 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2653 return VINF_SUCCESS;
2654 }
2655
2656 /* copy to the end of the page. */
2657 memcpy(pvDst, pvSrc, cbPage);
2658 PGMPhysReleasePageMappingLock(pVM, &Lock);
2659 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2660 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2661 pvSrc = (const uint8_t *)pvSrc + cbPage;
2662 cb -= cbPage;
2663
2664 /*
2665 * Page by page.
2666 */
2667 for (;;)
2668 {
2669 /* map the page */
2670 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2671 if (RT_FAILURE(rc))
2672 return rc;
2673
2674 /* last page? */
2675 if (cb <= PAGE_SIZE)
2676 {
2677 memcpy(pvDst, pvSrc, cb);
2678 PGMPhysReleasePageMappingLock(pVM, &Lock);
2679 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2680 return VINF_SUCCESS;
2681 }
2682
2683 /* copy the entire page and advance */
2684 memcpy(pvDst, pvSrc, PAGE_SIZE);
2685 PGMPhysReleasePageMappingLock(pVM, &Lock);
2686 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2687 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2688 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2689 cb -= PAGE_SIZE;
2690 }
2691 /* won't ever get here. */
2692}
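
/*
 * Usage sketch contrasting the simple GC-pointer write variants above
 * (hypothetical patching helper; the 0xcc byte is just an example).  The
 * dirty variant is the one to use when the guest page tables must end up
 * with both the accessed and dirty bits set.
 */
static int exampleWritePatchByte(PVMCPU pVCpu, RTGCPTR GCPtrPatch, bool fMarkDirty)
{
    uint8_t const bInt3 = 0xcc;
    if (fMarkDirty)
        return PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrPatch, &bInt3, sizeof(bInt3));
    return PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrPatch, &bInt3, sizeof(bInt3));
}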
2693
2694
2695/**
2696 * Read from guest physical memory referenced by GC pointer.
2697 *
2698 * This function uses the current CR3/CR0/CR4 of the guest and will
2699 * respect access handlers and set accessed bits.
2700 *
2701 * @returns VBox status.
2702 * @param pVCpu The VMCPU handle.
2703 * @param pvDst The destination address.
2704 * @param GCPtrSrc The source address (GC pointer).
2705 * @param cb The number of bytes to read.
2706 * @thread The vCPU EMT.
2707 */
2708VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2709{
2710 RTGCPHYS GCPhys;
2711 uint64_t fFlags;
2712 int rc;
2713 PVM pVM = pVCpu->CTX_SUFF(pVM);
2714
2715 /*
2716 * Anything to do?
2717 */
2718 if (!cb)
2719 return VINF_SUCCESS;
2720
2721 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
2722
2723 /*
2724 * Optimize reads within a single page.
2725 */
2726 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2727 {
2728 /* Convert virtual to physical address + flags */
2729 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2730 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2731 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2732
2733 /* mark the guest page as accessed. */
2734 if (!(fFlags & X86_PTE_A))
2735 {
2736 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2737 AssertRC(rc);
2738 }
2739
2740 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
2741 }
2742
2743 /*
2744 * Page by page.
2745 */
2746 for (;;)
2747 {
2748 /* Convert virtual to physical address + flags */
2749 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2750 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2751 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2752
2753 /* mark the guest page as accessed. */
2754 if (!(fFlags & X86_PTE_A))
2755 {
2756 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2757 AssertRC(rc);
2758 }
2759
2760 /* copy */
2761 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2762 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
2763 if (cbRead >= cb || RT_FAILURE(rc))
2764 return rc;
2765
2766 /* next */
2767 cb -= cbRead;
2768 pvDst = (uint8_t *)pvDst + cbRead;
2769 GCPtrSrc += cbRead;
2770 }
2771}
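
/*
 * Usage sketch for PGMPhysReadGCPtr (hypothetical helper, EMT only).  Unlike
 * the "simple" variants this goes through PGMPhysRead page by page, so access
 * handlers fire and the accessed bit is set on the guest PTEs.
 */
static int exampleReadGuestBuffer(PVMCPU pVCpu, RTGCPTR GCPtrSrc, void *pvDst, size_t cb)
{
    int rc = PGMPhysReadGCPtr(pVCpu, pvDst, GCPtrSrc, cb);
    if (RT_FAILURE(rc))
        Log(("exampleReadGuestBuffer: %RGv %zu -> %Rrc\n", GCPtrSrc, cb, rc));
    return rc;
}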
2772
2773
2774/**
2775 * Write to guest physical memory referenced by GC pointer.
2776 *
2777 * This function uses the current CR3/CR0/CR4 of the guest and will
2778 * respect access handlers and set dirty and accessed bits.
2779 *
2780 * @returns VBox status.
2781 * @retval VINF_SUCCESS.
2782 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2783 *
2784 * @param pVCpu The VMCPU handle.
2785 * @param GCPtrDst The destination address (GC pointer).
2786 * @param pvSrc The source address.
2787 * @param cb The number of bytes to write.
2788 */
2789VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2790{
2791 RTGCPHYS GCPhys;
2792 uint64_t fFlags;
2793 int rc;
2794 PVM pVM = pVCpu->CTX_SUFF(pVM);
2795
2796 /*
2797 * Anything to do?
2798 */
2799 if (!cb)
2800 return VINF_SUCCESS;
2801
2802 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
2803
2804 /*
2805 * Optimize writes within a single page.
2806 */
2807 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2808 {
2809 /* Convert virtual to physical address + flags */
2810 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2811 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2812 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2813
2814 /* Mention when we ignore X86_PTE_RW... */
2815 if (!(fFlags & X86_PTE_RW))
2816 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2817
2818 /* Mark the guest page as accessed and dirty if necessary. */
2819 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2820 {
2821 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2822 AssertRC(rc);
2823 }
2824
2825 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
2826 }
2827
2828 /*
2829 * Page by page.
2830 */
2831 for (;;)
2832 {
2833 /* Convert virtual to physical address + flags */
2834 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2835 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2836 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2837
2838 /* Mention when we ignore X86_PTE_RW... */
2839 if (!(fFlags & X86_PTE_RW))
2840 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2841
2842 /* Mark the guest page as accessed and dirty if necessary. */
2843 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2844 {
2845 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2846 AssertRC(rc);
2847 }
2848
2849 /* copy */
2850 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2851 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
2852 if (cbWrite >= cb || RT_FAILURE(rc))
2853 return rc;
2854
2855 /* next */
2856 cb -= cbWrite;
2857 pvSrc = (uint8_t *)pvSrc + cbWrite;
2858 GCPtrDst += cbWrite;
2859 }
2860}
2861
2862
2863/**
2864 * Performs a read of guest virtual memory for instruction emulation.
2865 *
2866 * This will check permissions, raise exceptions and update the access bits.
2867 *
2868 * The current implementation will bypass all access handlers. It may later be
2869 * changed to at least respect MMIO.
2870 *
2871 *
2872 * @returns VBox status code suitable to scheduling.
2873 * @retval VINF_SUCCESS if the read was performed successfully.
2874 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2875 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2876 *
2877 * @param pVCpu The VMCPU handle.
2878 * @param pCtxCore The context core.
2879 * @param pvDst Where to put the bytes we've read.
2880 * @param GCPtrSrc The source address.
2881 * @param cb The number of bytes to read. Not more than a page.
2882 *
2883 * @remark This function will dynamically map physical pages in GC. This may unmap
2884 * mappings done by the caller. Be careful!
2885 */
2886VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
2887{
2888 PVM pVM = pVCpu->CTX_SUFF(pVM);
2889 Assert(cb <= PAGE_SIZE);
2890
2891/** @todo r=bird: This isn't perfect!
2892 * -# It's not checking for reserved bits being 1.
2893 * -# It's not correctly dealing with the access bit.
2894 * -# It's not respecting MMIO memory or any other access handlers.
2895 */
2896 /*
2897 * 1. Translate virtual to physical. This may fault.
2898 * 2. Map the physical address.
2899 * 3. Do the read operation.
2900 * 4. Set access bits if required.
2901 */
2902 int rc;
2903 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
2904 if (cb <= cb1)
2905 {
2906 /*
2907 * Not crossing pages.
2908 */
2909 RTGCPHYS GCPhys;
2910 uint64_t fFlags;
2911 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
2912 if (RT_SUCCESS(rc))
2913 {
2914 /** @todo we should check reserved bits ... */
2915 void *pvSrc;
2916 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
2917 switch (rc)
2918 {
2919 case VINF_SUCCESS:
2920 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
2921 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
2922 break;
2923 case VERR_PGM_PHYS_PAGE_RESERVED:
2924 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2925 memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
2926 break;
2927 default:
2928 return rc;
2929 }
2930
2931 /** @todo access bit emulation isn't 100% correct. */
2932 if (!(fFlags & X86_PTE_A))
2933 {
2934 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2935 AssertRC(rc);
2936 }
2937 return VINF_SUCCESS;
2938 }
2939 }
2940 else
2941 {
2942 /*
2943 * Crosses pages.
2944 */
2945 size_t cb2 = cb - cb1;
2946 uint64_t fFlags1;
2947 RTGCPHYS GCPhys1;
2948 uint64_t fFlags2;
2949 RTGCPHYS GCPhys2;
2950 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
2951 if (RT_SUCCESS(rc))
2952 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
2953 if (RT_SUCCESS(rc))
2954 {
2955 /** @todo we should check reserved bits ... */
2956 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
2957 void *pvSrc1;
2958 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
2959 switch (rc)
2960 {
2961 case VINF_SUCCESS:
2962 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
2963 break;
2964 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2965 memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
2966 break;
2967 default:
2968 return rc;
2969 }
2970
2971 void *pvSrc2;
2972 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
2973 switch (rc)
2974 {
2975 case VINF_SUCCESS:
2976 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
2977 break;
2978 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2979 memset((uint8_t *)pvDst + cb1, 0, cb2); /** @todo this is wrong, it should be 0xff */
2980 break;
2981 default:
2982 return rc;
2983 }
2984
2985 if (!(fFlags1 & X86_PTE_A))
2986 {
2987 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2988 AssertRC(rc);
2989 }
2990 if (!(fFlags2 & X86_PTE_A))
2991 {
2992 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2993 AssertRC(rc);
2994 }
2995 return VINF_SUCCESS;
2996 }
2997 }
2998
2999 /*
3000 * Raise a #PF.
3001 */
3002 uint32_t uErr;
3003
3004 /* Get the current privilege level. */
3005 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3006 switch (rc)
3007 {
3008 case VINF_SUCCESS:
3009 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3010 break;
3011
3012 case VERR_PAGE_NOT_PRESENT:
3013 case VERR_PAGE_TABLE_NOT_PRESENT:
3014 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3015 break;
3016
3017 default:
3018 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3019 return rc;
3020 }
3021 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3022 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3023}
3024
3025
3026/**
3027 * Performs a read of guest virtual memory for instruction emulation.
3028 *
3029 * This will check permissions, raise exceptions and update the access bits.
3030 *
3031 * The current implementation will bypass all access handlers. It may later be
3032 * changed to at least respect MMIO.
3033 *
3034 *
3035 * @returns VBox status code suitable to scheduling.
3036 * @retval VINF_SUCCESS if the read was performed successfully.
3037 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3038 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3039 *
3040 * @param pVCpu The VMCPU handle.
3041 * @param pCtxCore The context core.
3042 * @param pvDst Where to put the bytes we've read.
3043 * @param GCPtrSrc The source address.
3044 * @param cb The number of bytes to read. Not more than a page.
3045 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3046 *                      an appropriate error status will be returned (no
3047 *                      informational status at all).
3048 *
3049 *
3050 * @remarks Takes the PGM lock.
3051 * @remarks A page fault on the 2nd page of the access will be raised without
3052 * writing the bits on the first page since we're ASSUMING that the
3053 * caller is emulating an instruction access.
3054 * @remarks This function will dynamically map physical pages in GC. This may
3055 * unmap mappings done by the caller. Be careful!
3056 */
3057VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
3058{
3059 PVM pVM = pVCpu->CTX_SUFF(pVM);
3060 Assert(cb <= PAGE_SIZE);
3061
3062 /*
3063 * 1. Translate virtual to physical. This may fault.
3064 * 2. Map the physical address.
3065 * 3. Do the read operation.
3066 * 4. Set access bits if required.
3067 */
3068 int rc;
3069 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3070 if (cb <= cb1)
3071 {
3072 /*
3073 * Not crossing pages.
3074 */
3075 RTGCPHYS GCPhys;
3076 uint64_t fFlags;
3077 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3078 if (RT_SUCCESS(rc))
3079 {
3080 if (1) /** @todo we should check reserved bits ... */
3081 {
3082 const void *pvSrc;
3083 PGMPAGEMAPLOCK Lock;
3084 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3085 switch (rc)
3086 {
3087 case VINF_SUCCESS:
3088 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3089 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3090 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3091 break;
3092 case VERR_PGM_PHYS_PAGE_RESERVED:
3093 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3094 memset(pvDst, 0xff, cb);
3095 break;
3096 default:
3097 AssertMsgFailed(("%Rrc\n", rc));
3098 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3099 return rc;
3100 }
3101 PGMPhysReleasePageMappingLock(pVM, &Lock);
3102
3103 if (!(fFlags & X86_PTE_A))
3104 {
3105 /** @todo access bit emulation isn't 100% correct. */
3106 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3107 AssertRC(rc);
3108 }
3109 return VINF_SUCCESS;
3110 }
3111 }
3112 }
3113 else
3114 {
3115 /*
3116 * Crosses pages.
3117 */
3118 size_t cb2 = cb - cb1;
3119 uint64_t fFlags1;
3120 RTGCPHYS GCPhys1;
3121 uint64_t fFlags2;
3122 RTGCPHYS GCPhys2;
3123 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3124 if (RT_SUCCESS(rc))
3125 {
3126 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3127 if (RT_SUCCESS(rc))
3128 {
3129 if (1) /** @todo we should check reserved bits ... */
3130 {
3131 const void *pvSrc;
3132 PGMPAGEMAPLOCK Lock;
3133 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3134 switch (rc)
3135 {
3136 case VINF_SUCCESS:
3137 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3138 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3139 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3140 PGMPhysReleasePageMappingLock(pVM, &Lock);
3141 break;
3142 case VERR_PGM_PHYS_PAGE_RESERVED:
3143 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3144 memset(pvDst, 0xff, cb1);
3145 break;
3146 default:
3147 AssertMsgFailed(("%Rrc\n", rc));
3148 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3149 return rc;
3150 }
3151
3152 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3153 switch (rc)
3154 {
3155 case VINF_SUCCESS:
3156 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3157 PGMPhysReleasePageMappingLock(pVM, &Lock);
3158 break;
3159 case VERR_PGM_PHYS_PAGE_RESERVED:
3160 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3161 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3162 break;
3163 default:
3164 AssertMsgFailed(("%Rrc\n", rc));
3165 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3166 return rc;
3167 }
3168
3169 if (!(fFlags1 & X86_PTE_A))
3170 {
3171 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3172 AssertRC(rc);
3173 }
3174 if (!(fFlags2 & X86_PTE_A))
3175 {
3176 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3177 AssertRC(rc);
3178 }
3179 return VINF_SUCCESS;
3180 }
3181 /* sort out which page */
3182 }
3183 else
3184 GCPtrSrc += cb1; /* fault on 2nd page */
3185 }
3186 }
3187
3188 /*
3189 * Raise a #PF if we're allowed to do that.
3190 */
3191 /* Calc the error bits. */
3192 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3193 uint32_t uErr;
3194 switch (rc)
3195 {
3196 case VINF_SUCCESS:
3197 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3198 rc = VERR_ACCESS_DENIED;
3199 break;
3200
3201 case VERR_PAGE_NOT_PRESENT:
3202 case VERR_PAGE_TABLE_NOT_PRESENT:
3203 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3204 break;
3205
3206 default:
3207 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3208 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3209 return rc;
3210 }
3211 if (fRaiseTrap)
3212 {
3213 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3214 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3215 }
3216 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3217 return rc;
3218}
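
/*
 * Usage sketch for the interpreted-read helper above (hypothetical operand
 * fetch for an instruction emulator).  With fRaiseTrap=true a failed
 * translation injects a #PF and the informational status is handed straight
 * back to the scheduler; with false the caller just gets an error status.
 */
static int exampleFetchOperandU32(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrSrc, uint32_t *pu32Value)
{
    return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pu32Value, GCPtrSrc,
                                            sizeof(*pu32Value), true /*fRaiseTrap*/);
}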
3219
3220
3221/**
3222 * Performs a write to guest virtual memory for instruction emulation.
3223 *
3224 * This will check permissions, raise exceptions and update the dirty and access
3225 * bits.
3226 *
3227 * @returns VBox status code suitable to scheduling.
3228 * @retval VINF_SUCCESS if the read was performed successfully.
3229 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3230 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3231 *
3232 * @param pVCpu The VMCPU handle.
3233 * @param pCtxCore The context core.
3234 * @param GCPtrDst The destination address.
3235 * @param pvSrc What to write.
3236 * @param cb The number of bytes to write. Not more than a page.
3237 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3238 *                      an appropriate error status will be returned (no
3239 *                      informational status at all).
3240 *
3241 * @remarks Takes the PGM lock.
3242 * @remarks A page fault on the 2nd page of the access will be raised without
3243 * writing the bits on the first page since we're ASSUMING that the
3244 * caller is emulating an instruction access.
3245 * @remarks This function will dynamically map physical pages in GC. This may
3246 * unmap mappings done by the caller. Be careful!
3247 */
3248VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
3249{
3250 Assert(cb <= PAGE_SIZE);
3251 PVM pVM = pVCpu->CTX_SUFF(pVM);
3252
3253 /*
3254 * 1. Translate virtual to physical. This may fault.
3255 * 2. Map the physical address.
3256 * 3. Do the write operation.
3257 * 4. Set access bits if required.
3258 */
3259 int rc;
3260 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3261 if (cb <= cb1)
3262 {
3263 /*
3264 * Not crossing pages.
3265 */
3266 RTGCPHYS GCPhys;
3267 uint64_t fFlags;
3268 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3269 if (RT_SUCCESS(rc))
3270 {
3271 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3272 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3273 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3274 {
3275 void *pvDst;
3276 PGMPAGEMAPLOCK Lock;
3277 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3278 switch (rc)
3279 {
3280 case VINF_SUCCESS:
3281 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3282 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3283 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3284 PGMPhysReleasePageMappingLock(pVM, &Lock);
3285 break;
3286 case VERR_PGM_PHYS_PAGE_RESERVED:
3287 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3288 /* bit bucket */
3289 break;
3290 default:
3291 AssertMsgFailed(("%Rrc\n", rc));
3292 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3293 return rc;
3294 }
3295
3296 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3297 {
3298 /** @todo dirty & access bit emulation isn't 100% correct. */
3299 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3300 AssertRC(rc);
3301 }
3302 return VINF_SUCCESS;
3303 }
3304 rc = VERR_ACCESS_DENIED;
3305 }
3306 }
3307 else
3308 {
3309 /*
3310 * Crosses pages.
3311 */
3312 size_t cb2 = cb - cb1;
3313 uint64_t fFlags1;
3314 RTGCPHYS GCPhys1;
3315 uint64_t fFlags2;
3316 RTGCPHYS GCPhys2;
3317 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3318 if (RT_SUCCESS(rc))
3319 {
3320 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3321 if (RT_SUCCESS(rc))
3322 {
3323 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3324 && (fFlags2 & X86_PTE_RW))
3325 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3326 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) )
3327 {
3328 void *pvDst;
3329 PGMPAGEMAPLOCK Lock;
3330 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3331 switch (rc)
3332 {
3333 case VINF_SUCCESS:
3334 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3335 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3336 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3337 PGMPhysReleasePageMappingLock(pVM, &Lock);
3338 break;
3339 case VERR_PGM_PHYS_PAGE_RESERVED:
3340 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3341 /* bit bucket */
3342 break;
3343 default:
3344 AssertMsgFailed(("%Rrc\n", rc));
3345 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3346 return rc;
3347 }
3348
3349 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3350 switch (rc)
3351 {
3352 case VINF_SUCCESS:
3353 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3354 PGMPhysReleasePageMappingLock(pVM, &Lock);
3355 break;
3356 case VERR_PGM_PHYS_PAGE_RESERVED:
3357 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3358 /* bit bucket */
3359 break;
3360 default:
3361 AssertMsgFailed(("%Rrc\n", rc));
3362 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3363 return rc;
3364 }
3365
3366 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3367 {
3368 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3369 AssertRC(rc);
3370 }
3371 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3372 {
3373 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3374 AssertRC(rc);
3375 }
3376 return VINF_SUCCESS;
3377 }
3378 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3379 GCPtrDst += cb1; /* fault on the 2nd page. */
3380 rc = VERR_ACCESS_DENIED;
3381 }
3382 else
3383 GCPtrDst += cb1; /* fault on the 2nd page. */
3384 }
3385 }
3386
3387 /*
3388 * Raise a #PF if we're allowed to do that.
3389 */
3390 /* Calc the error bits. */
3391 uint32_t uErr;
3392 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3393 switch (rc)
3394 {
3395 case VINF_SUCCESS:
3396 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3397 rc = VERR_ACCESS_DENIED;
3398 break;
3399
3400 case VERR_ACCESS_DENIED:
3401 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3402 break;
3403
3404 case VERR_PAGE_NOT_PRESENT:
3405 case VERR_PAGE_TABLE_NOT_PRESENT:
3406 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3407 break;
3408
3409 default:
3410 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3411 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3412 return rc;
3413 }
3414 if (fRaiseTrap)
3415 {
3416 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3417 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3418 }
3419 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3420 return rc;
3421}
3422
3423