VirtualBox source browser: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp @ r39127

Last change on this file since r39127 was r39078, checked in by vboxsync, 13 years ago: VMM: -Wunused-parameter
1/* $Id: PGMAllPhys.cpp 39078 2011-10-21 14:18:22Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM_PHYS
22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/trpm.h>
24#include <VBox/vmm/vmm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/em.h>
27#include <VBox/vmm/rem.h>
28#include "PGMInternal.h"
29#include <VBox/vmm/vm.h>
30#include "PGMInline.h"
31#include <VBox/param.h>
32#include <VBox/err.h>
33#include <iprt/assert.h>
34#include <iprt/string.h>
35#include <iprt/asm-amd64-x86.h>
36#include <VBox/log.h>
37#ifdef IN_RING3
38# include <iprt/thread.h>
39#endif
40
41
42/*******************************************************************************
43* Defined Constants And Macros *
44*******************************************************************************/
45/** Enable the physical TLB. */
46#define PGM_WITH_PHYS_TLB
47
48
49
50#ifndef IN_RING3
51
52/**
53 * \#PF Handler callback for physical memory accesses without a RC/R0 handler.
54 * This simply pushes everything to the HC handler.
55 *
56 * @returns VBox status code (appropriate for trap handling and GC return).
57 * @param pVM VM Handle.
58 * @param uErrorCode CPU Error code.
59 * @param pRegFrame Trap register frame.
60 * @param pvFault The fault address (cr2).
61 * @param GCPhysFault The GC physical address corresponding to pvFault.
62 * @param pvUser User argument.
63 */
64VMMDECL(int) pgmPhysHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
65{
66 NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(GCPhysFault); NOREF(pvUser);
67 return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
68}
69
70
71/**
72 * \#PF Handler callback for Guest ROM range write access.
73 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
74 *
75 * @returns VBox status code (appropriate for trap handling and GC return).
76 * @param pVM VM Handle.
77 * @param uErrorCode CPU Error code.
78 * @param pRegFrame Trap register frame.
79 * @param pvFault The fault address (cr2).
80 * @param GCPhysFault The GC physical address corresponding to pvFault.
81 * @param pvUser User argument. Pointer to the ROM range structure.
82 */
83VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
84{
85 int rc;
86 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
87 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
88 PVMCPU pVCpu = VMMGetCpu(pVM);
89 NOREF(uErrorCode); NOREF(pvFault);
90
91 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
92 switch (pRom->aPages[iPage].enmProt)
93 {
94 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
95 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
96 {
97 /*
98 * If it's a simple instruction which doesn't change the cpu state
99 * we will simply skip it. Otherwise we'll have to defer it to REM.
100 */
101 uint32_t cbOp;
102 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
103 rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, &cbOp);
104 if ( RT_SUCCESS(rc)
105 && pDis->mode == CPUMODE_32BIT /** @todo why does this matter? */
106 && !(pDis->prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
107 {
108 switch (pDis->opcode)
109 {
110 /** @todo Find other instructions we can safely skip, possibly
111 * adding this kind of detection to DIS or EM. */
112 case OP_MOV:
113 pRegFrame->rip += cbOp;
114 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteHandled);
115 return VINF_SUCCESS;
116 }
117 }
118 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
119 return rc;
120 break;
121 }
122
123 case PGMROMPROT_READ_RAM_WRITE_RAM:
124 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
125 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
126 AssertRC(rc);
127 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
128
129 case PGMROMPROT_READ_ROM_WRITE_RAM:
130 /* Handle it in ring-3 because it's *way* easier there. */
131 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
132 break;
133
134 default:
135 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
136 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
137 VERR_INTERNAL_ERROR);
138 }
139
140 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
141 return VINF_EM_RAW_EMULATE_INSTR;
142}
143
144#endif /* IN_RING3 */
145
146/**
147 * Invalidates the RAM range TLBs.
148 *
149 * @param pVM The VM handle.
150 */
151void pgmPhysInvalidRamRangeTlbs(PVM pVM)
152{
153 pgmLock(pVM);
154 for (uint32_t i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
155 {
156 pVM->pgm.s.apRamRangesTlbR3[i] = NIL_RTR3PTR;
157 pVM->pgm.s.apRamRangesTlbR0[i] = NIL_RTR0PTR;
158 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
159 }
160 pgmUnlock(pVM);
161}
162
163
164/**
165 * Tests if a value of type RTGCPHYS is negative if the type had been signed
166 * instead of unsigned.
167 *
168 * @returns @c true if negative, @c false if positive or zero.
169 * @param a_GCPhys The value to test.
170 * @todo Move me to iprt/types.h.
171 */
172#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
173
174
175/**
176 * Slow worker for pgmPhysGetRange.
177 *
178 * @copydoc pgmPhysGetRange
179 */
180PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
181{
182 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
183
184 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
185 while (pRam)
186 {
187 RTGCPHYS off = GCPhys - pRam->GCPhys;
188 if (off < pRam->cb)
189 {
190 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
191 return pRam;
192 }
193 if (RTGCPHYS_IS_NEGATIVE(off))
194 pRam = pRam->CTX_SUFF(pLeft);
195 else
196 pRam = pRam->CTX_SUFF(pRight);
197 }
198 return NULL;
199}
200
201
202/**
203 * Slow worker for pgmPhysGetRangeAtOrAbove.
204 *
205 * @copydoc pgmPhysGetRangeAtOrAbove
206 */
207PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
208{
209 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
210
211 PPGMRAMRANGE pLastLeft = NULL;
212 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
213 while (pRam)
214 {
215 RTGCPHYS off = GCPhys - pRam->GCPhys;
216 if (off < pRam->cb)
217 {
218 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
219 return pRam;
220 }
221 if (RTGCPHYS_IS_NEGATIVE(off))
222 {
223 pLastLeft = pRam;
224 pRam = pRam->CTX_SUFF(pLeft);
225 }
226 else
227 pRam = pRam->CTX_SUFF(pRight);
228 }
229 return pLastLeft;
230}
231
232
233/**
234 * Slow worker for pgmPhysGetPage.
235 *
236 * @copydoc pgmPhysGetPage
237 */
238PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
239{
240 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
241
242 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
243 while (pRam)
244 {
245 RTGCPHYS off = GCPhys - pRam->GCPhys;
246 if (off < pRam->cb)
247 {
248 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
249 return &pRam->aPages[off >> PAGE_SHIFT];
250 }
251
252 if (RTGCPHYS_IS_NEGATIVE(off))
253 pRam = pRam->CTX_SUFF(pLeft);
254 else
255 pRam = pRam->CTX_SUFF(pRight);
256 }
257 return NULL;
258}
259
260
261/**
262 * Slow worker for pgmPhysGetPageEx.
263 *
264 * @copydoc pgmPhysGetPageEx
265 */
266int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
267{
268 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
269
270 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
271 while (pRam)
272 {
273 RTGCPHYS off = GCPhys - pRam->GCPhys;
274 if (off < pRam->cb)
275 {
276 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
277 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
278 return VINF_SUCCESS;
279 }
280
281 if (RTGCPHYS_IS_NEGATIVE(off))
282 pRam = pRam->CTX_SUFF(pLeft);
283 else
284 pRam = pRam->CTX_SUFF(pRight);
285 }
286
287 *ppPage = NULL;
288 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
289}
290
291
292/**
293 * Slow worker for pgmPhysGetPageAndRangeEx.
294 *
295 * @copydoc pgmPhysGetPageAndRangeEx
296 */
297int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
298{
299 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
300
301 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
302 while (pRam)
303 {
304 RTGCPHYS off = GCPhys - pRam->GCPhys;
305 if (off < pRam->cb)
306 {
307 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
308 *ppRam = pRam;
309 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
310 return VINF_SUCCESS;
311 }
312
313 if (RTGCPHYS_IS_NEGATIVE(off))
314 pRam = pRam->CTX_SUFF(pLeft);
315 else
316 pRam = pRam->CTX_SUFF(pRight);
317 }
318
319 *ppRam = NULL;
320 *ppPage = NULL;
321 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
322}
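/*
 * Illustrative sketch (not part of the original source) of how these slow
 * workers are normally reached: callers go through the inline, RAM-range-TLB
 * backed wrappers (pgmPhysGetPage, pgmPhysGetPageEx and friends, presumably in
 * PGMInline.h) while holding the PGM lock, and only land here on a TLB miss.
 * GCPhys below is a placeholder for the example.
 *
 *    pgmLock(pVM);
 *    PPGMPAGE pPage;
 *    int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
 *    if (    RT_SUCCESS(rc)
 *        &&  PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
 *    {
 *        // ... inspect or update the PGMPAGE while the lock is held ...
 *    }
 *    pgmUnlock(pVM);
 */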
323
324
325/**
326 * Checks if Address Gate 20 is enabled or not.
327 *
328 * @returns true if enabled.
329 * @returns false if disabled.
330 * @param pVCpu VMCPU handle.
331 */
332VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
333{
334 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
335 return pVCpu->pgm.s.fA20Enabled;
336}
337
338
339/**
340 * Validates a GC physical address.
341 *
342 * @returns true if valid.
343 * @returns false if invalid.
344 * @param pVM The VM handle.
345 * @param GCPhys The physical address to validate.
346 */
347VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
348{
349 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
350 return pPage != NULL;
351}
352
353
354/**
355 * Checks if a GC physical address is a normal page,
356 * i.e. not ROM, MMIO or reserved.
357 *
358 * @returns true if normal.
359 * @returns false if invalid, ROM, MMIO or reserved page.
360 * @param pVM The VM handle.
361 * @param GCPhys The physical address to check.
362 */
363VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
364{
365 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
366 return pPage
367 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
368}
369
370
371/**
372 * Converts a GC physical address to a HC physical address.
373 *
374 * @returns VINF_SUCCESS on success.
375 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
376 * page but has no physical backing.
377 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
378 * GC physical address.
379 *
380 * @param pVM The VM handle.
381 * @param GCPhys The GC physical address to convert.
382 * @param pHCPhys Where to store the HC physical address on success.
383 */
384VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
385{
386 pgmLock(pVM);
387 PPGMPAGE pPage;
388 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
389 if (RT_SUCCESS(rc))
390 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
391 pgmUnlock(pVM);
392 return rc;
393}
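/*
 * Illustrative usage sketch (not part of the original source) for
 * PGMPhysGCPhys2HCPhys; the guest physical address is a made-up example value.
 *
 *    RTHCPHYS HCPhys;
 *    int rc = PGMPhysGCPhys2HCPhys(pVM, UINT32_C(0x000a0000), &HCPhys);  // example GCPhys
 *    if (RT_SUCCESS(rc))
 *        Log(("guest %#x -> host %RHp\n", 0x000a0000, HCPhys));
 */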
394
395
396/**
397 * Invalidates all page mapping TLBs.
398 *
399 * @param pVM The VM handle.
400 */
401void pgmPhysInvalidatePageMapTLB(PVM pVM)
402{
403 pgmLock(pVM);
404 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
405
406 /* Clear the shared R0/R3 TLB completely. */
407 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
408 {
409 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
410 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
411 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
412 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
413 }
414
415 /** @todo clear the RC TLB whenever we add it. */
416
417 pgmUnlock(pVM);
418}
419
420
421/**
422 * Invalidates a page mapping TLB entry
423 *
424 * @param pVM The VM handle.
425 * @param GCPhys GCPhys entry to flush
426 */
427void pgmPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
428{
429 PGM_LOCK_ASSERT_OWNER(pVM);
430
431 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
432
433#ifdef IN_RC
434 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
435 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
436 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
437 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
438 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
439#else
440 /* Clear the shared R0/R3 TLB entry. */
441 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
442 pTlbe->GCPhys = NIL_RTGCPHYS;
443 pTlbe->pPage = 0;
444 pTlbe->pMap = 0;
445 pTlbe->pv = 0;
446#endif
447
448 /** @todo clear the RC TLB whenever we add it. */
449}
450
451/**
452 * Makes sure that there is at least one handy page ready for use.
453 *
454 * This will also take the appropriate actions when reaching water-marks.
455 *
456 * @returns VBox status code.
457 * @retval VINF_SUCCESS on success.
458 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
459 *
460 * @param pVM The VM handle.
461 *
462 * @remarks Must be called from within the PGM critical section. It may
463 * nip back to ring-3/0 in some cases.
464 */
465static int pgmPhysEnsureHandyPage(PVM pVM)
466{
467 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
468
469 /*
470 * Do we need to do anything special?
471 */
472#ifdef IN_RING3
473 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
474#else
475 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
476#endif
477 {
478 /*
479 * Allocate pages only if we're out of them, or in ring-3, almost out.
480 */
481#ifdef IN_RING3
482 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
483#else
484 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
485#endif
486 {
487 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
488 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
489#ifdef IN_RING3
490 int rc = PGMR3PhysAllocateHandyPages(pVM);
491#else
492 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
493#endif
494 if (RT_UNLIKELY(rc != VINF_SUCCESS))
495 {
496 if (RT_FAILURE(rc))
497 return rc;
498 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
499 if (!pVM->pgm.s.cHandyPages)
500 {
501 LogRel(("PGM: no more handy pages!\n"));
502 return VERR_EM_NO_MEMORY;
503 }
504 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
505 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
506#ifdef IN_RING3
507 REMR3NotifyFF(pVM);
508#else
509 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
510#endif
511 }
512 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
513 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
514 ("%u\n", pVM->pgm.s.cHandyPages),
515 VERR_INTERNAL_ERROR);
516 }
517 else
518 {
519 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
520 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
521#ifndef IN_RING3
522 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
523 {
524 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
525 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
526 }
527#endif
528 }
529 }
530
531 return VINF_SUCCESS;
532}
533
534
535/**
536 * Replace a zero or shared page with new page that we can write to.
537 *
538 * @returns The following VBox status codes.
539 * @retval VINF_SUCCESS on success, pPage is modified.
540 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
541 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
542 *
543 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
544 *
545 * @param pVM The VM address.
546 * @param pPage The physical page tracking structure. This will
547 * be modified on success.
548 * @param GCPhys The address of the page.
549 *
550 * @remarks Must be called from within the PGM critical section. It may
551 * nip back to ring-3/0 in some cases.
552 *
553 * @remarks This function shouldn't really fail, however if it does
554 * it probably means we've screwed up the size of handy pages and/or
555 * the low-water mark. Or, that some device I/O is causing a lot of
556 * pages to be allocated while the host is in a low-memory
557 * condition. This latter should be handled elsewhere and in a more
558 * controlled manner, it's on the @bugref{3170} todo list...
559 */
560int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
561{
562 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
563
564 /*
565 * Prereqs.
566 */
567 PGM_LOCK_ASSERT_OWNER(pVM);
568 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
569 Assert(!PGM_PAGE_IS_MMIO(pPage));
570
571# ifdef PGM_WITH_LARGE_PAGES
572 /*
573 * Try to allocate a large page if applicable.
574 */
575 if ( PGMIsUsingLargePages(pVM)
576 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
577 {
578 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
579 PPGMPAGE pBasePage;
580
581 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
582 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
583 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
584 {
585 rc = pgmPhysAllocLargePage(pVM, GCPhys);
586 if (rc == VINF_SUCCESS)
587 return rc;
588 }
589 /* Mark the base as type page table, so we don't check over and over again. */
590 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
591
592 /* fall back to 4KB pages. */
593 }
594# endif
595
596 /*
597 * Flush any shadow page table mappings of the page.
598 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
599 */
600 bool fFlushTLBs = false;
601 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
602 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
603
604 /*
605 * Ensure that we've got a page handy, take it and use it.
606 */
607 int rc2 = pgmPhysEnsureHandyPage(pVM);
608 if (RT_FAILURE(rc2))
609 {
610 if (fFlushTLBs)
611 PGM_INVL_ALL_VCPU_TLBS(pVM);
612 Assert(rc2 == VERR_EM_NO_MEMORY);
613 return rc2;
614 }
615 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
616 PGM_LOCK_ASSERT_OWNER(pVM);
617 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
618 Assert(!PGM_PAGE_IS_MMIO(pPage));
619
620 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
621 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
622 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
623 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
624 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
625 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
626
627 /*
628 * There are one or two actions to be taken the next time we allocate handy pages:
629 * - Tell the GMM (global memory manager) what the page is being used for.
630 * (Speeds up replacement operations - sharing and defragmenting.)
631 * - If the current backing is shared, it must be freed.
632 */
633 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
634 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
635
636 void *pvSharedPage = NULL;
637 if (PGM_PAGE_IS_SHARED(pPage))
638 {
639 /* Mark this shared page for freeing/dereferencing. */
640 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
641 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
642
643 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
644 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
645 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageReplaceShared));
646 pVM->pgm.s.cSharedPages--;
647
648 /* Grab the address of the page so we can make a copy later on. (safe) */
649 rc = pgmPhysPageMap(pVM, pPage, GCPhys, &pvSharedPage);
650 AssertRC(rc);
651 }
652 else
653 {
654 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
655 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
656 pVM->pgm.s.cZeroPages--;
657 }
658
659 /*
660 * Do the PGMPAGE modifications.
661 */
662 pVM->pgm.s.cPrivatePages++;
663 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
664 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
665 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
666 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
667 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
668
669 /* Copy the shared page contents to the replacement page. */
670 if (pvSharedPage)
671 {
672 /* Get the virtual address of the new page. */
673 PGMPAGEMAPLOCK PgMpLck;
674 void *pvNewPage;
675 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
676 if (RT_SUCCESS(rc))
677 {
678 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo write ASMMemCopyPage */
679 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
680 }
681 }
682
683 if ( fFlushTLBs
684 && rc != VINF_PGM_GCPHYS_ALIASED)
685 PGM_INVL_ALL_VCPU_TLBS(pVM);
686 return rc;
687}
688
689#ifdef PGM_WITH_LARGE_PAGES
690
691/**
692 * Replace a 2 MB range of zero pages with new pages that we can write to.
693 *
694 * @returns The following VBox status codes.
695 * @retval VINF_SUCCESS on success, pPage is modified.
696 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
697 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
698 *
699 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
700 *
701 * @param pVM The VM address.
702 * @param GCPhys The address of the page.
703 *
704 * @remarks Must be called from within the PGM critical section. It may
705 * nip back to ring-3/0 in some cases.
706 */
707int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
708{
709 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
710 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
711
712 /*
713 * Prereqs.
714 */
715 PGM_LOCK_ASSERT_OWNER(pVM);
716 Assert(PGMIsUsingLargePages(pVM));
717
718 PPGMPAGE pFirstPage;
719 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
720 if ( RT_SUCCESS(rc)
721 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
722 {
723 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
724
725 /* Don't call this function for already allocated pages. */
726 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
727
728 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
729 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
730 {
731 /* Lazy approach: check all pages in the 2 MB range.
732 * The whole range must be ram and unallocated. */
733 GCPhys = GCPhysBase;
734 unsigned iPage;
735 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
736 {
737 PPGMPAGE pSubPage;
738 rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage);
739 if ( RT_FAILURE(rc)
740 || PGM_PAGE_GET_TYPE(pSubPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
741 || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
742 {
743 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
744 break;
745 }
746 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
747 GCPhys += PAGE_SIZE;
748 }
749 if (iPage != _2M/PAGE_SIZE)
750 {
751 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
752 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
753 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
754 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
755 }
756
757 /*
758 * Do the allocation.
759 */
760# ifdef IN_RING3
761 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
762# else
763 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
764# endif
765 if (RT_SUCCESS(rc))
766 {
767 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
768 pVM->pgm.s.cLargePages++;
769 return VINF_SUCCESS;
770 }
771
772 /* If we fail once, it most likely means the host's memory is too
773 fragmented; don't bother trying again. */
774 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
775 PGMSetLargePageUsage(pVM, false);
776 return rc;
777 }
778 }
779 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
780}
781
782
783/**
784 * Recheck the entire 2 MB range to see if we can use it again as a large page.
785 *
786 * @returns The following VBox status codes.
787 * @retval VINF_SUCCESS on success, the large page can be used again
788 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
789 *
790 * @param pVM The VM address.
791 * @param GCPhys The address of the page.
792 * @param pLargePage Page structure of the base page
793 */
794int pgmPhysRecheckLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
795{
796 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
797
798 GCPhys &= X86_PDE2M_PAE_PG_MASK;
799
800 /* Check the base page. */
801 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
802 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
803 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
804 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
805 {
806 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
807 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
808 }
809
810 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
811 /* Check all remaining pages in the 2 MB range. */
812 unsigned i;
813 GCPhys += PAGE_SIZE;
814 for (i = 1; i < _2M/PAGE_SIZE; i++)
815 {
816 PPGMPAGE pPage;
817 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
818 AssertRCBreak(rc);
819
820 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
821 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
822 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
823 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
824 {
825 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
826 break;
827 }
828
829 GCPhys += PAGE_SIZE;
830 }
831 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
832
833 if (i == _2M/PAGE_SIZE)
834 {
835 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
836 pVM->pgm.s.cLargePagesDisabled--;
837 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
838 return VINF_SUCCESS;
839 }
840
841 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
842}
843
844#endif /* PGM_WITH_LARGE_PAGES */
845
846/**
847 * Deal with a write monitored page.
848 *
851 * @param pVM The VM address.
852 * @param pPage The physical page tracking structure.
853 *
854 * @remarks Called from within the PGM critical section.
855 */
856void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
857{
858 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
859 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
860 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
861 Assert(pVM->pgm.s.cMonitoredPages > 0);
862 pVM->pgm.s.cMonitoredPages--;
863 pVM->pgm.s.cWrittenToPages++;
864}
865
866
867/**
868 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
869 *
870 * @returns VBox strict status code.
871 * @retval VINF_SUCCESS on success.
872 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
873 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
874 *
875 * @param pVM The VM address.
876 * @param pPage The physical page tracking structure.
877 * @param GCPhys The address of the page.
878 *
879 * @remarks Called from within the PGM critical section.
880 */
881int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
882{
883 PGM_LOCK_ASSERT_OWNER(pVM);
884 switch (PGM_PAGE_GET_STATE(pPage))
885 {
886 case PGM_PAGE_STATE_WRITE_MONITORED:
887 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
888 /* fall thru */
889 default: /* to shut up GCC */
890 case PGM_PAGE_STATE_ALLOCATED:
891 return VINF_SUCCESS;
892
893 /*
894 * Zero pages can be dummy pages for MMIO or reserved memory,
895 * so we need to check the flags before joining cause with
896 * shared page replacement.
897 */
898 case PGM_PAGE_STATE_ZERO:
899 if (PGM_PAGE_IS_MMIO(pPage))
900 return VERR_PGM_PHYS_PAGE_RESERVED;
901 /* fall thru */
902 case PGM_PAGE_STATE_SHARED:
903 return pgmPhysAllocPage(pVM, pPage, GCPhys);
904
905 /* Not allowed to write to ballooned pages. */
906 case PGM_PAGE_STATE_BALLOONED:
907 return VERR_PGM_PHYS_PAGE_BALLOONED;
908 }
909}
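/*
 * Added summary (not part of the original source) of the page state handling
 * in pgmPhysPageMakeWritable above:
 *      WRITE_MONITORED -> ALLOCATED    write monitoring lifted, counters updated
 *      ALLOCATED       -> ALLOCATED    nothing to do
 *      ZERO / SHARED   -> ALLOCATED    real backing allocated via pgmPhysAllocPage,
 *                                      unless the zero page is an MMIO dummy
 *                                      (VERR_PGM_PHYS_PAGE_RESERVED)
 *      BALLOONED       -> error        VERR_PGM_PHYS_PAGE_BALLOONED, never writable
 */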
910
911
912/**
913 * Internal usage: Map the page specified by its GMM ID.
914 *
915 * This is similar to pgmPhysPageMap
916 *
917 * @returns VBox status code.
918 *
919 * @param pVM The VM handle.
920 * @param idPage The Page ID.
921 * @param HCPhys The physical address (for RC).
922 * @param ppv Where to store the mapping address.
923 *
924 * @remarks Called from within the PGM critical section. The mapping is only
925 * valid while you are inside this section.
926 */
927int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
928{
929 /*
930 * Validation.
931 */
932 PGM_LOCK_ASSERT_OWNER(pVM);
933 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
934 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
935 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
936
937#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
938 /*
939 * Map it by HCPhys.
940 */
941 return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
942
943#else
944 /*
945 * Find/make Chunk TLB entry for the mapping chunk.
946 */
947 PPGMCHUNKR3MAP pMap;
948 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
949 if (pTlbe->idChunk == idChunk)
950 {
951 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
952 pMap = pTlbe->pChunk;
953 }
954 else
955 {
956 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
957
958 /*
959 * Find the chunk, map it if necessary.
960 */
961 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
962 if (pMap)
963 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
964 else
965 {
966# ifdef IN_RING0
967 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
968 AssertRCReturn(rc, rc);
969 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
970 Assert(pMap);
971# else
972 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
973 if (RT_FAILURE(rc))
974 return rc;
975# endif
976 }
977
978 /*
979 * Enter it into the Chunk TLB.
980 */
981 pTlbe->idChunk = idChunk;
982 pTlbe->pChunk = pMap;
983 }
984
985 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
986 return VINF_SUCCESS;
987#endif
988}
989
990
991/**
992 * Maps a page into the current virtual address space so it can be accessed.
993 *
994 * @returns VBox status code.
995 * @retval VINF_SUCCESS on success.
996 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
997 *
998 * @param pVM The VM address.
999 * @param pPage The physical page tracking structure.
1000 * @param GCPhys The address of the page.
1001 * @param ppMap Where to store the address of the mapping tracking structure.
1002 * @param ppv Where to store the mapping address of the page. The page
1003 * offset is masked off!
1004 *
1005 * @remarks Called from within the PGM critical section.
1006 */
1007static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1008{
1009 PGM_LOCK_ASSERT_OWNER(pVM);
1010
1011#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1012 /*
1013 * Just some sketchy GC/R0-darwin code.
1014 */
1015 *ppMap = NULL;
1016 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1017 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
1018 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1019 NOREF(GCPhys);
1020 return VINF_SUCCESS;
1021
1022#else /* IN_RING3 || IN_RING0 */
1023
1024
1025 /*
1026 * Special case: ZERO and MMIO2 pages.
1027 */
1028 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1029 if (idChunk == NIL_GMM_CHUNKID)
1030 {
1031 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
1032 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
1033 {
1034 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
1035 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1036 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
1037 *ppv = (void *)((uintptr_t)pRam->pvR3 + (uintptr_t)((GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK) - pRam->GCPhys));
1038 }
1039 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1040 {
1041 /** @todo deal with aliased MMIO2 pages somehow...
1042 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
1043 * them, that would also avoid this mess. It would actually be kind of
1044 * elegant... */
1045 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
1046 }
1047 else
1048 {
1049 /** @todo handle MMIO2 */
1050 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
1051 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
1052 ("pPage=%R[pgmpage]\n", pPage),
1053 VERR_INTERNAL_ERROR_2);
1054 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1055 }
1056 *ppMap = NULL;
1057 return VINF_SUCCESS;
1058 }
1059
1060 /*
1061 * Find/make Chunk TLB entry for the mapping chunk.
1062 */
1063 PPGMCHUNKR3MAP pMap;
1064 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1065 if (pTlbe->idChunk == idChunk)
1066 {
1067 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1068 pMap = pTlbe->pChunk;
1069 AssertPtr(pMap->pv);
1070 }
1071 else
1072 {
1073 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1074
1075 /*
1076 * Find the chunk, map it if necessary.
1077 */
1078 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1079 if (pMap)
1080 {
1081 AssertPtr(pMap->pv);
1082 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1083 }
1084 else
1085 {
1086#ifdef IN_RING0
1087 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1088 AssertRCReturn(rc, rc);
1089 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1090 Assert(pMap);
1091#else
1092 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1093 if (RT_FAILURE(rc))
1094 return rc;
1095#endif
1096 AssertPtr(pMap->pv);
1097 }
1098
1099 /*
1100 * Enter it into the Chunk TLB.
1101 */
1102 pTlbe->idChunk = idChunk;
1103 pTlbe->pChunk = pMap;
1104 }
1105
1106 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
1107 *ppMap = pMap;
1108 return VINF_SUCCESS;
1109#endif /* IN_RING3 */
1110}
1111
1112
1113/**
1114 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1115 *
1116 * This is typically used in paths where we cannot use the TLB methods (like ROM
1117 * pages) or where there is no point in using them since we won't get many hits.
1118 *
1119 * @returns VBox strict status code.
1120 * @retval VINF_SUCCESS on success.
1121 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1122 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1123 *
1124 * @param pVM The VM address.
1125 * @param pPage The physical page tracking structure.
1126 * @param GCPhys The address of the page.
1127 * @param ppv Where to store the mapping address of the page. The page
1128 * offset is masked off!
1129 *
1130 * @remarks Called from within the PGM critical section. The mapping is only
1131 * valid while you are inside this section.
1132 */
1133int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1134{
1135 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1136 if (RT_SUCCESS(rc))
1137 {
1138 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1139 PPGMPAGEMAP pMapIgnore;
1140 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1141 if (RT_FAILURE(rc2)) /* preserve rc */
1142 rc = rc2;
1143 }
1144 return rc;
1145}
1146
1147
1148/**
1149 * Maps a page into the current virtual address space so it can be accessed for
1150 * both writing and reading.
1151 *
1152 * This is typically used in paths where we cannot use the TLB methods (like ROM
1153 * pages) or where there is no point in using them since we won't get many hits.
1154 *
1155 * @returns VBox status code.
1156 * @retval VINF_SUCCESS on success.
1157 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1158 *
1159 * @param pVM The VM address.
1160 * @param pPage The physical page tracking structure. Must be in the
1161 * allocated state.
1162 * @param GCPhys The address of the page.
1163 * @param ppv Where to store the mapping address of the page. The page
1164 * offset is masked off!
1165 *
1166 * @remarks Called from within the PGM critical section. The mapping is only
1167 * valid while you are inside this section.
1168 */
1169int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1170{
1171 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1172 PPGMPAGEMAP pMapIgnore;
1173 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1174}
1175
1176
1177/**
1178 * Maps a page into the current virtual address space so it can be accessed for
1179 * reading.
1180 *
1181 * This is typically used in paths where we cannot use the TLB methods (like ROM
1182 * pages) or where there is no point in using them since we won't get many hits.
1183 *
1184 * @returns VBox status code.
1185 * @retval VINF_SUCCESS on success.
1186 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1187 *
1188 * @param pVM The VM address.
1189 * @param pPage The physical page tracking structure.
1190 * @param GCPhys The address of the page.
1191 * @param ppv Where to store the mapping address of the page. The page
1192 * offset is masked off!
1193 *
1194 * @remarks Called from within the PGM critical section. The mapping is only
1195 * valid while you are inside this section.
1196 */
1197int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1198{
1199 PPGMPAGEMAP pMapIgnore;
1200 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1201}
1202
1203#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1204
1205/**
1206 * Load a guest page into the ring-3 physical TLB.
1207 *
1208 * @returns VBox status code.
1209 * @retval VINF_SUCCESS on success
1210 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1211 * @param pVM The VM handle.
1212 * @param GCPhys The guest physical address in question.
1213 */
1214int pgmPhysPageLoadIntoTlb(PVM pVM, RTGCPHYS GCPhys)
1215{
1216 PGM_LOCK_ASSERT_OWNER(pVM);
1217
1218 /*
1219 * Find the ram range and page and hand it over to the with-page function.
1220 * 99.8% of requests are expected to be in the first range.
1221 */
1222 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1223 if (!pPage)
1224 {
1225 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1226 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1227 }
1228
1229 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1230}
1231
1232
1233/**
1234 * Load a guest page into the ring-3 physical TLB.
1235 *
1236 * @returns VBox status code.
1237 * @retval VINF_SUCCESS on success
1238 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1239 *
1240 * @param pVM The VM handle.
1241 * @param pPage Pointer to the PGMPAGE structure corresponding to
1242 * GCPhys.
1243 * @param GCPhys The guest physical address in question.
1244 */
1245int pgmPhysPageLoadIntoTlbWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1246{
1247 PGM_LOCK_ASSERT_OWNER(pVM);
1248 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1249
1250 /*
1251 * Map the page.
1252 * Make a special case for the zero page as it is kind of special.
1253 */
1254 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1255 if ( !PGM_PAGE_IS_ZERO(pPage)
1256 && !PGM_PAGE_IS_BALLOONED(pPage))
1257 {
1258 void *pv;
1259 PPGMPAGEMAP pMap;
1260 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1261 if (RT_FAILURE(rc))
1262 return rc;
1263 pTlbe->pMap = pMap;
1264 pTlbe->pv = pv;
1265 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1266 }
1267 else
1268 {
1269 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1270 pTlbe->pMap = NULL;
1271 pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1272 }
1273#ifdef PGM_WITH_PHYS_TLB
1274 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1275 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1276 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1277 else
1278 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1279#else
1280 pTlbe->GCPhys = NIL_RTGCPHYS;
1281#endif
1282 pTlbe->pPage = pPage;
1283 return VINF_SUCCESS;
1284}
1285
1286#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1287
1288/**
1289 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1290 * own the PGM lock and therefore not need to lock the mapped page.
1291 *
1292 * @returns VBox status code.
1293 * @retval VINF_SUCCESS on success.
1294 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1295 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1296 *
1297 * @param pVM The VM handle.
1298 * @param GCPhys The guest physical address of the page that should be mapped.
1299 * @param pPage Pointer to the PGMPAGE structure for the page.
1300 * @param ppv Where to store the address corresponding to GCPhys.
1301 *
1302 * @internal
1303 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1304 */
1305int pgmPhysGCPhys2CCPtrInternalDepr(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1306{
1307 int rc;
1308 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1309 PGM_LOCK_ASSERT_OWNER(pVM);
1310 pVM->pgm.s.cDeprecatedPageLocks++;
1311
1312 /*
1313 * Make sure the page is writable.
1314 */
1315 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1316 {
1317 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1318 if (RT_FAILURE(rc))
1319 return rc;
1320 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1321 }
1322 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1323
1324 /*
1325 * Get the mapping address.
1326 */
1327#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1328 void *pv;
1329 rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
1330 PGM_PAGE_GET_HCPHYS(pPage),
1331 &pv
1332 RTLOG_COMMA_SRC_POS);
1333 if (RT_FAILURE(rc))
1334 return rc;
1335 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1336#else
1337 PPGMPAGEMAPTLBE pTlbe;
1338 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1339 if (RT_FAILURE(rc))
1340 return rc;
1341 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1342#endif
1343 return VINF_SUCCESS;
1344}
1345
1346#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1347
1348/**
1349 * Locks a page mapping for writing.
1350 *
1351 * @param pVM The VM handle.
1352 * @param pPage The page.
1353 * @param pTlbe The mapping TLB entry for the page.
1354 * @param pLock The lock structure (output).
1355 */
1356DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1357{
1358 PPGMPAGEMAP pMap = pTlbe->pMap;
1359 if (pMap)
1360 pMap->cRefs++;
1361
1362 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1363 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1364 {
1365 if (cLocks == 0)
1366 pVM->pgm.s.cWriteLockedPages++;
1367 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1368 }
1369 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1370 {
1371 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1372 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1373 if (pMap)
1374 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1375 }
1376
1377 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1378 pLock->pvMap = pMap;
1379}
1380
1381/**
1382 * Locks a page mapping for reading.
1383 *
1384 * @param pVM The VM handle.
1385 * @param pPage The page.
1386 * @param pTlbe The mapping TLB entry for the page.
1387 * @param pLock The lock structure (output).
1388 */
1389DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1390{
1391 PPGMPAGEMAP pMap = pTlbe->pMap;
1392 if (pMap)
1393 pMap->cRefs++;
1394
1395 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1396 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1397 {
1398 if (cLocks == 0)
1399 pVM->pgm.s.cReadLockedPages++;
1400 PGM_PAGE_INC_READ_LOCKS(pPage);
1401 }
1402 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1403 {
1404 PGM_PAGE_INC_READ_LOCKS(pPage);
1405 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1406 if (pMap)
1407 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1408 }
1409
1410 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1411 pLock->pvMap = pMap;
1412}
1413
1414#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1415
1416
1417/**
1418 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1419 * own the PGM lock and have access to the page structure.
1420 *
1421 * @returns VBox status code.
1422 * @retval VINF_SUCCESS on success.
1423 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1424 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1425 *
1426 * @param pVM The VM handle.
1427 * @param GCPhys The guest physical address of the page that should be mapped.
1428 * @param pPage Pointer to the PGMPAGE structure for the page.
1429 * @param ppv Where to store the address corresponding to GCPhys.
1430 * @param pLock Where to store the lock information that
1431 * pgmPhysReleaseInternalPageMappingLock needs.
1432 *
1433 * @internal
1434 */
1435int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1436{
1437 int rc;
1438 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1439 PGM_LOCK_ASSERT_OWNER(pVM);
1440
1441 /*
1442 * Make sure the page is writable.
1443 */
1444 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1445 {
1446 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1447 if (RT_FAILURE(rc))
1448 return rc;
1449 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1450 }
1451 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1452
1453 /*
1454 * Do the job.
1455 */
1456#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1457 void *pv;
1458 PVMCPU pVCpu = VMMGetCpu(pVM);
1459 rc = pgmRZDynMapHCPageInlined(pVCpu,
1460 PGM_PAGE_GET_HCPHYS(pPage),
1461 &pv
1462 RTLOG_COMMA_SRC_POS);
1463 if (RT_FAILURE(rc))
1464 return rc;
1465 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1466 pLock->pvPage = pv;
1467 pLock->pVCpu = pVCpu;
1468
1469#else
1470 PPGMPAGEMAPTLBE pTlbe;
1471 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1472 if (RT_FAILURE(rc))
1473 return rc;
1474 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1475 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1476#endif
1477 return VINF_SUCCESS;
1478}
1479
1480
1481/**
1482 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1483 * own the PGM lock and have access to the page structure.
1484 *
1485 * @returns VBox status code.
1486 * @retval VINF_SUCCESS on success.
1487 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1488 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1489 *
1490 * @param pVM The VM handle.
1491 * @param GCPhys The guest physical address of the page that should be mapped.
1492 * @param pPage Pointer to the PGMPAGE structure for the page.
1493 * @param ppv Where to store the address corresponding to GCPhys.
1494 * @param pLock Where to store the lock information that
1495 * pgmPhysReleaseInternalPageMappingLock needs.
1496 *
1497 * @internal
1498 */
1499int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1500{
1501 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1502 PGM_LOCK_ASSERT_OWNER(pVM);
1503 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1504
1505 /*
1506 * Do the job.
1507 */
1508#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1509 void *pv;
1510 PVMCPU pVCpu = VMMGetCpu(pVM);
1511 int rc = pgmRZDynMapHCPageInlined(pVCpu,
1512 PGM_PAGE_GET_HCPHYS(pPage),
1513 &pv
1514 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1515 if (RT_FAILURE(rc))
1516 return rc;
1517 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1518 pLock->pvPage = pv;
1519 pLock->pVCpu = pVCpu;
1520
1521#else
1522 PPGMPAGEMAPTLBE pTlbe;
1523 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1524 if (RT_FAILURE(rc))
1525 return rc;
1526 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1527 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1528#endif
1529 return VINF_SUCCESS;
1530}
1531
1532
1533/**
1534 * Requests the mapping of a guest page into the current context.
1535 *
1536 * This API should only be used for very short-term access, as it will consume scarce
1537 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1538 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1539 *
1540 * This API will assume your intention is to write to the page, and will
1541 * therefore replace shared and zero pages. If you do not intend to modify
1542 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1543 *
1544 * @returns VBox status code.
1545 * @retval VINF_SUCCESS on success.
1546 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1547 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1548 *
1549 * @param pVM The VM handle.
1550 * @param GCPhys The guest physical address of the page that should be
1551 * mapped.
1552 * @param ppv Where to store the address corresponding to GCPhys.
1553 * @param pLock Where to store the lock information that
1554 * PGMPhysReleasePageMappingLock needs.
1555 *
1556 * @remarks The caller is responsible for dealing with access handlers.
1557 * @todo Add an informational return code for pages with access handlers?
1558 *
1559 * @remark Avoid calling this API from within critical sections (other than
1560 * the PGM one) because of the deadlock risk. External threads may
1561 * need to delegate jobs to the EMTs.
1562 * @remarks Only one page is mapped! Make no assumption about what's after or
1563 * before the returned page!
1564 * @thread Any thread.
1565 */
1566VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1567{
1568 int rc = pgmLock(pVM);
1569 AssertRCReturn(rc, rc);
1570
1571#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1572 /*
1573 * Find the page and make sure it's writable.
1574 */
1575 PPGMPAGE pPage;
1576 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1577 if (RT_SUCCESS(rc))
1578 {
1579 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1580 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1581 if (RT_SUCCESS(rc))
1582 {
1583 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1584
1585 PVMCPU pVCpu = VMMGetCpu(pVM);
1586 void *pv;
1587 rc = pgmRZDynMapHCPageInlined(pVCpu,
1588 PGM_PAGE_GET_HCPHYS(pPage),
1589 &pv
1590 RTLOG_COMMA_SRC_POS);
1591 if (RT_SUCCESS(rc))
1592 {
1593 AssertRCSuccess(rc);
1594
1595 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1596 *ppv = pv;
1597 pLock->pvPage = pv;
1598 pLock->pVCpu = pVCpu;
1599 }
1600 }
1601 }
1602
1603#else /* IN_RING3 || IN_RING0 */
1604 /*
1605 * Query the Physical TLB entry for the page (may fail).
1606 */
1607 PPGMPAGEMAPTLBE pTlbe;
1608 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1609 if (RT_SUCCESS(rc))
1610 {
1611 /*
1612 * If the page is shared, the zero page, or being write monitored
1613 * it must be converted to a page that's writable if possible.
1614 */
1615 PPGMPAGE pPage = pTlbe->pPage;
1616 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1617 {
1618 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1619 if (RT_SUCCESS(rc))
1620 {
1621 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1622 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1623 }
1624 }
1625 if (RT_SUCCESS(rc))
1626 {
1627 /*
1628 * Now, just perform the locking and calculate the return address.
1629 */
1630 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1631 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1632 }
1633 }
1634
1635#endif /* IN_RING3 || IN_RING0 */
1636 pgmUnlock(pVM);
1637 return rc;
1638}
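/*
 * Illustrative usage sketch (not part of the original source) for
 * PGMPhysGCPhys2CCPtr. pvSrc and cbToWrite are placeholders, and the release
 * call assumes the usual PGMPhysReleasePageMappingLock(pVM, &Lock) signature
 * referred to in the comments above.
 *
 *    PGMPAGEMAPLOCK Lock;
 *    void          *pv;
 *    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
 *    if (RT_SUCCESS(rc))
 *    {
 *        memcpy(pv, pvSrc, cbToWrite);               // must not cross the page boundary
 *        PGMPhysReleasePageMappingLock(pVM, &Lock);  // release ASAP
 *    }
 */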
1639
1640
1641/**
1642 * Requests the mapping of a guest page into the current context.
1643 *
1644 * This API should only be used for very short-term access, as it will consume scarce
1645 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1646 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1647 *
1648 * @returns VBox status code.
1649 * @retval VINF_SUCCESS on success.
1650 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1651 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1652 *
1653 * @param pVM The VM handle.
1654 * @param GCPhys The guest physical address of the page that should be
1655 * mapped.
1656 * @param ppv Where to store the address corresponding to GCPhys.
1657 * @param pLock Where to store the lock information that
1658 * PGMPhysReleasePageMappingLock needs.
1659 *
1660 * @remarks The caller is responsible for dealing with access handlers.
1661 * @todo Add an informational return code for pages with access handlers?
1662 *
1663 * @remarks Avoid calling this API from within critical sections (other than
1664 * the PGM one) because of the deadlock risk.
1665 * @remarks Only one page is mapped! Make no assumption about what's after or
1666 * before the returned page!
1667 * @thread Any thread.
1668 */
1669VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1670{
1671 int rc = pgmLock(pVM);
1672 AssertRCReturn(rc, rc);
1673
1674#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1675 /*
1676 * Find the page and make sure it's readable.
1677 */
1678 PPGMPAGE pPage;
1679 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1680 if (RT_SUCCESS(rc))
1681 {
1682 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1683 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1684 else
1685 {
1686 PVMCPU pVCpu = VMMGetCpu(pVM);
1687 void *pv;
1688 rc = pgmRZDynMapHCPageInlined(pVCpu,
1689 PGM_PAGE_GET_HCPHYS(pPage),
1690 &pv
1691 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1692 if (RT_SUCCESS(rc))
1693 {
1694 AssertRCSuccess(rc);
1695
1696 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1697 *ppv = pv;
1698 pLock->pvPage = pv;
1699 pLock->pVCpu = pVCpu;
1700 }
1701 }
1702 }
1703
1704#else /* IN_RING3 || IN_RING0 */
1705 /*
1706 * Query the Physical TLB entry for the page (may fail).
1707 */
1708 PPGMPAGEMAPTLBE pTlbe;
1709 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1710 if (RT_SUCCESS(rc))
1711 {
1712        /* MMIO pages don't have any readable backing. */
1713 PPGMPAGE pPage = pTlbe->pPage;
1714 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1715 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1716 else
1717 {
1718 /*
1719 * Now, just perform the locking and calculate the return address.
1720 */
1721 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1722 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1723 }
1724 }
1725
1726#endif /* IN_RING3 || IN_RING0 */
1727 pgmUnlock(pVM);
1728 return rc;
1729}
1730
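/*
 * A matching read-only sketch (not built; the helper name is invented): peek
 * a single byte via PGMPhysGCPhys2CCPtrReadOnly() and drop the lock again as
 * soon as possible.
 */
#if 0 /* illustration only */
static int pgmPhysExamplePeekByte(PVM pVM, RTGCPHYS GCPhys, uint8_t *pb)
{
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pb = *(uint8_t const *)pv;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif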
1731
1732/**
1733 * Requests the mapping of a guest page given by virtual address into the current context.
1734 *
1735 * This API should only be used for a very short time, as it will consume
1736 * scarce resources (R0 and GC) in the mapping cache. When you're done
1737 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1738 *
1739 * This API will assume your intention is to write to the page, and will
1740 * therefore replace shared and zero pages. If you do not intend to modify
1741 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1742 *
1743 * @returns VBox status code.
1744 * @retval VINF_SUCCESS on success.
1745 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1746 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1747 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1748 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1749 *
1750 * @param pVCpu VMCPU handle.
1751 * @param GCPtr The guest virtual address of the page that should be mapped.
1752 * @param ppv Where to store the address corresponding to GCPtr.
1753 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1754 *
1755 * @remark Avoid calling this API from within critical sections (other than
1756 * the PGM one) because of the deadlock risk.
1757 * @thread EMT
1758 */
1759VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1760{
1761 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1762 RTGCPHYS GCPhys;
1763 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1764 if (RT_SUCCESS(rc))
1765 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1766 return rc;
1767}
1768
1769
1770/**
1771 * Requests the mapping of a guest page given by virtual address into the current context.
1772 *
1773 * This API should only be used for a very short time, as it will consume
1774 * scarce resources (R0 and GC) in the mapping cache. When you're done
1775 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1776 *
1777 * @returns VBox status code.
1778 * @retval VINF_SUCCESS on success.
1779 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1780 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1781 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1782 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1783 *
1784 * @param pVCpu VMCPU handle.
1785 * @param GCPtr The guest virtual address of the page that should be mapped.
1786 * @param ppv Where to store the address corresponding to GCPtr.
1787 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1788 *
1789 * @remark Avoid calling this API from within critical sections (other than
1790 * the PGM one) because of the deadlock risk.
1791 * @thread EMT
1792 */
1793VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1794{
1795 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1796 RTGCPHYS GCPhys;
1797 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1798 if (RT_SUCCESS(rc))
1799 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1800 return rc;
1801}
1802
1803
1804/**
1805 * Release the mapping of a guest page.
1806 *
1807 * This is the counterpart to PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1808 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1809 *
1810 * @param pVM The VM handle.
1811 * @param pLock The lock structure initialized by the mapping function.
1812 */
1813VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1814{
1815#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1816 Assert(pLock->pvPage != NULL);
1817 Assert(pLock->pVCpu == VMMGetCpu(pVM));
1818 PGM_DYNMAP_UNUSED_HINT(pLock->pVCpu, pLock->pvPage);
1819 pLock->pVCpu = NULL;
1820 pLock->pvPage = NULL;
1821
1822#else
1823 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1824 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1825 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1826
1827 pLock->uPageAndType = 0;
1828 pLock->pvMap = NULL;
1829
1830 pgmLock(pVM);
1831 if (fWriteLock)
1832 {
1833 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1834 Assert(cLocks > 0);
1835 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1836 {
1837 if (cLocks == 1)
1838 {
1839 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1840 pVM->pgm.s.cWriteLockedPages--;
1841 }
1842 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1843 }
1844
1845 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1846 {
1847 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1848 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1849 Assert(pVM->pgm.s.cMonitoredPages > 0);
1850 pVM->pgm.s.cMonitoredPages--;
1851 pVM->pgm.s.cWrittenToPages++;
1852 }
1853 }
1854 else
1855 {
1856 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1857 Assert(cLocks > 0);
1858 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1859 {
1860 if (cLocks == 1)
1861 {
1862 Assert(pVM->pgm.s.cReadLockedPages > 0);
1863 pVM->pgm.s.cReadLockedPages--;
1864 }
1865 PGM_PAGE_DEC_READ_LOCKS(pPage);
1866 }
1867 }
1868
1869 if (pMap)
1870 {
1871 Assert(pMap->cRefs >= 1);
1872 pMap->cRefs--;
1873 }
1874 pgmUnlock(pVM);
1875#endif /* IN_RING3 || IN_RING0 */
1876}
1877
1878
1879/**
1880 * Release the internal mapping of a guest page.
1881 *
1882 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
1883 * pgmPhysGCPhys2CCPtrInternalReadOnly.
1884 *
1885 * @param pVM The VM handle.
1886 * @param pLock The lock structure initialized by the mapping function.
1887 *
1888 * @remarks Caller must hold the PGM lock.
1889 */
1890void pgmPhysReleaseInternalPageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1891{
1892 PGM_LOCK_ASSERT_OWNER(pVM);
1893 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
1894}
1895
1896
1897/**
1898 * Converts a GC physical address to a HC ring-3 pointer.
1899 *
1900 * @returns VINF_SUCCESS on success.
1901 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1902 * page but has no physical backing.
1903 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1904 * GC physical address.
1905 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1906 * a dynamic ram chunk boundary.
1907 *
1908 * @param pVM The VM handle.
1909 * @param GCPhys The GC physical address to convert.
1910 * @param pR3Ptr Where to store the R3 pointer on success.
1911 *
1912 * @deprecated Avoid when possible!
1913 */
1914int pgmPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1915{
1916/** @todo this is kind of hacky and needs some more work. */
1917#ifndef DEBUG_sandervl
1918 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1919#endif
1920
1921    Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): don't use this API!\n", GCPhys)); /** @todo eliminate this API! */
1922#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1923 NOREF(pVM); NOREF(pR3Ptr);
1924 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1925#else
1926 pgmLock(pVM);
1927
1928 PPGMRAMRANGE pRam;
1929 PPGMPAGE pPage;
1930 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1931 if (RT_SUCCESS(rc))
1932 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
1933
1934 pgmUnlock(pVM);
1935 Assert(rc <= VINF_SUCCESS);
1936 return rc;
1937#endif
1938}
1939
1940#if 0 /*defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)*/
1941
1942/**
1943 * Maps and locks a guest CR3 or PD (PAE) page.
1944 *
1945 * @returns VINF_SUCCESS on success.
1946 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1947 * page but has no physical backing.
1948 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1949 * GC physical address.
1950 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1951 * a dynamic ram chunk boundary.
1952 *
1953 * @param pVM The VM handle.
1954 * @param GCPhys The GC physical address to convert.
1955 * @param pR3Ptr Where to store the R3 pointer on success. This may or
1956 * may not be valid in ring-0 depending on the
1957 * VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option.
1958 *
1959 * @remarks The caller must own the PGM lock.
1960 */
1961int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1962{
1963
1964 PPGMRAMRANGE pRam;
1965 PPGMPAGE pPage;
1966 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1967 if (RT_SUCCESS(rc))
1968 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
1969 Assert(rc <= VINF_SUCCESS);
1970 return rc;
1971}
1972
1978
1979#endif
1980
1981/**
1982 * Converts a guest pointer to a GC physical address.
1983 *
1984 * This uses the current CR3/CR0/CR4 of the guest.
1985 *
1986 * @returns VBox status code.
1987 * @param pVCpu The VMCPU Handle
1988 * @param GCPtr The guest pointer to convert.
1989 * @param pGCPhys Where to store the GC physical address.
1990 */
1991VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1992{
1993 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1994 if (pGCPhys && RT_SUCCESS(rc))
1995 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1996 return rc;
1997}
1998
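/*
 * Sketch (not built; the helper name is invented): the conversion above keeps
 * the byte offset within the page, so the result can be fed straight into the
 * physical access APIs.
 */
#if 0 /* illustration only */
static int pgmPhysExamplePeekGCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, uint8_t *pb)
{
    /* A single byte, so there is no page-crossing to worry about; larger
       accesses should go through PGMPhysReadGCPtr, which splits them page by
       page. */
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
    if (RT_SUCCESS(rc))
        rc = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pb, 1);
    return rc;
}
#endif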
1999
2000/**
2001 * Converts a guest pointer to a HC physical address.
2002 *
2003 * This uses the current CR3/CR0/CR4 of the guest.
2004 *
2005 * @returns VBox status code.
2006 * @param pVCpu The VMCPU Handle
2007 * @param GCPtr The guest pointer to convert.
2008 * @param pHCPhys Where to store the HC physical address.
2009 */
2010VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2011{
2012 PVM pVM = pVCpu->CTX_SUFF(pVM);
2013 RTGCPHYS GCPhys;
2014 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
2015 if (RT_SUCCESS(rc))
2016 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
2017 return rc;
2018}
2019
2020
2021
2022#undef LOG_GROUP
2023#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2024
2025
2026#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2027/**
2028 * Cache PGMPhys memory access
2029 *
2030 * @param pVM VM Handle.
2031 * @param pCache Cache structure pointer
2032 * @param GCPhys GC physical address
2033 * @param pbR3 Ring-3 pointer corresponding to the physical page.
2034 *
2035 * @thread EMT.
2036 */
2037static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2038{
2039 uint32_t iCacheIndex;
2040
2041 Assert(VM_IS_EMT(pVM));
2042
2043 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
2044 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
2045
2046 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2047
2048 ASMBitSet(&pCache->aEntries, iCacheIndex);
2049
2050 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2051 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2052}
2053#endif /* IN_RING3 */
2054
2055
2056/**
2057 * Deals with reading from a page with one or more ALL access handlers.
2058 *
2059 * @returns VBox status code. Can be ignored in ring-3.
2060 * @retval VINF_SUCCESS.
2061 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2062 *
2063 * @param pVM The VM handle.
2064 * @param pPage The page descriptor.
2065 * @param GCPhys The physical address to start reading at.
2066 * @param pvBuf Where to put the bits we read.
2067 * @param cb How much to read - less or equal to a page.
2068 */
2069static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
2070{
2071 /*
2072     * The most frequent accesses here are MMIO and shadowed ROM.
2073     * The current code ASSUMES that all these access handlers cover full pages!
2074 */
2075
2076 /*
2077 * Whatever we do we need the source page, map it first.
2078 */
2079 PGMPAGEMAPLOCK PgMpLck;
2080 const void *pvSrc = NULL;
2081 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2082 if (RT_FAILURE(rc))
2083 {
2084 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2085 GCPhys, pPage, rc));
2086 memset(pvBuf, 0xff, cb);
2087 return VINF_SUCCESS;
2088 }
2089 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2090
2091 /*
2092 * Deal with any physical handlers.
2093 */
2094#ifdef IN_RING3
2095 PPGMPHYSHANDLER pPhys = NULL;
2096#endif
2097 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
2098 {
2099#ifdef IN_RING3
2100 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2101 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2102 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
2103 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
2104 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2105 Assert(pPhys->CTX_SUFF(pfnHandler));
2106
2107 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2108 void *pvUser = pPhys->CTX_SUFF(pvUser);
2109
2110 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
2111 STAM_PROFILE_START(&pPhys->Stat, h);
2112 PGM_LOCK_ASSERT_OWNER(pVM);
2113 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2114 pgmUnlock(pVM);
2115 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
2116 pgmLock(pVM);
2117# ifdef VBOX_WITH_STATISTICS
2118 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2119 if (pPhys)
2120 STAM_PROFILE_STOP(&pPhys->Stat, h);
2121# else
2122 pPhys = NULL; /* might not be valid anymore. */
2123# endif
2124 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
2125#else
2126 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2127 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2128 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2129 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2130#endif
2131 }
2132
2133 /*
2134 * Deal with any virtual handlers.
2135 */
2136 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
2137 {
2138 unsigned iPage;
2139 PPGMVIRTHANDLER pVirt;
2140
2141 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
2142 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
2143 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
2144 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2145 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
2146
2147#ifdef IN_RING3
2148 if (pVirt->pfnHandlerR3)
2149 {
2150 if (!pPhys)
2151 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2152 else
2153 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
2154 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2155 + (iPage << PAGE_SHIFT)
2156 + (GCPhys & PAGE_OFFSET_MASK);
2157
2158 STAM_PROFILE_START(&pVirt->Stat, h);
2159 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
2160 STAM_PROFILE_STOP(&pVirt->Stat, h);
2161 if (rc2 == VINF_SUCCESS)
2162 rc = VINF_SUCCESS;
2163 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
2164 }
2165 else
2166 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2167#else
2168 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2169 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2170 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2171 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2172#endif
2173 }
2174
2175 /*
2176 * Take the default action.
2177 */
2178 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2179 memcpy(pvBuf, pvSrc, cb);
2180 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2181 return rc;
2182}
2183
2184
2185/**
2186 * Read physical memory.
2187 *
2188 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2189 * want to ignore those.
2190 *
2191 * @returns VBox status code. Can be ignored in ring-3.
2192 * @retval VINF_SUCCESS.
2193 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2194 *
2195 * @param pVM VM Handle.
2196 * @param GCPhys Physical address start reading from.
2197 * @param pvBuf Where to put the read bits.
2198 * @param cbRead How many bytes to read.
2199 */
2200VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
2201{
2202 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2203 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2204
2205 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysRead));
2206 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2207
2208 pgmLock(pVM);
2209
2210 /*
2211 * Copy loop on ram ranges.
2212 */
2213 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2214 for (;;)
2215 {
2216 /* Inside range or not? */
2217 if (pRam && GCPhys >= pRam->GCPhys)
2218 {
2219 /*
2220             * Must work our way through this, page by page.
2221 */
2222 RTGCPHYS off = GCPhys - pRam->GCPhys;
2223 while (off < pRam->cb)
2224 {
2225 unsigned iPage = off >> PAGE_SHIFT;
2226 PPGMPAGE pPage = &pRam->aPages[iPage];
2227 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2228 if (cb > cbRead)
2229 cb = cbRead;
2230
2231 /*
2232 * Any ALL access handlers?
2233 */
2234 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
2235 {
2236 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2237 if (RT_FAILURE(rc))
2238 {
2239 pgmUnlock(pVM);
2240 return rc;
2241 }
2242 }
2243 else
2244 {
2245 /*
2246 * Get the pointer to the page.
2247 */
2248 PGMPAGEMAPLOCK PgMpLck;
2249 const void *pvSrc;
2250 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2251 if (RT_SUCCESS(rc))
2252 {
2253 memcpy(pvBuf, pvSrc, cb);
2254 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2255 }
2256 else
2257 {
2258 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2259 pRam->GCPhys + off, pPage, rc));
2260 memset(pvBuf, 0xff, cb);
2261 }
2262 }
2263
2264 /* next page */
2265 if (cb >= cbRead)
2266 {
2267 pgmUnlock(pVM);
2268 return VINF_SUCCESS;
2269 }
2270 cbRead -= cb;
2271 off += cb;
2272 pvBuf = (char *)pvBuf + cb;
2273 } /* walk pages in ram range. */
2274
2275 GCPhys = pRam->GCPhysLast + 1;
2276 }
2277 else
2278 {
2279 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2280
2281 /*
2282 * Unassigned address space.
2283 */
2284 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2285 if (cb >= cbRead)
2286 {
2287 memset(pvBuf, 0xff, cbRead);
2288 break;
2289 }
2290 memset(pvBuf, 0xff, cb);
2291
2292 cbRead -= cb;
2293 pvBuf = (char *)pvBuf + cb;
2294 GCPhys += cb;
2295 }
2296
2297 /* Advance range if necessary. */
2298 while (pRam && GCPhys > pRam->GCPhysLast)
2299 pRam = pRam->CTX_SUFF(pNext);
2300 } /* Ram range walk */
2301
2302 pgmUnlock(pVM);
2303 return VINF_SUCCESS;
2304}
2305
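/*
 * A small caller-side sketch (not built; the helper name is invented) of the
 * handler-respecting read above: unassigned space comes back as 0xff bytes,
 * and the access may legally span pages and RAM ranges.
 */
#if 0 /* illustration only */
static int pgmPhysExampleReadU32(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32)
{
    return PGMPhysRead(pVM, GCPhys, pu32, sizeof(*pu32));
}
#endif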
2306
2307/**
2308 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2309 *
2310 * @returns VBox status code. Can be ignored in ring-3.
2311 * @retval VINF_SUCCESS.
2312 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2313 *
2314 * @param pVM The VM handle.
2315 * @param pPage The page descriptor.
2316 * @param GCPhys The physical address to start writing at.
2317 * @param pvBuf What to write.
2318 * @param cbWrite How much to write - less or equal to a page.
2319 */
2320static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
2321{
2322 PGMPAGEMAPLOCK PgMpLck;
2323 void *pvDst = NULL;
2324 int rc;
2325
2326 /*
2327 * Give priority to physical handlers (like #PF does).
2328 *
2329 * Hope for a lonely physical handler first that covers the whole
2330 * write area. This should be a pretty frequent case with MMIO and
2331 * the heavy usage of full page handlers in the page pool.
2332 */
2333 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
2334 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
2335 {
2336 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2337 if (pCur)
2338 {
2339 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2340 Assert(pCur->CTX_SUFF(pfnHandler));
2341
2342 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2343 if (cbRange > cbWrite)
2344 cbRange = cbWrite;
2345
2346#ifndef IN_RING3
2347 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2348 NOREF(cbRange);
2349 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2350 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2351
2352#else /* IN_RING3 */
2353 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2354 if (!PGM_PAGE_IS_MMIO(pPage))
2355 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2356 else
2357 rc = VINF_SUCCESS;
2358 if (RT_SUCCESS(rc))
2359 {
2360 PFNPGMR3PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
2361 void *pvUser = pCur->CTX_SUFF(pvUser);
2362
2363 STAM_PROFILE_START(&pCur->Stat, h);
2364 PGM_LOCK_ASSERT_OWNER(pVM);
2365 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2366 pgmUnlock(pVM);
2367 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2368 pgmLock(pVM);
2369# ifdef VBOX_WITH_STATISTICS
2370 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2371 if (pCur)
2372 STAM_PROFILE_STOP(&pCur->Stat, h);
2373# else
2374 pCur = NULL; /* might not be valid anymore. */
2375# endif
2376 if (rc == VINF_PGM_HANDLER_DO_DEFAULT && pvDst)
2377 {
2378                    memcpy(pvDst, pvBuf, cbRange);
2380 }
2381 else
2382 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
2383 }
2384 else
2385 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2386 GCPhys, pPage, rc), rc);
2387 if (RT_LIKELY(cbRange == cbWrite))
2388 {
2389                if (pvDst)
2390 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2391 return VINF_SUCCESS;
2392 }
2393
2394 /* more fun to be had below */
2395 cbWrite -= cbRange;
2396 GCPhys += cbRange;
2397 pvBuf = (uint8_t *)pvBuf + cbRange;
2398 pvDst = (uint8_t *)pvDst + cbRange;
2399#endif /* IN_RING3 */
2400 }
2401 /* else: the handler is somewhere else in the page, deal with it below. */
2402 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2403 }
2404 /*
2405 * A virtual handler without any interfering physical handlers.
2406 * Hopefully it'll convert the whole write.
2407 */
2408 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2409 {
2410 unsigned iPage;
2411 PPGMVIRTHANDLER pCur;
2412 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
2413 if (RT_SUCCESS(rc))
2414 {
2415 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2416 if (cbRange > cbWrite)
2417 cbRange = cbWrite;
2418
2419#ifndef IN_RING3
2420 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2421 NOREF(cbRange);
2422 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2423 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2424
2425#else /* IN_RING3 */
2426
2427 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2428 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2429 if (RT_SUCCESS(rc))
2430 {
2431 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2432 if (pCur->pfnHandlerR3)
2433 {
2434 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
2435 + (iPage << PAGE_SHIFT)
2436 + (GCPhys & PAGE_OFFSET_MASK);
2437
2438 STAM_PROFILE_START(&pCur->Stat, h);
2439 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2440 STAM_PROFILE_STOP(&pCur->Stat, h);
2441 }
2442 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2443 memcpy(pvDst, pvBuf, cbRange);
2444 else
2445 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2446 }
2447 else
2448 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2449 GCPhys, pPage, rc), rc);
2450 if (RT_LIKELY(cbRange == cbWrite))
2451 {
2452                if (pvDst)
2453 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2454 return VINF_SUCCESS;
2455 }
2456
2457 /* more fun to be had below */
2458 cbWrite -= cbRange;
2459 GCPhys += cbRange;
2460 pvBuf = (uint8_t *)pvBuf + cbRange;
2461 pvDst = (uint8_t *)pvDst + cbRange;
2462#endif
2463 }
2464 /* else: the handler is somewhere else in the page, deal with it below. */
2465 }
2466
2467 /*
2468 * Deal with all the odd ends.
2469 */
2470
2471 /* We need a writable destination page. */
2472 if (!pvDst)
2473 {
2474 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2475 AssertLogRelMsgReturn(RT_SUCCESS(rc),
2476 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2477 GCPhys, pPage, rc), rc);
2478 }
2479
2480 /* The loop state (big + ugly). */
2481 unsigned iVirtPage = 0;
2482 PPGMVIRTHANDLER pVirt = NULL;
2483 uint32_t offVirt = PAGE_SIZE;
2484 uint32_t offVirtLast = PAGE_SIZE;
2485 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2486
2487 PPGMPHYSHANDLER pPhys = NULL;
2488 uint32_t offPhys = PAGE_SIZE;
2489 uint32_t offPhysLast = PAGE_SIZE;
2490 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2491
2492 /* The loop. */
2493 for (;;)
2494 {
2495 /*
2496 * Find the closest handler at or above GCPhys.
2497 */
2498 if (fMoreVirt && !pVirt)
2499 {
2500 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
2501 if (RT_SUCCESS(rc))
2502 {
2503 offVirt = 0;
2504 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2505 }
2506 else
2507 {
2508 PPGMPHYS2VIRTHANDLER pVirtPhys;
2509 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2510 GCPhys, true /* fAbove */);
2511 if ( pVirtPhys
2512 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2513 {
2514 /* ASSUME that pVirtPhys only covers one page. */
2515 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2516 Assert(pVirtPhys->Core.Key > GCPhys);
2517
2518 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2519 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2520 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2521 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2522 }
2523 else
2524 {
2525 pVirt = NULL;
2526 fMoreVirt = false;
2527 offVirt = offVirtLast = PAGE_SIZE;
2528 }
2529 }
2530 }
2531
2532 if (fMorePhys && !pPhys)
2533 {
2534 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2535 if (pPhys)
2536 {
2537 offPhys = 0;
2538 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2539 }
2540 else
2541 {
2542 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2543 GCPhys, true /* fAbove */);
2544 if ( pPhys
2545 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2546 {
2547 offPhys = pPhys->Core.Key - GCPhys;
2548 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2549 }
2550 else
2551 {
2552 pPhys = NULL;
2553 fMorePhys = false;
2554 offPhys = offPhysLast = PAGE_SIZE;
2555 }
2556 }
2557 }
2558
2559 /*
2560 * Handle access to space without handlers (that's easy).
2561 */
2562 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2563 uint32_t cbRange = (uint32_t)cbWrite;
2564 if (offPhys && offVirt)
2565 {
2566 if (cbRange > offPhys)
2567 cbRange = offPhys;
2568 if (cbRange > offVirt)
2569 cbRange = offVirt;
2570 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2571 }
2572 /*
2573 * Physical handler.
2574 */
2575 else if (!offPhys && offVirt)
2576 {
2577 if (cbRange > offPhysLast + 1)
2578 cbRange = offPhysLast + 1;
2579 if (cbRange > offVirt)
2580 cbRange = offVirt;
2581#ifdef IN_RING3
2582 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2583 void *pvUser = pPhys->CTX_SUFF(pvUser);
2584
2585 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2586 STAM_PROFILE_START(&pPhys->Stat, h);
2587 PGM_LOCK_ASSERT_OWNER(pVM);
2588 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2589 pgmUnlock(pVM);
2590 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2591 pgmLock(pVM);
2592# ifdef VBOX_WITH_STATISTICS
2593 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2594 if (pPhys)
2595 STAM_PROFILE_STOP(&pPhys->Stat, h);
2596# else
2597 pPhys = NULL; /* might not be valid anymore. */
2598# endif
2599 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2600#else
2601 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2602 NOREF(cbRange);
2603 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2604 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2605 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2606#endif
2607 }
2608 /*
2609 * Virtual handler.
2610 */
2611 else if (offPhys && !offVirt)
2612 {
2613 if (cbRange > offVirtLast + 1)
2614 cbRange = offVirtLast + 1;
2615 if (cbRange > offPhys)
2616 cbRange = offPhys;
2617#ifdef IN_RING3
2618            Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2619 if (pVirt->pfnHandlerR3)
2620 {
2621 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2622 + (iVirtPage << PAGE_SHIFT)
2623 + (GCPhys & PAGE_OFFSET_MASK);
2624 STAM_PROFILE_START(&pVirt->Stat, h);
2625 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2626 STAM_PROFILE_STOP(&pVirt->Stat, h);
2627 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2628 }
2629 pVirt = NULL;
2630#else
2631 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2632 NOREF(cbRange);
2633 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2634 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2635 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2636#endif
2637 }
2638 /*
2639 * Both... give the physical one priority.
2640 */
2641 else
2642 {
2643 Assert(!offPhys && !offVirt);
2644 if (cbRange > offVirtLast + 1)
2645 cbRange = offVirtLast + 1;
2646 if (cbRange > offPhysLast + 1)
2647 cbRange = offPhysLast + 1;
2648
2649#ifdef IN_RING3
2650 if (pVirt->pfnHandlerR3)
2651 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2652 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2653
2654 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2655 void *pvUser = pPhys->CTX_SUFF(pvUser);
2656
2657 STAM_PROFILE_START(&pPhys->Stat, h);
2658 PGM_LOCK_ASSERT_OWNER(pVM);
2659 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2660 pgmUnlock(pVM);
2661 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2662 pgmLock(pVM);
2663# ifdef VBOX_WITH_STATISTICS
2664 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2665 if (pPhys)
2666 STAM_PROFILE_STOP(&pPhys->Stat, h);
2667# else
2668 pPhys = NULL; /* might not be valid anymore. */
2669# endif
2670 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2671 if (pVirt->pfnHandlerR3)
2672 {
2673
2674 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2675 + (iVirtPage << PAGE_SHIFT)
2676 + (GCPhys & PAGE_OFFSET_MASK);
2677 STAM_PROFILE_START(&pVirt->Stat, h2);
2678 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2679 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2680 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2681 rc = VINF_SUCCESS;
2682 else
2683 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2684 }
2685 pPhys = NULL;
2686 pVirt = NULL;
2687#else
2688 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2689 NOREF(cbRange);
2690 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2691 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2692 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2693#endif
2694 }
2695 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2696 memcpy(pvDst, pvBuf, cbRange);
2697
2698 /*
2699 * Advance if we've got more stuff to do.
2700 */
2701 if (cbRange >= cbWrite)
2702 {
2703 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2704 return VINF_SUCCESS;
2705 }
2706
2707 cbWrite -= cbRange;
2708 GCPhys += cbRange;
2709 pvBuf = (uint8_t *)pvBuf + cbRange;
2710 pvDst = (uint8_t *)pvDst + cbRange;
2711
2712 offPhys -= cbRange;
2713 offPhysLast -= cbRange;
2714 offVirt -= cbRange;
2715 offVirtLast -= cbRange;
2716 }
2717}
2718
2719
2720/**
2721 * Write to physical memory.
2722 *
2723 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2724 * want to ignore those.
2725 *
2726 * @returns VBox status code. Can be ignored in ring-3.
2727 * @retval VINF_SUCCESS.
2728 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2729 *
2730 * @param pVM VM Handle.
2731 * @param GCPhys Physical address to write to.
2732 * @param pvBuf What to write.
2733 * @param cbWrite How many bytes to write.
2734 */
2735VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2736{
2737 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2738 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2739 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2740
2741 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWrite));
2742 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2743
2744 pgmLock(pVM);
2745
2746 /*
2747 * Copy loop on ram ranges.
2748 */
2749 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2750 for (;;)
2751 {
2752 /* Inside range or not? */
2753 if (pRam && GCPhys >= pRam->GCPhys)
2754 {
2755 /*
2756             * Must work our way through this, page by page.
2757 */
2758 RTGCPTR off = GCPhys - pRam->GCPhys;
2759 while (off < pRam->cb)
2760 {
2761 RTGCPTR iPage = off >> PAGE_SHIFT;
2762 PPGMPAGE pPage = &pRam->aPages[iPage];
2763 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2764 if (cb > cbWrite)
2765 cb = cbWrite;
2766
2767 /*
2768 * Any active WRITE or ALL access handlers?
2769 */
2770 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2771 {
2772 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2773 if (RT_FAILURE(rc))
2774 {
2775 pgmUnlock(pVM);
2776 return rc;
2777 }
2778 }
2779 else
2780 {
2781 /*
2782 * Get the pointer to the page.
2783 */
2784 PGMPAGEMAPLOCK PgMpLck;
2785 void *pvDst;
2786 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
2787 if (RT_SUCCESS(rc))
2788 {
2789 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2790 memcpy(pvDst, pvBuf, cb);
2791 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2792 }
2793 /* Ignore writes to ballooned pages. */
2794 else if (!PGM_PAGE_IS_BALLOONED(pPage))
2795 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2796 pRam->GCPhys + off, pPage, rc));
2797 }
2798
2799 /* next page */
2800 if (cb >= cbWrite)
2801 {
2802 pgmUnlock(pVM);
2803 return VINF_SUCCESS;
2804 }
2805
2806 cbWrite -= cb;
2807 off += cb;
2808 pvBuf = (const char *)pvBuf + cb;
2809 } /* walk pages in ram range */
2810
2811 GCPhys = pRam->GCPhysLast + 1;
2812 }
2813 else
2814 {
2815 /*
2816 * Unassigned address space, skip it.
2817 */
2818 if (!pRam)
2819 break;
2820 size_t cb = pRam->GCPhys - GCPhys;
2821 if (cb >= cbWrite)
2822 break;
2823 cbWrite -= cb;
2824 pvBuf = (const char *)pvBuf + cb;
2825 GCPhys += cb;
2826 }
2827
2828 /* Advance range if necessary. */
2829 while (pRam && GCPhys > pRam->GCPhysLast)
2830 pRam = pRam->CTX_SUFF(pNext);
2831 } /* Ram range walk */
2832
2833 pgmUnlock(pVM);
2834 return VINF_SUCCESS;
2835}
2836
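/*
 * Caller-side sketch for the write path above (not built; the helper name is
 * invented): in R0 and RC a write that hits an access handler cannot be
 * serviced here, so the status has to be propagated for a ring-3 retry.
 */
#if 0 /* illustration only */
static int pgmPhysExampleWriteU32(PVM pVM, RTGCPHYS GCPhys, uint32_t u32)
{
    int rc = PGMPhysWrite(pVM, GCPhys, &u32, sizeof(u32));
#ifdef IN_RING3
    AssertRC(rc);                       /* never VERR_PGM_PHYS_WR_HIT_HANDLER in R3 */
#else
    /* In R0/RC a handler hit must be bounced to ring-3 by the caller. */
    AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_PHYS_WR_HIT_HANDLER, ("%Rrc\n", rc));
#endif
    return rc;
}
#endif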
2837
2838/**
2839 * Read from guest physical memory by GC physical address, bypassing
2840 * MMIO and access handlers.
2841 *
2842 * @returns VBox status.
2843 * @param pVM VM handle.
2844 * @param pvDst The destination address.
2845 * @param GCPhysSrc The source address (GC physical address).
2846 * @param cb The number of bytes to read.
2847 */
2848VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2849{
2850 /*
2851 * Treat the first page as a special case.
2852 */
2853 if (!cb)
2854 return VINF_SUCCESS;
2855
2856 /* map the 1st page */
2857 void const *pvSrc;
2858 PGMPAGEMAPLOCK Lock;
2859 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2860 if (RT_FAILURE(rc))
2861 return rc;
2862
2863 /* optimize for the case where access is completely within the first page. */
2864 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2865 if (RT_LIKELY(cb <= cbPage))
2866 {
2867 memcpy(pvDst, pvSrc, cb);
2868 PGMPhysReleasePageMappingLock(pVM, &Lock);
2869 return VINF_SUCCESS;
2870 }
2871
2872 /* copy to the end of the page. */
2873 memcpy(pvDst, pvSrc, cbPage);
2874 PGMPhysReleasePageMappingLock(pVM, &Lock);
2875 GCPhysSrc += cbPage;
2876 pvDst = (uint8_t *)pvDst + cbPage;
2877 cb -= cbPage;
2878
2879 /*
2880 * Page by page.
2881 */
2882 for (;;)
2883 {
2884 /* map the page */
2885 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2886 if (RT_FAILURE(rc))
2887 return rc;
2888
2889 /* last page? */
2890 if (cb <= PAGE_SIZE)
2891 {
2892 memcpy(pvDst, pvSrc, cb);
2893 PGMPhysReleasePageMappingLock(pVM, &Lock);
2894 return VINF_SUCCESS;
2895 }
2896
2897 /* copy the entire page and advance */
2898 memcpy(pvDst, pvSrc, PAGE_SIZE);
2899 PGMPhysReleasePageMappingLock(pVM, &Lock);
2900 GCPhysSrc += PAGE_SIZE;
2901 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2902 cb -= PAGE_SIZE;
2903 }
2904 /* won't ever get here. */
2905}
2906
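/*
 * Sketch (not built; the helper name is invented): PGMPhysSimpleReadGCPhys()
 * is the variant to reach for when access handlers must not be triggered;
 * page-boundary crossings are handled internally.
 */
#if 0 /* illustration only */
static int pgmPhysExampleReadRaw16(PVM pVM, RTGCPHYS GCPhys, uint8_t pabDst[16])
{
    return PGMPhysSimpleReadGCPhys(pVM, pabDst, GCPhys, 16);
}
#endif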
2907
2908/**
2909 * Write to guest physical memory referenced by GC physical address.
2911 *
2912 * This will bypass MMIO and access handlers.
2913 *
2914 * @returns VBox status.
2915 * @param pVM VM handle.
2916 * @param GCPhysDst The GC physical address of the destination.
2917 * @param pvSrc The source buffer.
2918 * @param cb The number of bytes to write.
2919 */
2920VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2921{
2922 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2923
2924 /*
2925 * Treat the first page as a special case.
2926 */
2927 if (!cb)
2928 return VINF_SUCCESS;
2929
2930 /* map the 1st page */
2931 void *pvDst;
2932 PGMPAGEMAPLOCK Lock;
2933 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2934 if (RT_FAILURE(rc))
2935 return rc;
2936
2937 /* optimize for the case where access is completely within the first page. */
2938 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2939 if (RT_LIKELY(cb <= cbPage))
2940 {
2941 memcpy(pvDst, pvSrc, cb);
2942 PGMPhysReleasePageMappingLock(pVM, &Lock);
2943 return VINF_SUCCESS;
2944 }
2945
2946 /* copy to the end of the page. */
2947 memcpy(pvDst, pvSrc, cbPage);
2948 PGMPhysReleasePageMappingLock(pVM, &Lock);
2949 GCPhysDst += cbPage;
2950 pvSrc = (const uint8_t *)pvSrc + cbPage;
2951 cb -= cbPage;
2952
2953 /*
2954 * Page by page.
2955 */
2956 for (;;)
2957 {
2958 /* map the page */
2959 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2960 if (RT_FAILURE(rc))
2961 return rc;
2962
2963 /* last page? */
2964 if (cb <= PAGE_SIZE)
2965 {
2966 memcpy(pvDst, pvSrc, cb);
2967 PGMPhysReleasePageMappingLock(pVM, &Lock);
2968 return VINF_SUCCESS;
2969 }
2970
2971 /* copy the entire page and advance */
2972 memcpy(pvDst, pvSrc, PAGE_SIZE);
2973 PGMPhysReleasePageMappingLock(pVM, &Lock);
2974 GCPhysDst += PAGE_SIZE;
2975 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2976 cb -= PAGE_SIZE;
2977 }
2978 /* won't ever get here. */
2979}
2980
2981
2982/**
2983 * Read from guest physical memory referenced by GC pointer.
2984 *
2985 * This function uses the current CR3/CR0/CR4 of the guest and will
2986 * bypass access handlers and not set any accessed bits.
2987 *
2988 * @returns VBox status.
2989 * @param pVCpu Handle to the current virtual CPU.
2990 * @param pvDst The destination address.
2991 * @param GCPtrSrc The source address (GC pointer).
2992 * @param cb The number of bytes to read.
2993 */
2994VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2995{
2996 PVM pVM = pVCpu->CTX_SUFF(pVM);
2997/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
2998
2999 /*
3000 * Treat the first page as a special case.
3001 */
3002 if (!cb)
3003 return VINF_SUCCESS;
3004
3005 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleRead));
3006 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3007
3008 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3009 * when many VCPUs are fighting for the lock.
3010 */
3011 pgmLock(pVM);
3012
3013 /* map the 1st page */
3014 void const *pvSrc;
3015 PGMPAGEMAPLOCK Lock;
3016 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3017 if (RT_FAILURE(rc))
3018 {
3019 pgmUnlock(pVM);
3020 return rc;
3021 }
3022
3023 /* optimize for the case where access is completely within the first page. */
3024 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3025 if (RT_LIKELY(cb <= cbPage))
3026 {
3027 memcpy(pvDst, pvSrc, cb);
3028 PGMPhysReleasePageMappingLock(pVM, &Lock);
3029 pgmUnlock(pVM);
3030 return VINF_SUCCESS;
3031 }
3032
3033 /* copy to the end of the page. */
3034 memcpy(pvDst, pvSrc, cbPage);
3035 PGMPhysReleasePageMappingLock(pVM, &Lock);
3036 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3037 pvDst = (uint8_t *)pvDst + cbPage;
3038 cb -= cbPage;
3039
3040 /*
3041 * Page by page.
3042 */
3043 for (;;)
3044 {
3045 /* map the page */
3046 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3047 if (RT_FAILURE(rc))
3048 {
3049 pgmUnlock(pVM);
3050 return rc;
3051 }
3052
3053 /* last page? */
3054 if (cb <= PAGE_SIZE)
3055 {
3056 memcpy(pvDst, pvSrc, cb);
3057 PGMPhysReleasePageMappingLock(pVM, &Lock);
3058 pgmUnlock(pVM);
3059 return VINF_SUCCESS;
3060 }
3061
3062 /* copy the entire page and advance */
3063 memcpy(pvDst, pvSrc, PAGE_SIZE);
3064 PGMPhysReleasePageMappingLock(pVM, &Lock);
3065 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
3066 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3067 cb -= PAGE_SIZE;
3068 }
3069 /* won't ever get here. */
3070}
3071
3072
3073/**
3074 * Write to guest physical memory referenced by GC pointer.
3075 *
3076 * This function uses the current CR3/CR0/CR4 of the guest and will
3077 * bypass access handlers and not set dirty or accessed bits.
3078 *
3079 * @returns VBox status.
3080 * @param pVCpu Handle to the current virtual CPU.
3081 * @param GCPtrDst The destination address (GC pointer).
3082 * @param pvSrc The source address.
3083 * @param cb The number of bytes to write.
3084 */
3085VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3086{
3087 PVM pVM = pVCpu->CTX_SUFF(pVM);
3088 VMCPU_ASSERT_EMT(pVCpu);
3089
3090 /*
3091 * Treat the first page as a special case.
3092 */
3093 if (!cb)
3094 return VINF_SUCCESS;
3095
3096 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWrite));
3097 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3098
3099 /* map the 1st page */
3100 void *pvDst;
3101 PGMPAGEMAPLOCK Lock;
3102 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3103 if (RT_FAILURE(rc))
3104 return rc;
3105
3106 /* optimize for the case where access is completely within the first page. */
3107 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3108 if (RT_LIKELY(cb <= cbPage))
3109 {
3110 memcpy(pvDst, pvSrc, cb);
3111 PGMPhysReleasePageMappingLock(pVM, &Lock);
3112 return VINF_SUCCESS;
3113 }
3114
3115 /* copy to the end of the page. */
3116 memcpy(pvDst, pvSrc, cbPage);
3117 PGMPhysReleasePageMappingLock(pVM, &Lock);
3118 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3119 pvSrc = (const uint8_t *)pvSrc + cbPage;
3120 cb -= cbPage;
3121
3122 /*
3123 * Page by page.
3124 */
3125 for (;;)
3126 {
3127 /* map the page */
3128 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3129 if (RT_FAILURE(rc))
3130 return rc;
3131
3132 /* last page? */
3133 if (cb <= PAGE_SIZE)
3134 {
3135 memcpy(pvDst, pvSrc, cb);
3136 PGMPhysReleasePageMappingLock(pVM, &Lock);
3137 return VINF_SUCCESS;
3138 }
3139
3140 /* copy the entire page and advance */
3141 memcpy(pvDst, pvSrc, PAGE_SIZE);
3142 PGMPhysReleasePageMappingLock(pVM, &Lock);
3143 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3144 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3145 cb -= PAGE_SIZE;
3146 }
3147 /* won't ever get here. */
3148}
3149
3150
3151/**
3152 * Write to guest physical memory referenced by GC pointer and update the PTE.
3153 *
3154 * This function uses the current CR3/CR0/CR4 of the guest and will
3155 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3156 *
3157 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3158 *
3159 * @returns VBox status.
3160 * @param pVCpu Handle to the current virtual CPU.
3161 * @param GCPtrDst The destination address (GC pointer).
3162 * @param pvSrc The source address.
3163 * @param cb The number of bytes to write.
3164 */
3165VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3166{
3167 PVM pVM = pVCpu->CTX_SUFF(pVM);
3168 VMCPU_ASSERT_EMT(pVCpu);
3169
3170 /*
3171 * Treat the first page as a special case.
3172 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3173 */
3174 if (!cb)
3175 return VINF_SUCCESS;
3176
3177 /* map the 1st page */
3178 void *pvDst;
3179 PGMPAGEMAPLOCK Lock;
3180 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3181 if (RT_FAILURE(rc))
3182 return rc;
3183
3184 /* optimize for the case where access is completely within the first page. */
3185 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3186 if (RT_LIKELY(cb <= cbPage))
3187 {
3188 memcpy(pvDst, pvSrc, cb);
3189 PGMPhysReleasePageMappingLock(pVM, &Lock);
3190 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3191 return VINF_SUCCESS;
3192 }
3193
3194 /* copy to the end of the page. */
3195 memcpy(pvDst, pvSrc, cbPage);
3196 PGMPhysReleasePageMappingLock(pVM, &Lock);
3197 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3198 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3199 pvSrc = (const uint8_t *)pvSrc + cbPage;
3200 cb -= cbPage;
3201
3202 /*
3203 * Page by page.
3204 */
3205 for (;;)
3206 {
3207 /* map the page */
3208 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3209 if (RT_FAILURE(rc))
3210 return rc;
3211
3212 /* last page? */
3213 if (cb <= PAGE_SIZE)
3214 {
3215 memcpy(pvDst, pvSrc, cb);
3216 PGMPhysReleasePageMappingLock(pVM, &Lock);
3217 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3218 return VINF_SUCCESS;
3219 }
3220
3221 /* copy the entire page and advance */
3222 memcpy(pvDst, pvSrc, PAGE_SIZE);
3223 PGMPhysReleasePageMappingLock(pVM, &Lock);
3224 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3225 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3226 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3227 cb -= PAGE_SIZE;
3228 }
3229 /* won't ever get here. */
3230}
3231
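/*
 * Sketch (not built; the helper name is invented) of picking between the two
 * simple write variants above: an emulated guest store normally uses the
 * dirty variant so the guest PTE ends up with A and D set, while writes that
 * must not touch the D bit use PGMPhysSimpleWriteGCPtr().
 */
#if 0 /* illustration only */
static int pgmPhysExampleEmulateStoreU32(PVMCPU pVCpu, RTGCPTR GCPtrDst, uint32_t u32)
{
    return PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, &u32, sizeof(u32));
}
#endif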
3232
3233/**
3234 * Read from guest physical memory referenced by GC pointer.
3235 *
3236 * This function uses the current CR3/CR0/CR4 of the guest and will
3237 * respect access handlers and set accessed bits.
3238 *
3239 * @returns VBox status.
3240 * @param pVCpu Handle to the current virtual CPU.
3241 * @param pvDst The destination address.
3242 * @param GCPtrSrc The source address (GC pointer).
3243 * @param cb The number of bytes to read.
3244 * @thread The vCPU EMT.
3245 */
3246VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3247{
3248 RTGCPHYS GCPhys;
3249 uint64_t fFlags;
3250 int rc;
3251 PVM pVM = pVCpu->CTX_SUFF(pVM);
3252 VMCPU_ASSERT_EMT(pVCpu);
3253
3254 /*
3255 * Anything to do?
3256 */
3257 if (!cb)
3258 return VINF_SUCCESS;
3259
3260 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3261
3262 /*
3263 * Optimize reads within a single page.
3264 */
3265 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3266 {
3267 /* Convert virtual to physical address + flags */
3268 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3269 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3270 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3271
3272 /* mark the guest page as accessed. */
3273 if (!(fFlags & X86_PTE_A))
3274 {
3275 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3276 AssertRC(rc);
3277 }
3278
3279 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
3280 }
3281
3282 /*
3283 * Page by page.
3284 */
3285 for (;;)
3286 {
3287 /* Convert virtual to physical address + flags */
3288 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3289 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3290 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3291
3292 /* mark the guest page as accessed. */
3293 if (!(fFlags & X86_PTE_A))
3294 {
3295 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3296 AssertRC(rc);
3297 }
3298
3299 /* copy */
3300 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3301 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
3302 if (cbRead >= cb || RT_FAILURE(rc))
3303 return rc;
3304
3305 /* next */
3306 cb -= cbRead;
3307 pvDst = (uint8_t *)pvDst + cbRead;
3308 GCPtrSrc += cbRead;
3309 }
3310}
3311
3312
3313/**
3314 * Write to guest physical memory referenced by GC pointer.
3315 *
3316 * This function uses the current CR3/CR0/CR4 of the guest and will
3317 * respect access handlers and set dirty and accessed bits.
3318 *
3319 * @returns VBox status.
3320 * @retval VINF_SUCCESS.
3321 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
3322 *
3323 * @param pVCpu Handle to the current virtual CPU.
3324 * @param GCPtrDst The destination address (GC pointer).
3325 * @param pvSrc The source address.
3326 * @param cb The number of bytes to write.
3327 */
3328VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3329{
3330 RTGCPHYS GCPhys;
3331 uint64_t fFlags;
3332 int rc;
3333 PVM pVM = pVCpu->CTX_SUFF(pVM);
3334 VMCPU_ASSERT_EMT(pVCpu);
3335
3336 /*
3337 * Anything to do?
3338 */
3339 if (!cb)
3340 return VINF_SUCCESS;
3341
3342 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3343
3344 /*
3345 * Optimize writes within a single page.
3346 */
3347 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3348 {
3349 /* Convert virtual to physical address + flags */
3350 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3351 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3352 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3353
3354 /* Mention when we ignore X86_PTE_RW... */
3355 if (!(fFlags & X86_PTE_RW))
3356            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3357
3358 /* Mark the guest page as accessed and dirty if necessary. */
3359 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3360 {
3361 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3362 AssertRC(rc);
3363 }
3364
3365 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
3366 }
3367
3368 /*
3369 * Page by page.
3370 */
3371 for (;;)
3372 {
3373 /* Convert virtual to physical address + flags */
3374 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3375 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3376 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3377
3378 /* Mention when we ignore X86_PTE_RW... */
3379 if (!(fFlags & X86_PTE_RW))
3380            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3381
3382 /* Mark the guest page as accessed and dirty if necessary. */
3383 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3384 {
3385 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3386 AssertRC(rc);
3387 }
3388
3389 /* copy */
3390 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3391 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
3392 if (cbWrite >= cb || RT_FAILURE(rc))
3393 return rc;
3394
3395 /* next */
3396 cb -= cbWrite;
3397 pvSrc = (uint8_t *)pvSrc + cbWrite;
3398 GCPtrDst += cbWrite;
3399 }
3400}
3401
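/*
 * Editor's sketch (not part of the original file): the page-chunking arithmetic
 * that PGMPhysWriteGCPtr above uses for its fast path and its page-by-page loop,
 * reduced to standalone C.  The 4 KiB PAGE_SIZE / PAGE_OFFSET_MASK values are
 * assumed here for illustration only; in the real build they come from the
 * IPRT/VBox headers, and the EX_* names below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE        0x1000u
#define EX_PAGE_OFFSET_MASK 0xfffu

/* How many bytes of a cb-byte access starting at GCPtr fit into the current page. */
static size_t exChunkInPage(uint64_t GCPtr, size_t cb)
{
    size_t cbChunk = (size_t)(EX_PAGE_SIZE - (GCPtr & EX_PAGE_OFFSET_MASK));
    return cb <= cbChunk ? cb : cbChunk;
}

int main(void)
{
    uint64_t GCPtr = 0x7ffffffffcULL;   /* four bytes short of a page boundary */
    size_t   cb    = 10;
    while (cb)
    {
        size_t cbChunk = exChunkInPage(GCPtr, cb);
        printf("write %zu byte(s) at %#llx\n", cbChunk, (unsigned long long)GCPtr);
        cb    -= cbChunk;
        GCPtr += cbChunk;
    }
    return 0;   /* prints a 4-byte chunk followed by a 6-byte chunk */
}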
3402
3403/**
3404 * Performs a read of guest virtual memory for instruction emulation.
3405 *
3406 * This will check permissions, raise exceptions and update the access bits.
3407 *
3408 * The current implementation will bypass all access handlers. It may later be
3409 * changed to at least respect MMIO.
3410 *
3411 *
3412 * @returns VBox status code suitable to scheduling.
3413 * @retval VINF_SUCCESS if the read was performed successfully.
3414 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3415 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3416 *
3417 * @param pVCpu Handle to the current virtual CPU.
3418 * @param pCtxCore The context core.
3419 * @param pvDst Where to put the bytes we've read.
3420 * @param GCPtrSrc The source address.
3421 * @param cb The number of bytes to read. Not more than a page.
3422 *
3423 * @remark This function will dynamically map physical pages in GC. This may unmap
3424 * mappings done by the caller. Be careful!
3425 */
3426VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3427{
3428 PVM pVM = pVCpu->CTX_SUFF(pVM);
3429 Assert(cb <= PAGE_SIZE);
3430 VMCPU_ASSERT_EMT(pVCpu);
3431
3432/** @todo r=bird: This isn't perfect!
3433 * -# It's not checking for reserved bits being 1.
3434 * -# It's not correctly dealing with the access bit.
3435 * -# It's not respecting MMIO memory or any other access handlers.
3436 */
3437 /*
3438 * 1. Translate virtual to physical. This may fault.
3439 * 2. Map the physical address.
3440 * 3. Do the read operation.
3441 * 4. Set access bits if required.
3442 */
3443 int rc;
3444 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3445 if (cb <= cb1)
3446 {
3447 /*
3448 * Not crossing pages.
3449 */
3450 RTGCPHYS GCPhys;
3451 uint64_t fFlags;
3452 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3453 if (RT_SUCCESS(rc))
3454 {
3455 /** @todo we should check reserved bits ... */
3456 PGMPAGEMAPLOCK PgMpLck;
3457 void const *pvSrc;
3458 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
3459 switch (rc)
3460 {
3461 case VINF_SUCCESS:
3462 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3463 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3464 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3465 break;
3466 case VERR_PGM_PHYS_PAGE_RESERVED:
3467 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3468 memset(pvDst, 0xff, cb);
3469 break;
3470 default:
3471 Assert(RT_FAILURE_NP(rc));
3472 return rc;
3473 }
3474
3475 /** @todo access bit emulation isn't 100% correct. */
3476 if (!(fFlags & X86_PTE_A))
3477 {
3478 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3479 AssertRC(rc);
3480 }
3481 return VINF_SUCCESS;
3482 }
3483 }
3484 else
3485 {
3486 /*
3487 * Crosses pages.
3488 */
3489 size_t cb2 = cb - cb1;
3490 uint64_t fFlags1;
3491 RTGCPHYS GCPhys1;
3492 uint64_t fFlags2;
3493 RTGCPHYS GCPhys2;
3494 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3495 if (RT_SUCCESS(rc))
3496 {
3497 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3498 if (RT_SUCCESS(rc))
3499 {
3500 /** @todo we should check reserved bits ... */
3501 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3502 PGMPAGEMAPLOCK PgMpLck;
3503 void const *pvSrc1;
3504 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
3505 switch (rc)
3506 {
3507 case VINF_SUCCESS:
3508 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3509 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3510 break;
3511 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3512 memset(pvDst, 0xff, cb1);
3513 break;
3514 default:
3515 Assert(RT_FAILURE_NP(rc));
3516 return rc;
3517 }
3518
3519 void const *pvSrc2;
3520 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
3521 switch (rc)
3522 {
3523 case VINF_SUCCESS:
3524 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3525 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3526 break;
3527 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3528 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3529 break;
3530 default:
3531 Assert(RT_FAILURE_NP(rc));
3532 return rc;
3533 }
3534
3535 if (!(fFlags1 & X86_PTE_A))
3536 {
3537 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3538 AssertRC(rc);
3539 }
3540 if (!(fFlags2 & X86_PTE_A))
3541 {
3542 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3543 AssertRC(rc);
3544 }
3545 return VINF_SUCCESS;
3546 }
3547 }
3548 }
3549
3550 /*
3551 * Raise a #PF.
3552 */
3553 uint32_t uErr;
3554
3555 /* Get the current privilege level. */
3556 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3557 switch (rc)
3558 {
3559 case VINF_SUCCESS:
3560 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3561 break;
3562
3563 case VERR_PAGE_NOT_PRESENT:
3564 case VERR_PAGE_TABLE_NOT_PRESENT:
3565 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3566 break;
3567
3568 default:
3569 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3570 return rc;
3571 }
3572 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3573 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3574}
3575
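/*
 * Editor's sketch (not part of the original file): how the #PF error code is
 * assembled in the switch above.  The EX_* bit values are hypothetical names
 * that mirror the architectural x86 page-fault error-code layout (U/S = bit 2,
 * RSVD = bit 3), which is also what the VBox X86_TRAP_PF_US / X86_TRAP_PF_RSVD
 * constants encode.  The "cpl >= 2" test is copied from the code above as-is.
 */
#include <stdint.h>
#include <stdbool.h>

#define EX_TRAP_PF_US   0x4u   /* fault taken from user-mode code */
#define EX_TRAP_PF_RSVD 0x8u   /* reserved bit set in a paging structure */

uint32_t exReadFaultErrCode(bool fTranslated, unsigned cpl)
{
    /* Translation succeeded but the access still failed -> blame reserved bits,
       exactly as the VINF_SUCCESS case above does; otherwise the page (or a
       page table) simply was not present and no extra bits are set. */
    uint32_t uErr = fTranslated ? EX_TRAP_PF_RSVD : 0;
    if (cpl >= 2)               /* same CPL test as the switch above */
        uErr |= EX_TRAP_PF_US;
    return uErr;
}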
3576
3577/**
3578 * Performs a read of guest virtual memory for instruction emulation.
3579 *
3580 * This will check permissions, raise exceptions and update the access bits.
3581 *
3582 * The current implementation will bypass all access handlers. It may later be
3583 * changed to at least respect MMIO.
3584 *
3585 *
3586 * @returns VBox status code suitable to scheduling.
3587 * @retval VINF_SUCCESS if the read was performed successfully.
3588 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3589 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3590 *
3591 * @param pVCpu Handle to the current virtual CPU.
3592 * @param pCtxCore The context core.
3593 * @param pvDst Where to put the bytes we've read.
3594 * @param GCPtrSrc The source address.
3595 * @param cb The number of bytes to read. Not more than a page.
3596 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3597 * an appropriate error status will be returned instead (no
3598 * informational status at all).
3599 *
3600 *
3601 * @remarks Takes the PGM lock.
3602 * @remarks A page fault on the 2nd page of the access will be raised without
3603 * writing the bits on the first page since we're ASSUMING that the
3604 * caller is emulating an instruction access.
3605 * @remarks This function will dynamically map physical pages in GC. This may
3606 * unmap mappings done by the caller. Be careful!
3607 */
3608VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,
3609 bool fRaiseTrap)
3610{
3611 PVM pVM = pVCpu->CTX_SUFF(pVM);
3612 Assert(cb <= PAGE_SIZE);
3613 VMCPU_ASSERT_EMT(pVCpu);
3614
3615 /*
3616 * 1. Translate virtual to physical. This may fault.
3617 * 2. Map the physical address.
3618 * 3. Do the read operation.
3619 * 4. Set access bits if required.
3620 */
3621 int rc;
3622 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3623 if (cb <= cb1)
3624 {
3625 /*
3626 * Not crossing pages.
3627 */
3628 RTGCPHYS GCPhys;
3629 uint64_t fFlags;
3630 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3631 if (RT_SUCCESS(rc))
3632 {
3633 if (1) /** @todo we should check reserved bits ... */
3634 {
3635 const void *pvSrc;
3636 PGMPAGEMAPLOCK Lock;
3637 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3638 switch (rc)
3639 {
3640 case VINF_SUCCESS:
3641 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3642 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3643 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3644 PGMPhysReleasePageMappingLock(pVM, &Lock);
3645 break;
3646 case VERR_PGM_PHYS_PAGE_RESERVED:
3647 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3648 memset(pvDst, 0xff, cb);
3649 break;
3650 default:
3651 AssertMsgFailed(("%Rrc\n", rc));
3652 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3653 return rc;
3654 }
3655
3656 if (!(fFlags & X86_PTE_A))
3657 {
3658 /** @todo access bit emulation isn't 100% correct. */
3659 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3660 AssertRC(rc);
3661 }
3662 return VINF_SUCCESS;
3663 }
3664 }
3665 }
3666 else
3667 {
3668 /*
3669 * Crosses pages.
3670 */
3671 size_t cb2 = cb - cb1;
3672 uint64_t fFlags1;
3673 RTGCPHYS GCPhys1;
3674 uint64_t fFlags2;
3675 RTGCPHYS GCPhys2;
3676 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3677 if (RT_SUCCESS(rc))
3678 {
3679 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3680 if (RT_SUCCESS(rc))
3681 {
3682 if (1) /** @todo we should check reserved bits ... */
3683 {
3684 const void *pvSrc;
3685 PGMPAGEMAPLOCK Lock;
3686 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3687 switch (rc)
3688 {
3689 case VINF_SUCCESS:
3690 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3691 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3692 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3693 PGMPhysReleasePageMappingLock(pVM, &Lock);
3694 break;
3695 case VERR_PGM_PHYS_PAGE_RESERVED:
3696 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3697 memset(pvDst, 0xff, cb1);
3698 break;
3699 default:
3700 AssertMsgFailed(("%Rrc\n", rc));
3701 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3702 return rc;
3703 }
3704
3705 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3706 switch (rc)
3707 {
3708 case VINF_SUCCESS:
3709 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3710 PGMPhysReleasePageMappingLock(pVM, &Lock);
3711 break;
3712 case VERR_PGM_PHYS_PAGE_RESERVED:
3713 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3714 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3715 break;
3716 default:
3717 AssertMsgFailed(("%Rrc\n", rc));
3718 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3719 return rc;
3720 }
3721
3722 if (!(fFlags1 & X86_PTE_A))
3723 {
3724 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3725 AssertRC(rc);
3726 }
3727 if (!(fFlags2 & X86_PTE_A))
3728 {
3729 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3730 AssertRC(rc);
3731 }
3732 return VINF_SUCCESS;
3733 }
3734 /* sort out which page */
3735 }
3736 else
3737 GCPtrSrc += cb1; /* fault on 2nd page */
3738 }
3739 }
3740
3741 /*
3742 * Raise a #PF if we're allowed to do that.
3743 */
3744 /* Calc the error bits. */
3745 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3746 uint32_t uErr;
3747 switch (rc)
3748 {
3749 case VINF_SUCCESS:
3750 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3751 rc = VERR_ACCESS_DENIED;
3752 break;
3753
3754 case VERR_PAGE_NOT_PRESENT:
3755 case VERR_PAGE_TABLE_NOT_PRESENT:
3756 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3757 break;
3758
3759 default:
3760 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3761 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3762 return rc;
3763 }
3764 if (fRaiseTrap)
3765 {
3766 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3767 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3768 }
3769 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3770 return rc;
3771}
3772
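/*
 * Editor's sketch (not part of the original file): the cross-page copy pattern
 * used above, reduced to standalone C.  Two host mappings stand in for the two
 * guest pages; a page that could not be mapped (the VERR_PGM_PHYS_PAGE_RESERVED
 * and VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS cases above) reads back as 0xff.
 * All names here are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

void exCrossPageRead(uint8_t *pbDst, size_t cb, size_t offInPage1,
                     const uint8_t *pbPage1, const uint8_t *pbPage2, size_t cbPage)
{
    size_t cb1 = cbPage - offInPage1;   /* bytes available on the first page */
    if (cb <= cb1)
        cb1 = cb;
    size_t cb2 = cb - cb1;              /* remainder spills onto the second page */

    if (pbPage1)
        memcpy(pbDst, pbPage1 + offInPage1, cb1);
    else
        memset(pbDst, 0xff, cb1);       /* unbacked page -> all ones, as above */

    if (cb2)
    {
        if (pbPage2)
            memcpy(pbDst + cb1, pbPage2, cb2);
        else
            memset(pbDst + cb1, 0xff, cb2);
    }
}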
3773
3774/**
3775 * Performs a write to guest virtual memory for instruction emulation.
3776 *
3777 * This will check permissions, raise exceptions and update the dirty and access
3778 * bits.
3779 *
3780 * @returns VBox status code suitable to scheduling.
3781 * @retval VINF_SUCCESS if the write was performed successfully.
3782 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3783 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3784 *
3785 * @param pVCpu Handle to the current virtual CPU.
3786 * @param pCtxCore The context core.
3787 * @param GCPtrDst The destination address.
3788 * @param pvSrc What to write.
3789 * @param cb The number of bytes to write. Not more than a page.
3790 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3791 * an appropriate error status will be returned instead (no
3792 * informational status at all).
3793 *
3794 * @remarks Takes the PGM lock.
3795 * @remarks A page fault on the 2nd page of the access will be raised without
3796 * writing the bits on the first page since we're ASSUMING that the
3797 * caller is emulating an instruction access.
3798 * @remarks This function will dynamically map physical pages in GC. This may
3799 * unmap mappings done by the caller. Be careful!
3800 */
3801VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,
3802 size_t cb, bool fRaiseTrap)
3803{
3804 Assert(cb <= PAGE_SIZE);
3805 PVM pVM = pVCpu->CTX_SUFF(pVM);
3806 VMCPU_ASSERT_EMT(pVCpu);
3807
3808 /*
3809 * 1. Translate virtual to physical. This may fault.
3810 * 2. Map the physical address.
3811 * 3. Do the write operation.
3812 * 4. Set access bits if required.
3813 */
3814 /** @todo Since this method is frequently used by EMInterpret or IOM
3815 * upon a write fault to a write access monitored page, we can
3816 * reuse the guest page table walking from the \#PF code. */
3817 int rc;
3818 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3819 if (cb <= cb1)
3820 {
3821 /*
3822 * Not crossing pages.
3823 */
3824 RTGCPHYS GCPhys;
3825 uint64_t fFlags;
3826 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3827 if (RT_SUCCESS(rc))
3828 {
3829 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3830 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3831 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3832 {
3833 void *pvDst;
3834 PGMPAGEMAPLOCK Lock;
3835 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3836 switch (rc)
3837 {
3838 case VINF_SUCCESS:
3839 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3840 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3841 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3842 PGMPhysReleasePageMappingLock(pVM, &Lock);
3843 break;
3844 case VERR_PGM_PHYS_PAGE_RESERVED:
3845 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3846 /* bit bucket */
3847 break;
3848 default:
3849 AssertMsgFailed(("%Rrc\n", rc));
3850 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3851 return rc;
3852 }
3853
3854 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3855 {
3856 /** @todo dirty & access bit emulation isn't 100% correct. */
3857 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3858 AssertRC(rc);
3859 }
3860 return VINF_SUCCESS;
3861 }
3862 rc = VERR_ACCESS_DENIED;
3863 }
3864 }
3865 else
3866 {
3867 /*
3868 * Crosses pages.
3869 */
3870 size_t cb2 = cb - cb1;
3871 uint64_t fFlags1;
3872 RTGCPHYS GCPhys1;
3873 uint64_t fFlags2;
3874 RTGCPHYS GCPhys2;
3875 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3876 if (RT_SUCCESS(rc))
3877 {
3878 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3879 if (RT_SUCCESS(rc))
3880 {
3881 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3882 && (fFlags2 & X86_PTE_RW))
3883 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3884 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) )
3885 {
3886 void *pvDst;
3887 PGMPAGEMAPLOCK Lock;
3888 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3889 switch (rc)
3890 {
3891 case VINF_SUCCESS:
3892 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3893 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3894 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3895 PGMPhysReleasePageMappingLock(pVM, &Lock);
3896 break;
3897 case VERR_PGM_PHYS_PAGE_RESERVED:
3898 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3899 /* bit bucket */
3900 break;
3901 default:
3902 AssertMsgFailed(("%Rrc\n", rc));
3903 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3904 return rc;
3905 }
3906
3907 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3908 switch (rc)
3909 {
3910 case VINF_SUCCESS:
3911 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3912 PGMPhysReleasePageMappingLock(pVM, &Lock);
3913 break;
3914 case VERR_PGM_PHYS_PAGE_RESERVED:
3915 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3916 /* bit bucket */
3917 break;
3918 default:
3919 AssertMsgFailed(("%Rrc\n", rc));
3920 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3921 return rc;
3922 }
3923
3924 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3925 {
3926 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3927 AssertRC(rc);
3928 }
3929 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3930 {
3931 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3932 AssertRC(rc);
3933 }
3934 return VINF_SUCCESS;
3935 }
3936 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3937 GCPtrDst += cb1; /* fault on the 2nd page. */
3938 rc = VERR_ACCESS_DENIED;
3939 }
3940 else
3941 GCPtrDst += cb1; /* fault on the 2nd page. */
3942 }
3943 }
3944
3945 /*
3946 * Raise a #PF if we're allowed to do that.
3947 */
3948 /* Calc the error bits. */
3949 uint32_t uErr;
3950 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3951 switch (rc)
3952 {
3953 case VINF_SUCCESS:
3954 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3955 rc = VERR_ACCESS_DENIED;
3956 break;
3957
3958 case VERR_ACCESS_DENIED:
3959 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3960 break;
3961
3962 case VERR_PAGE_NOT_PRESENT:
3963 case VERR_PAGE_TABLE_NOT_PRESENT:
3964 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3965 break;
3966
3967 default:
3968 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3969 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3970 return rc;
3971 }
3972 if (fRaiseTrap)
3973 {
3974 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3975 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3976 }
3977 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3978 return rc;
3979}
3980
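/*
 * Editor's sketch (not part of the original file): the supervisor write
 * permission test and the accessed/dirty update used above, as standalone C.
 * The EX_* values are hypothetical names mirroring the architectural x86
 * encodings (PTE.RW = bit 1, PTE.A = bit 5, PTE.D = bit 6, CR0.WP = bit 16),
 * which is also what the VBox X86_PTE_* / X86_CR0_WP constants encode.  The
 * "cpl <= 2" bound is copied from the code above as-is (it carries its own
 * @todo questioning the exact value).
 */
#include <stdint.h>
#include <stdbool.h>

#define EX_PTE_RW  UINT64_C(0x02)
#define EX_PTE_A   UINT64_C(0x20)
#define EX_PTE_D   UINT64_C(0x40)
#define EX_CR0_WP  UINT32_C(0x00010000)

bool exMayWrite(uint64_t fPte, uint32_t cr0, unsigned cpl)
{
    /* Writable mapping, or supervisor code running with CR0.WP clear. */
    return (fPte & EX_PTE_RW)
        || (!(cr0 & EX_CR0_WP) && cpl <= 2);
}

uint64_t exMarkAccessedDirty(uint64_t fPte)
{
    /* Only touch the PTE when a bit is actually missing, as the code above does. */
    if ((fPte & (EX_PTE_A | EX_PTE_D)) != (EX_PTE_A | EX_PTE_D))
        fPte |= EX_PTE_A | EX_PTE_D;
    return fPte;
}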
3981
3982/**
3983 * Return the page type of the specified physical address.
3984 *
3985 * @returns The page type.
3986 * @param pVM VM Handle.
3987 * @param GCPhys Guest physical address.
3988 */
3989VMMDECL(PGMPAGETYPE) PGMPhysGetPageType(PVM pVM, RTGCPHYS GCPhys)
3990{
3991 pgmLock(pVM);
3992 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3993 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
3994 pgmUnlock(pVM);
3995
3996 return enmPgType;
3997}
3998
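/*
 * Editor's sketch (not part of the original file): a hypothetical caller of
 * PGMPhysGetPageType.  It uses only declarations that appear above (PVM,
 * RTGCPHYS, PGMPAGETYPE, PGMPAGETYPE_INVALID); within the VMM this would live
 * in a translation unit that includes <VBox/vmm/pgm.h>.
 */
bool exIsBackedByKnownPage(PVM pVM, RTGCPHYS GCPhys)
{
    PGMPAGETYPE enmType = PGMPhysGetPageType(pVM, GCPhys);
    /* PGMPAGETYPE_INVALID means no page is registered at this physical address. */
    return enmType != PGMPAGETYPE_INVALID;
}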