VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@37136

Last change on this file since 37136 was 36902, checked in by vboxsync, 14 years ago

PGM: Implemented RAM range search trees (disabled).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 136.2 KB
1/* $Id: PGMAllPhys.cpp 36902 2011-04-30 11:57:28Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM_PHYS
22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/trpm.h>
24#include <VBox/vmm/vmm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/em.h>
27#include <VBox/vmm/rem.h>
28#include "PGMInternal.h"
29#include <VBox/vmm/vm.h>
30#include "PGMInline.h"
31#include <VBox/param.h>
32#include <VBox/err.h>
33#include <iprt/assert.h>
34#include <iprt/string.h>
35#include <iprt/asm-amd64-x86.h>
36#include <VBox/log.h>
37#ifdef IN_RING3
38# include <iprt/thread.h>
39#endif
40
41
42/*******************************************************************************
43* Defined Constants And Macros *
44*******************************************************************************/
45/** Enable the physical TLB. */
46#define PGM_WITH_PHYS_TLB
47
48
49
50#ifndef IN_RING3
51
52/**
53 * \#PF Handler callback for physical memory accesses without a RC/R0 handler.
54 * This simply pushes everything to the HC handler.
55 *
56 * @returns VBox status code (appropriate for trap handling and GC return).
57 * @param pVM VM Handle.
58 * @param uErrorCode CPU Error code.
59 * @param pRegFrame Trap register frame.
60 * @param pvFault The fault address (cr2).
61 * @param GCPhysFault The GC physical address corresponding to pvFault.
62 * @param pvUser User argument.
63 */
64VMMDECL(int) pgmPhysHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
65{
66 return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
67}
68
69
70/**
71 * \#PF Handler callback for Guest ROM range write access.
72 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
73 *
74 * @returns VBox status code (appropriate for trap handling and GC return).
75 * @param pVM VM Handle.
76 * @param uErrorCode CPU Error code.
77 * @param pRegFrame Trap register frame.
78 * @param pvFault The fault address (cr2).
79 * @param GCPhysFault The GC physical address corresponding to pvFault.
80 * @param pvUser User argument. Pointer to the ROM range structure.
81 */
82VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
83{
84 int rc;
85 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
86 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
87 PVMCPU pVCpu = VMMGetCpu(pVM);
88
89 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
90 switch (pRom->aPages[iPage].enmProt)
91 {
92 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
93 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
94 {
95 /*
96 * If it's a simple instruction which doesn't change the cpu state
97 * we will simply skip it. Otherwise we'll have to defer it to REM.
98 */
99 uint32_t cbOp;
100 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
101 rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, &cbOp);
102 if ( RT_SUCCESS(rc)
103 && pDis->mode == CPUMODE_32BIT /** @todo why does this matter? */
104 && !(pDis->prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
105 {
106 switch (pDis->opcode)
107 {
108 /** @todo Find other instructions we can safely skip, possibly
109 * adding this kind of detection to DIS or EM. */
110 case OP_MOV:
111 pRegFrame->rip += cbOp;
112 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteHandled);
113 return VINF_SUCCESS;
114 }
115 }
116 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
117 return rc;
118 break;
119 }
120
121 case PGMROMPROT_READ_RAM_WRITE_RAM:
122 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
123 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
124 AssertRC(rc);
125 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
126
127 case PGMROMPROT_READ_ROM_WRITE_RAM:
128 /* Handle it in ring-3 because it's *way* easier there. */
129 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
130 break;
131
132 default:
133 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
134 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
135 VERR_INTERNAL_ERROR);
136 }
137
138 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
139 return VINF_EM_RAW_EMULATE_INSTR;
140}
141
142#endif /* !IN_RING3 */
143
144/**
145 * Invalidates the RAM range TLBs.
146 *
147 * @param pVM The VM handle.
148 */
149void pgmPhysInvalidRamRangeTlbs(PVM pVM)
150{
151 pgmLock(pVM);
152 for (uint32_t i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
153 {
154 pVM->pgm.s.apRamRangesTlbR3[i] = NIL_RTR3PTR;
155 pVM->pgm.s.apRamRangesTlbR0[i] = NIL_RTR0PTR;
156 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
157 }
158 pgmUnlock(pVM);
159}
160
161
162/**
163 * Tests if a value of type RTGCPHYS is negative if the type had been signed
164 * instead of unsigned.
165 *
166 * @returns @c true if negative, @c false if positive or zero.
167 * @param a_GCPhys The value to test.
168 * @todo Move me to iprt/types.h.
169 */
170#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
171
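/*
 * Illustrative note (editorial addition, not part of the original source):
 * the search-tree walkers below rely on this sign-bit test together with
 * unsigned wrap-around.  With hypothetical values, GCPhys = 0x00001000 and
 * pRam->GCPhys = 0x000A0000 give
 *
 *      RTGCPHYS off = GCPhys - pRam->GCPhys;   // wraps, top bit becomes set
 *      RTGCPHYS_IS_NEGATIVE(off)               // -> true, descend to the left
 *
 * whereas GCPhys = 0x000A2000 gives off = 0x2000, which either hits the range
 * (off < pRam->cb) or steers the walk into the right subtree.
 */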
172
173/**
174 * Slow worker for pgmPhysGetRange.
175 *
176 * @copydoc pgmPhysGetRange
177 */
178PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
179{
180 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
181
182#ifdef PGM_USE_RAMRANGE_SEARCH_TREES
183 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
184 while (pRam)
185 {
186 RTGCPHYS off = GCPhys - pRam->GCPhys;
187 if (off < pRam->cb)
188 {
189 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
190 return pRam;
191 }
192 if (RTGCPHYS_IS_NEGATIVE(off))
193 pRam = pRam->CTX_SUFF(pLeft);
194 else
195 pRam = pRam->CTX_SUFF(pRight);
196 }
197 return NULL;
198#else
199 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
200 while (GCPhys > pRam->GCPhysLast)
201 {
202 pRam = pRam->CTX_SUFF(pNext);
203 if (!pRam)
204 return NULL;
205 }
206 if (GCPhys < pRam->GCPhys)
207 return NULL;
208
209 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
210 return pRam;
211#endif
212}
213
214
215/**
216 * Slow worker for pgmPhysGetRangeAtOrAbove.
217 *
218 * @copydoc pgmPhysGetRangeAtOrAbove
219 */
220PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
221{
222 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
223
224#ifdef PGM_USE_RAMRANGE_SEARCH_TREES
225 PPGMRAMRANGE pLastLeft = NULL;
226 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
227 while (pRam)
228 {
229 RTGCPHYS off = GCPhys - pRam->GCPhys;
230 if (off < pRam->cb)
231 {
232 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
233 return pRam;
234 }
235 if (RTGCPHYS_IS_NEGATIVE(off))
236 {
237 pLastLeft = pRam;
238 pRam = pRam->CTX_SUFF(pLeft);
239 }
240 else
241 pRam = pRam->CTX_SUFF(pRight);
242 }
243 return pLastLeft;
244#else
245 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
246 while (GCPhys > pRam->GCPhysLast)
247 {
248 pRam = pRam->CTX_SUFF(pNext);
249 if (!pRam)
250 return NULL;
251 }
252 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
253 return pRam;
254#endif
255}
256
257
258/**
259 * Slow worker for pgmPhysGetPage.
260 *
261 * @copydoc pgmPhysGetPage
262 */
263PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
264{
265 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
266
267#ifdef PGM_USE_RAMRANGE_SEARCH_TREES
268 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
269 while (pRam)
270 {
271 RTGCPHYS off = GCPhys - pRam->GCPhys;
272 if (off < pRam->cb)
273 {
274 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
275 return &pRam->aPages[off >> PAGE_SHIFT];
276 }
277
278 if (RTGCPHYS_IS_NEGATIVE(off))
279 pRam = pRam->CTX_SUFF(pLeft);
280 else
281 pRam = pRam->CTX_SUFF(pRight);
282 }
283#else
284 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
285 pRam;
286 pRam = pRam->CTX_SUFF(pNext))
287 {
288 RTGCPHYS off = GCPhys - pRam->GCPhys;
289 if (off < pRam->cb)
290 {
291 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
292 return &pRam->aPages[off >> PAGE_SHIFT];
293 }
294 }
295#endif
296 return NULL;
297}
298
299
300/**
301 * Slow worker for pgmPhysGetPageEx.
302 *
303 * @copydoc pgmPhysGetPageEx
304 */
305int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
306{
307 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
308
309#ifdef PGM_USE_RAMRANGE_SEARCH_TREES
310 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
311 while (pRam)
312 {
313 RTGCPHYS off = GCPhys - pRam->GCPhys;
314 if (off < pRam->cb)
315 {
316 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
317 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
318 return VINF_SUCCESS;
319 }
320
321 if (RTGCPHYS_IS_NEGATIVE(off))
322 pRam = pRam->CTX_SUFF(pLeft);
323 else
324 pRam = pRam->CTX_SUFF(pRight);
325 }
326#else
327 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
328 pRam;
329 pRam = pRam->CTX_SUFF(pNext))
330 {
331 RTGCPHYS off = GCPhys - pRam->GCPhys;
332 if (off < pRam->cb)
333 {
334 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
335 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
336 return VINF_SUCCESS;
337 }
338 }
339#endif
340
341 *ppPage = NULL;
342 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
343}
344
345
346/**
347 * Slow worker for pgmPhysGetPageAndRangeEx.
348 *
349 * @copydoc pgmPhysGetPageAndRangeEx
350 */
351int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
352{
353 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
354
355#ifdef PGM_USE_RAMRANGE_SEARCH_TREES
356 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
357 while (pRam)
358 {
359 RTGCPHYS off = GCPhys - pRam->GCPhys;
360 if (off < pRam->cb)
361 {
362 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
363 *ppRam = pRam;
364 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
365 return VINF_SUCCESS;
366 }
367
368 if (RTGCPHYS_IS_NEGATIVE(off))
369 pRam = pRam->CTX_SUFF(pLeft);
370 else
371 pRam = pRam->CTX_SUFF(pRight);
372 }
373#else
374 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
375 pRam;
376 pRam = pRam->CTX_SUFF(pNext))
377 {
378 RTGCPHYS off = GCPhys - pRam->GCPhys;
379 if (off < pRam->cb)
380 {
381 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
382 *ppRam = pRam;
383 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
384 return VINF_SUCCESS;
385 }
386 }
387#endif
388
389 *ppRam = NULL;
390 *ppPage = NULL;
391 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
392}
393
394
395/**
396 * Checks if Address Gate 20 is enabled or not.
397 *
398 * @returns true if enabled.
399 * @returns false if disabled.
400 * @param pVCpu VMCPU handle.
401 */
402VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
403{
404 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
405 return pVCpu->pgm.s.fA20Enabled;
406}
407
408
409/**
410 * Validates a GC physical address.
411 *
412 * @returns true if valid.
413 * @returns false if invalid.
414 * @param pVM The VM handle.
415 * @param GCPhys The physical address to validate.
416 */
417VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
418{
419 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
420 return pPage != NULL;
421}
422
423
424/**
425 * Checks if a GC physical address is a normal page,
426 * i.e. not ROM, MMIO or reserved.
427 *
428 * @returns true if normal.
429 * @returns false if invalid, ROM, MMIO or reserved page.
430 * @param pVM The VM handle.
431 * @param GCPhys The physical address to check.
432 */
433VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
434{
435 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
436 return pPage
437 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
438}
439
440
441/**
442 * Converts a GC physical address to a HC physical address.
443 *
444 * @returns VINF_SUCCESS on success.
445 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
446 * page but has no physical backing.
447 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
448 * GC physical address.
449 *
450 * @param pVM The VM handle.
451 * @param GCPhys The GC physical address to convert.
452 * @param pHCPhys Where to store the HC physical address on success.
453 */
454VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
455{
456 pgmLock(pVM);
457 PPGMPAGE pPage;
458 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
459 if (RT_SUCCESS(rc))
460 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
461 pgmUnlock(pVM);
462 return rc;
463}
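/*
 * Usage sketch (editorial addition, illustrative only): how a caller might
 * translate a guest physical address to a host physical one.  The example
 * function name and the address value are hypothetical.
 */
#if 0
static void examplePrintHCPhys(PVM pVM)
{
    RTGCPHYS const GCPhysGuest = UINT32_C(0x00100000);  /* hypothetical RAM address (1 MB) */
    if (PGMPhysIsGCPhysNormal(pVM, GCPhysGuest))        /* RAM, i.e. not ROM, MMIO or reserved */
    {
        RTHCPHYS HCPhys;
        int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysGuest, &HCPhys);
        if (RT_SUCCESS(rc))
            Log(("example: guest %RGp -> host %RHp\n", GCPhysGuest, HCPhys));
    }
}
#endif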
464
465
466/**
467 * Invalidates all page mapping TLBs.
468 *
469 * @param pVM The VM handle.
470 */
471VMMDECL(void) PGMPhysInvalidatePageMapTLB(PVM pVM)
472{
473 pgmLock(pVM);
474 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
475 /* Clear the shared R0/R3 TLB completely. */
476 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
477 {
478 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
479 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
480 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
481 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
482 }
483 /** @todo clear the RC TLB whenever we add it. */
484 pgmUnlock(pVM);
485}
486
487/**
488 * Invalidates a page mapping TLB entry.
489 *
490 * @param pVM The VM handle.
491 * @param GCPhys The GC physical address of the entry to flush.
492 */
493VMMDECL(void) PGMPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
494{
495 Assert(PGMIsLocked(pVM));
496
497 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
498 /* Clear the shared R0/R3 TLB entry. */
499#ifdef IN_RC
500 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
501 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
502 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
503 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
504 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
505#else
506 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
507 pTlbe->GCPhys = NIL_RTGCPHYS;
508 pTlbe->pPage = 0;
509 pTlbe->pMap = 0;
510 pTlbe->pv = 0;
511#endif
512 /** @todo clear the RC TLB whenever we add it. */
513}
514
515/**
516 * Makes sure that there is at least one handy page ready for use.
517 *
518 * This will also take the appropriate actions when reaching water-marks.
519 *
520 * @returns VBox status code.
521 * @retval VINF_SUCCESS on success.
522 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
523 *
524 * @param pVM The VM handle.
525 *
526 * @remarks Must be called from within the PGM critical section. It may
527 * nip back to ring-3/0 in some cases.
528 */
529static int pgmPhysEnsureHandyPage(PVM pVM)
530{
531 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
532
533 /*
534 * Do we need to do anything special?
535 */
536#ifdef IN_RING3
537 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
538#else
539 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
540#endif
541 {
542 /*
543 * Allocate pages only if we're out of them, or in ring-3, almost out.
544 */
545#ifdef IN_RING3
546 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
547#else
548 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
549#endif
550 {
551 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
552 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
553#ifdef IN_RING3
554 int rc = PGMR3PhysAllocateHandyPages(pVM);
555#else
556 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
557#endif
558 if (RT_UNLIKELY(rc != VINF_SUCCESS))
559 {
560 if (RT_FAILURE(rc))
561 return rc;
562 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
563 if (!pVM->pgm.s.cHandyPages)
564 {
565 LogRel(("PGM: no more handy pages!\n"));
566 return VERR_EM_NO_MEMORY;
567 }
568 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
569 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
570#ifdef IN_RING3
571 REMR3NotifyFF(pVM);
572#else
573 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
574#endif
575 }
576 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
577 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
578 ("%u\n", pVM->pgm.s.cHandyPages),
579 VERR_INTERNAL_ERROR);
580 }
581 else
582 {
583 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
584 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
585#ifndef IN_RING3
586 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
587 {
588 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
589 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
590 }
591#endif
592 }
593 }
594
595 return VINF_SUCCESS;
596}
597
598
599/**
600 * Replace a zero or shared page with a new page that we can write to.
601 *
602 * @returns The following VBox status codes.
603 * @retval VINF_SUCCESS on success, pPage is modified.
604 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
605 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
606 *
607 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
608 *
609 * @param pVM The VM address.
610 * @param pPage The physical page tracking structure. This will
611 * be modified on success.
612 * @param GCPhys The address of the page.
613 *
614 * @remarks Must be called from within the PGM critical section. It may
615 * nip back to ring-3/0 in some cases.
616 *
617 * @remarks This function shouldn't really fail, however if it does
618 * it probably means we've screwed up the size of handy pages and/or
619 * the low-water mark. Or, that some device I/O is causing a lot of
620 * pages to be allocated while the host is in a low-memory
621 * condition. This latter should be handled elsewhere and in a more
622 * controlled manner, it's on the @bugref{3170} todo list...
623 */
624int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
625{
626 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
627
628 /*
629 * Prereqs.
630 */
631 Assert(PGMIsLocked(pVM));
632 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
633 Assert(!PGM_PAGE_IS_MMIO(pPage));
634
635# ifdef PGM_WITH_LARGE_PAGES
636 /*
637 * Try allocate a large page if applicable.
638 */
639 if ( PGMIsUsingLargePages(pVM)
640 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
641 {
642 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
643 PPGMPAGE pBasePage;
644
645 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
646 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
647 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
648 {
649 rc = pgmPhysAllocLargePage(pVM, GCPhys);
650 if (rc == VINF_SUCCESS)
651 return rc;
652 }
653 /* Mark the base as type page table, so we don't check over and over again. */
654 PGM_PAGE_SET_PDE_TYPE(pBasePage, PGM_PAGE_PDE_TYPE_PT);
655
656 /* fall back to 4KB pages. */
657 }
658# endif
659
660 /*
661 * Flush any shadow page table mappings of the page.
662 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
663 */
664 bool fFlushTLBs = false;
665 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
666 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
667
668 /*
669 * Ensure that we've got a page handy, take it and use it.
670 */
671 int rc2 = pgmPhysEnsureHandyPage(pVM);
672 if (RT_FAILURE(rc2))
673 {
674 if (fFlushTLBs)
675 PGM_INVL_ALL_VCPU_TLBS(pVM);
676 Assert(rc2 == VERR_EM_NO_MEMORY);
677 return rc2;
678 }
679 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
680 Assert(PGMIsLocked(pVM));
681 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
682 Assert(!PGM_PAGE_IS_MMIO(pPage));
683
684 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
685 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
686 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
687 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
688 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
689 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
690
691 /*
692 * There are one or two actions to be taken the next time we allocate handy pages:
693 * - Tell the GMM (global memory manager) what the page is being used for.
694 * (Speeds up replacement operations - sharing and defragmenting.)
695 * - If the current backing is shared, it must be freed.
696 */
697 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
698 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
699
700 const void *pvSharedPage = NULL;
701
702 if (PGM_PAGE_IS_SHARED(pPage))
703 {
704 /* Mark this shared page for freeing/dereferencing. */
705 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
706 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
707
708 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
709 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
710 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageReplaceShared));
711 pVM->pgm.s.cSharedPages--;
712
713 /* Grab the address of the page so we can make a copy later on. */
714 rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
715 AssertRC(rc);
716 }
717 else
718 {
719 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
720 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
721 pVM->pgm.s.cZeroPages--;
722 }
723
724 /*
725 * Do the PGMPAGE modifications.
726 */
727 pVM->pgm.s.cPrivatePages++;
728 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
729 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
730 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
731 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
732 PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
733
734 /* Copy the shared page contents to the replacement page. */
735 if (pvSharedPage)
736 {
737 /* Get the virtual address of the new page. */
738 void *pvNewPage;
739 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage);
740 AssertRC(rc);
741 if (rc == VINF_SUCCESS)
742 {
743 /** @todo write ASMMemCopyPage */
744 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE);
745 }
746 }
747
748 if ( fFlushTLBs
749 && rc != VINF_PGM_GCPHYS_ALIASED)
750 PGM_INVL_ALL_VCPU_TLBS(pVM);
751 return rc;
752}
753
754#ifdef PGM_WITH_LARGE_PAGES
755
756/**
757 * Replace a 2 MB range of zero pages with new pages that we can write to.
758 *
759 * @returns The following VBox status codes.
760 * @retval VINF_SUCCESS on success, pPage is modified.
761 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
762 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
763 *
764 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
765 *
766 * @param pVM The VM address.
767 * @param GCPhys The address of the page.
768 *
769 * @remarks Must be called from within the PGM critical section. It may
770 * nip back to ring-3/0 in some cases.
771 */
772int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
773{
774 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
775 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
776
777 /*
778 * Prereqs.
779 */
780 Assert(PGMIsLocked(pVM));
781 Assert(PGMIsUsingLargePages(pVM));
782
783 PPGMPAGE pFirstPage;
784 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
785 if ( RT_SUCCESS(rc)
786 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
787 {
788 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
789
790 /* Don't call this function for already allocated pages. */
791 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
792
793 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
794 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
795 {
796 /* Lazy approach: check all pages in the 2 MB range.
797 * The whole range must be ram and unallocated. */
798 GCPhys = GCPhysBase;
799 unsigned iPage;
800 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
801 {
802 PPGMPAGE pSubPage;
803 rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage);
804 if ( RT_FAILURE(rc)
805 || PGM_PAGE_GET_TYPE(pSubPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
806 || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
807 {
808 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
809 break;
810 }
811 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
812 GCPhys += PAGE_SIZE;
813 }
814 if (iPage != _2M/PAGE_SIZE)
815 {
816 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
817 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
818 PGM_PAGE_SET_PDE_TYPE(pFirstPage, PGM_PAGE_PDE_TYPE_PT);
819 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
820 }
821
822 /*
823 * Do the allocation.
824 */
825# ifdef IN_RING3
826 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
827# else
828 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
829# endif
830 if (RT_SUCCESS(rc))
831 {
832 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
833 pVM->pgm.s.cLargePages++;
834 return VINF_SUCCESS;
835 }
836
837 /* If we fail once, it most likely means the host's memory is too
838 fragmented; don't bother trying again. */
839 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
840 PGMSetLargePageUsage(pVM, false);
841 return rc;
842 }
843 }
844 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
845}
846
847
848/**
849 * Recheck the entire 2 MB range to see if we can use it again as a large page.
850 *
851 * @returns The following VBox status codes.
852 * @retval VINF_SUCCESS on success, the large page can be used again
853 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
854 *
855 * @param pVM The VM address.
856 * @param GCPhys The address of the page.
857 * @param pLargePage Page structure of the base page
858 */
859int pgmPhysRecheckLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
860{
861 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
862
863 GCPhys &= X86_PDE2M_PAE_PG_MASK;
864
865 /* Check the base page. */
866 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
867 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
868 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
869 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
870 {
871 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
872 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
873 }
874
875 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
876 /* Check all remaining pages in the 2 MB range. */
877 unsigned i;
878 GCPhys += PAGE_SIZE;
879 for (i = 1; i < _2M/PAGE_SIZE; i++)
880 {
881 PPGMPAGE pPage;
882 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
883 AssertRCBreak(rc);
884
885 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
886 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
887 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
888 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
889 {
890 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
891 break;
892 }
893
894 GCPhys += PAGE_SIZE;
895 }
896 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
897
898 if (i == _2M/PAGE_SIZE)
899 {
900 PGM_PAGE_SET_PDE_TYPE(pLargePage, PGM_PAGE_PDE_TYPE_PDE);
901 pVM->pgm.s.cLargePagesDisabled--;
902 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
903 return VINF_SUCCESS;
904 }
905
906 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
907}
908
909#endif /* PGM_WITH_LARGE_PAGES */
910
911/**
912 * Deal with a write monitored page.
913 *
916 * @param pVM The VM address.
917 * @param pPage The physical page tracking structure.
918 *
919 * @remarks Called from within the PGM critical section.
920 */
921void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
922{
923 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
924 PGM_PAGE_SET_WRITTEN_TO(pPage);
925 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
926 Assert(pVM->pgm.s.cMonitoredPages > 0);
927 pVM->pgm.s.cMonitoredPages--;
928 pVM->pgm.s.cWrittenToPages++;
929}
930
931
932/**
933 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
934 *
935 * @returns VBox strict status code.
936 * @retval VINF_SUCCESS on success.
937 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
938 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
939 *
940 * @param pVM The VM address.
941 * @param pPage The physical page tracking structure.
942 * @param GCPhys The address of the page.
943 *
944 * @remarks Called from within the PGM critical section.
945 */
946int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
947{
948 Assert(PGMIsLockOwner(pVM));
949 switch (PGM_PAGE_GET_STATE(pPage))
950 {
951 case PGM_PAGE_STATE_WRITE_MONITORED:
952 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
953 /* fall thru */
954 default: /* to shut up GCC */
955 case PGM_PAGE_STATE_ALLOCATED:
956 return VINF_SUCCESS;
957
958 /*
959 * Zero pages can be dummy pages for MMIO or reserved memory,
960 * so we need to check the flags before joining cause with
961 * shared page replacement.
962 */
963 case PGM_PAGE_STATE_ZERO:
964 if (PGM_PAGE_IS_MMIO(pPage))
965 return VERR_PGM_PHYS_PAGE_RESERVED;
966 /* fall thru */
967 case PGM_PAGE_STATE_SHARED:
968 return pgmPhysAllocPage(pVM, pPage, GCPhys);
969
970 /* Not allowed to write to ballooned pages. */
971 case PGM_PAGE_STATE_BALLOONED:
972 return VERR_PGM_PHYS_PAGE_BALLOONED;
973 }
974}
975
976
977/**
978 * Internal usage: Map the page specified by its GMM ID.
979 *
980 * This is similar to pgmPhysPageMap.
981 *
982 * @returns VBox status code.
983 *
984 * @param pVM The VM handle.
985 * @param idPage The Page ID.
986 * @param HCPhys The physical address (for RC).
987 * @param ppv Where to store the mapping address.
988 *
989 * @remarks Called from within the PGM critical section. The mapping is only
990 * valid while you're inside this section.
991 */
992int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
993{
994 /*
995 * Validation.
996 */
997 Assert(PGMIsLocked(pVM));
998 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
999 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
1000 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
1001
1002#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1003 /*
1004 * Map it by HCPhys.
1005 */
1006 return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1007
1008#else
1009 /*
1010 * Find/make Chunk TLB entry for the mapping chunk.
1011 */
1012 PPGMCHUNKR3MAP pMap;
1013 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1014 if (pTlbe->idChunk == idChunk)
1015 {
1016 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1017 pMap = pTlbe->pChunk;
1018 }
1019 else
1020 {
1021 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1022
1023 /*
1024 * Find the chunk, map it if necessary.
1025 */
1026 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1027 if (!pMap)
1028 {
1029# ifdef IN_RING0
1030 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1031 AssertRCReturn(rc, rc);
1032 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1033 Assert(pMap);
1034# else
1035 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1036 if (RT_FAILURE(rc))
1037 return rc;
1038# endif
1039 }
1040
1041 /*
1042 * Enter it into the Chunk TLB.
1043 */
1044 pTlbe->idChunk = idChunk;
1045 pTlbe->pChunk = pMap;
1046 pMap->iAge = 0;
1047 }
1048
1049 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
1050 return VINF_SUCCESS;
1051#endif
1052}
1053
1054
1055/**
1056 * Maps a page into the current virtual address space so it can be accessed.
1057 *
1058 * @returns VBox status code.
1059 * @retval VINF_SUCCESS on success.
1060 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1061 *
1062 * @param pVM The VM address.
1063 * @param pPage The physical page tracking structure.
1064 * @param GCPhys The address of the page.
1065 * @param ppMap Where to store the address of the mapping tracking structure.
1066 * @param ppv Where to store the mapping address of the page. The page
1067 * offset is masked off!
1068 *
1069 * @remarks Called from within the PGM critical section.
1070 */
1071static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1072{
1073 Assert(PGMIsLocked(pVM));
1074
1075#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1076 /*
1077 * Just some sketchy GC/R0-darwin code.
1078 */
1079 *ppMap = NULL;
1080 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1081 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
1082 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1083 return VINF_SUCCESS;
1084
1085#else /* IN_RING3 || IN_RING0 */
1086
1087
1088 /*
1089 * Special case: ZERO and MMIO2 pages.
1090 */
1091 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1092 if (idChunk == NIL_GMM_CHUNKID)
1093 {
1094 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
1095 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
1096 {
1097 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
1098 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1099 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
1100 *ppv = (void *)((uintptr_t)pRam->pvR3 + (uintptr_t)((GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK) - pRam->GCPhys));
1101 }
1102 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1103 {
1104 /** @todo deal with aliased MMIO2 pages somehow...
1105 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
1106 * them, that would also avoid this mess. It would actually be kind of
1107 * elegant... */
1108 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
1109 }
1110 else
1111 {
1112 /** @todo handle MMIO2 */
1113 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
1114 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
1115 ("pPage=%R[pgmpage]\n", pPage),
1116 VERR_INTERNAL_ERROR_2);
1117 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1118 }
1119 *ppMap = NULL;
1120 return VINF_SUCCESS;
1121 }
1122
1123 /*
1124 * Find/make Chunk TLB entry for the mapping chunk.
1125 */
1126 PPGMCHUNKR3MAP pMap;
1127 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1128 if (pTlbe->idChunk == idChunk)
1129 {
1130 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1131 pMap = pTlbe->pChunk;
1132 }
1133 else
1134 {
1135 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1136
1137 /*
1138 * Find the chunk, map it if necessary.
1139 */
1140 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1141 if (!pMap)
1142 {
1143#ifdef IN_RING0
1144 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1145 AssertRCReturn(rc, rc);
1146 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1147 Assert(pMap);
1148#else
1149 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1150 if (RT_FAILURE(rc))
1151 return rc;
1152#endif
1153 }
1154
1155 /*
1156 * Enter it into the Chunk TLB.
1157 */
1158 pTlbe->idChunk = idChunk;
1159 pTlbe->pChunk = pMap;
1160 pMap->iAge = 0;
1161 }
1162
1163 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
1164 *ppMap = pMap;
1165 return VINF_SUCCESS;
1166#endif /* IN_RING3 */
1167}
1168
1169
1170/**
1171 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1172 *
1173 * This is typically used in paths where we cannot use the TLB methods (like ROM
1174 * pages) or where there is no point in using them since we won't get many hits.
1175 *
1176 * @returns VBox strict status code.
1177 * @retval VINF_SUCCESS on success.
1178 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1179 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1180 *
1181 * @param pVM The VM address.
1182 * @param pPage The physical page tracking structure.
1183 * @param GCPhys The address of the page.
1184 * @param ppv Where to store the mapping address of the page. The page
1185 * offset is masked off!
1186 *
1187 * @remarks Called from within the PGM critical section. The mapping is only
1188 * valid while you're inside this section.
1189 */
1190int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1191{
1192 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1193 if (RT_SUCCESS(rc))
1194 {
1195 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1196 PPGMPAGEMAP pMapIgnore;
1197 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1198 if (RT_FAILURE(rc2)) /* preserve rc */
1199 rc = rc2;
1200 }
1201 return rc;
1202}
1203
1204
1205/**
1206 * Maps a page into the current virtual address space so it can be accessed for
1207 * both writing and reading.
1208 *
1209 * This is typically used in paths where we cannot use the TLB methods (like ROM
1210 * pages) or where there is no point in using them since we won't get many hits.
1211 *
1212 * @returns VBox status code.
1213 * @retval VINF_SUCCESS on success.
1214 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1215 *
1216 * @param pVM The VM address.
1217 * @param pPage The physical page tracking structure. Must be in the
1218 * allocated state.
1219 * @param GCPhys The address of the page.
1220 * @param ppv Where to store the mapping address of the page. The page
1221 * offset is masked off!
1222 *
1223 * @remarks Called from within the PGM critical section. The mapping is only
1224 * valid while you're inside this section.
1225 */
1226int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1227{
1228 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1229 PPGMPAGEMAP pMapIgnore;
1230 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1231}
1232
1233
1234/**
1235 * Maps a page into the current virtual address space so it can be accessed for
1236 * reading.
1237 *
1238 * This is typically used in paths where we cannot use the TLB methods (like ROM
1239 * pages) or where there is no point in using them since we won't get many hits.
1240 *
1241 * @returns VBox status code.
1242 * @retval VINF_SUCCESS on success.
1243 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1244 *
1245 * @param pVM The VM address.
1246 * @param pPage The physical page tracking structure.
1247 * @param GCPhys The address of the page.
1248 * @param ppv Where to store the mapping address of the page. The page
1249 * offset is masked off!
1250 *
1251 * @remarks Called from within the PGM critical section. The mapping is only
1252 * valid while you're inside this section.
1253 */
1254int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1255{
1256 PPGMPAGEMAP pMapIgnore;
1257 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1258}
1259
1260#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1261
1262/**
1263 * Load a guest page into the ring-3 physical TLB.
1264 *
1265 * @returns VBox status code.
1266 * @retval VINF_SUCCESS on success
1267 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1268 * @param pVM The VM handle.
1269 * @param GCPhys The guest physical address in question.
1270 */
1271int pgmPhysPageLoadIntoTlb(PVM pVM, RTGCPHYS GCPhys)
1272{
1273 Assert(PGMIsLocked(pVM));
1274
1275 /*
1276 * Find the ram range and page and hand it over to the with-page function.
1277 * 99.8% of requests are expected to be in the first range.
1278 */
1279 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1280 if (!pPage)
1281 {
1282 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1283 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1284 }
1285
1286 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1287}
1288
1289
1290/**
1291 * Load a guest page into the ring-3 physical TLB.
1292 *
1293 * @returns VBox status code.
1294 * @retval VINF_SUCCESS on success
1295 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1296 *
1297 * @param pVM The VM handle.
1298 * @param pPage Pointer to the PGMPAGE structure corresponding to
1299 * GCPhys.
1300 * @param GCPhys The guest physical address in question.
1301 */
1302int pgmPhysPageLoadIntoTlbWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1303{
1304 Assert(PGMIsLocked(pVM));
1305 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1306
1307 /*
1308 * Map the page.
1309 * Make a special case for the zero page as it is kind of special.
1310 */
1311 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1312 if ( !PGM_PAGE_IS_ZERO(pPage)
1313 && !PGM_PAGE_IS_BALLOONED(pPage))
1314 {
1315 void *pv;
1316 PPGMPAGEMAP pMap;
1317 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1318 if (RT_FAILURE(rc))
1319 return rc;
1320 pTlbe->pMap = pMap;
1321 pTlbe->pv = pv;
1322 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1323 }
1324 else
1325 {
1326 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg);
1327 pTlbe->pMap = NULL;
1328 pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1329 }
1330#ifdef PGM_WITH_PHYS_TLB
1331 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1332 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1333 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1334 else
1335 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1336#else
1337 pTlbe->GCPhys = NIL_RTGCPHYS;
1338#endif
1339 pTlbe->pPage = pPage;
1340 return VINF_SUCCESS;
1341}
1342
1343#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1344
1345/**
1346 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1347 * own the PGM lock and therefore not need to lock the mapped page.
1348 *
1349 * @returns VBox status code.
1350 * @retval VINF_SUCCESS on success.
1351 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1352 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1353 *
1354 * @param pVM The VM handle.
1355 * @param GCPhys The guest physical address of the page that should be mapped.
1356 * @param pPage Pointer to the PGMPAGE structure for the page.
1357 * @param ppv Where to store the address corresponding to GCPhys.
1358 *
1359 * @internal
1360 */
1361int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1362{
1363 int rc;
1364 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1365 Assert(PGMIsLocked(pVM));
1366
1367 /*
1368 * Make sure the page is writable.
1369 */
1370 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1371 {
1372 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1373 if (RT_FAILURE(rc))
1374 return rc;
1375 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1376 }
1377 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1378
1379 /*
1380 * Get the mapping address.
1381 */
1382#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1383 void *pv;
1384 rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
1385 PGM_PAGE_GET_HCPHYS(pPage),
1386 &pv
1387 RTLOG_COMMA_SRC_POS);
1388 if (RT_FAILURE(rc))
1389 return rc;
1390 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1391#else
1392 PPGMPAGEMAPTLBE pTlbe;
1393 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1394 if (RT_FAILURE(rc))
1395 return rc;
1396 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1397#endif
1398 return VINF_SUCCESS;
1399}
1400
1401
1402/**
1403 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1404 * own the PGM lock and therefore not need to lock the mapped page.
1405 *
1406 * @returns VBox status code.
1407 * @retval VINF_SUCCESS on success.
1408 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1409 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1410 *
1411 * @param pVM The VM handle.
1412 * @param GCPhys The guest physical address of the page that should be mapped.
1413 * @param pPage Pointer to the PGMPAGE structure for the page.
1414 * @param ppv Where to store the address corresponding to GCPhys.
1415 *
1416 * @internal
1417 */
1418int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
1419{
1420 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1421 Assert(PGMIsLocked(pVM));
1422 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1423
1424 /*
1425 * Get the mapping address.
1426 */
1427#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1428 void *pv;
1429 int rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
1430 PGM_PAGE_GET_HCPHYS(pPage),
1431 &pv
1432 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1433 if (RT_FAILURE(rc))
1434 return rc;
1435 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1436#else
1437 PPGMPAGEMAPTLBE pTlbe;
1438 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1439 if (RT_FAILURE(rc))
1440 return rc;
1441 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1442#endif
1443 return VINF_SUCCESS;
1444}
1445
1446
1447/**
1448 * Requests the mapping of a guest page into the current context.
1449 *
1450 * This API should only be used for very short term, as it will consume
1451 * scarce resources (R0 and GC) in the mapping cache. When you're done
1452 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1453 *
1454 * This API will assume your intention is to write to the page, and will
1455 * therefore replace shared and zero pages. If you do not intend to modify
1456 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1457 *
1458 * @returns VBox status code.
1459 * @retval VINF_SUCCESS on success.
1460 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1461 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1462 *
1463 * @param pVM The VM handle.
1464 * @param GCPhys The guest physical address of the page that should be mapped.
1465 * @param ppv Where to store the address corresponding to GCPhys.
1466 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1467 *
1468 * @remarks The caller is responsible for dealing with access handlers.
1469 * @todo Add an informational return code for pages with access handlers?
1470 *
1471 * @remark Avoid calling this API from within critical sections (other than the
1472 * PGM one) because of the deadlock risk. External threads may need to
1473 * delegate jobs to the EMTs.
1474 * @thread Any thread.
1475 */
1476VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1477{
1478 int rc = pgmLock(pVM);
1479 AssertRCReturn(rc, rc);
1480
1481#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1482 /*
1483 * Find the page and make sure it's writable.
1484 */
1485 PPGMPAGE pPage;
1486 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1487 if (RT_SUCCESS(rc))
1488 {
1489 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1490 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1491 if (RT_SUCCESS(rc))
1492 {
1493 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1494
1495 PVMCPU pVCpu = VMMGetCpu(pVM);
1496 void *pv;
1497 rc = pgmRZDynMapHCPageInlined(pVCpu,
1498 PGM_PAGE_GET_HCPHYS(pPage),
1499 &pv
1500 RTLOG_COMMA_SRC_POS);
1501 if (RT_SUCCESS(rc))
1502 {
1503 AssertRCSuccess(rc);
1504
1505 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1506 *ppv = pv;
1507 pLock->pvPage = pv;
1508 pLock->pVCpu = pVCpu;
1509 }
1510 }
1511 }
1512
1513#else /* IN_RING3 || IN_RING0 */
1514 /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
1515 /** @todo : This can be dangerous if abused for more than one page; the ring-3 mapping is only valid for ranges that do NOT cross a chunk boundary. */
1516 /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
1517
1518 /*
1519 * Query the Physical TLB entry for the page (may fail).
1520 */
1521 PPGMPAGEMAPTLBE pTlbe;
1522 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1523 if (RT_SUCCESS(rc))
1524 {
1525 /*
1526 * If the page is shared, the zero page, or being write monitored
1527 * it must be converted to a page that's writable if possible.
1528 */
1529 PPGMPAGE pPage = pTlbe->pPage;
1530 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1531 {
1532 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1533 if (RT_SUCCESS(rc))
1534 {
1535 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1536 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1537 }
1538 }
1539 if (RT_SUCCESS(rc))
1540 {
1541 /*
1542 * Now, just perform the locking and calculate the return address.
1543 */
1544 PPGMPAGEMAP pMap = pTlbe->pMap;
1545 if (pMap)
1546 pMap->cRefs++;
1547
1548 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1549 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1550 {
1551 if (cLocks == 0)
1552 pVM->pgm.s.cWriteLockedPages++;
1553 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1554 }
1555 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
1556 {
1557 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1558 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
1559 if (pMap)
1560 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1561 }
1562
1563 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1564 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1565 pLock->pvMap = pMap;
1566 }
1567 }
1568
1569#endif /* IN_RING3 || IN_RING0 */
1570 pgmUnlock(pVM);
1571 return rc;
1572}
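/*
 * Usage sketch (editorial addition, illustrative only): map a guest page for
 * writing, poke a byte and release the mapping lock again as soon as
 * possible.  The example function name is hypothetical.
 */
#if 0
static int exampleWriteGuestByte(PVM pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    PGMPAGEMAPLOCK Lock;
    void          *pv;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;                    /* the returned pointer already includes the page offset */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release ASAP, the mapping cache is a scarce resource */
    }
    return rc;
}
#endif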
1573
1574
1575/**
1576 * Requests the mapping of a guest page into the current context.
1577 *
1578 * This API should only be used for very short term, as it will consume
1579 * scarce resources (R0 and GC) in the mapping cache. When you're done
1580 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1581 *
1582 * @returns VBox status code.
1583 * @retval VINF_SUCCESS on success.
1584 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1585 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1586 *
1587 * @param pVM The VM handle.
1588 * @param GCPhys The guest physical address of the page that should be mapped.
1589 * @param ppv Where to store the address corresponding to GCPhys.
1590 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1591 *
1592 * @remarks The caller is responsible for dealing with access handlers.
1593 * @todo Add an informational return code for pages with access handlers?
1594 *
1595 * @remark Avoid calling this API from within critical sections (other than
1596 * the PGM one) because of the deadlock risk.
1597 * @thread Any thread.
1598 */
1599VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1600{
1601 int rc = pgmLock(pVM);
1602 AssertRCReturn(rc, rc);
1603
1604#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1605 /*
1606 * Find the page and make sure it's readable.
1607 */
1608 PPGMPAGE pPage;
1609 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1610 if (RT_SUCCESS(rc))
1611 {
1612 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1613 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1614 else
1615 {
1616 PVMCPU pVCpu = VMMGetCpu(pVM);
1617 void *pv;
1618 rc = pgmRZDynMapHCPageInlined(pVCpu,
1619 PGM_PAGE_GET_HCPHYS(pPage),
1620 &pv
1621 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1622 if (RT_SUCCESS(rc))
1623 {
1624 AssertRCSuccess(rc);
1625
1626 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1627 *ppv = pv;
1628 pLock->pvPage = pv;
1629 pLock->pVCpu = pVCpu;
1630 }
1631 }
1632 }
1633
1634#else /* IN_RING3 || IN_RING0 */
1635
1636 /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
1637 /** @todo : This can be dangerous if abused for more than one page; the ring-3 mapping is only valid for ranges that do NOT cross a chunk boundary. */
1638 /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
1639
1640 /*
1641 * Query the Physical TLB entry for the page (may fail).
1642 */
1643 PPGMPAGEMAPTLBE pTlbe;
1644 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1645 if (RT_SUCCESS(rc))
1646 {
1647 /* MMIO pages don't have any readable backing. */
1648 PPGMPAGE pPage = pTlbe->pPage;
1649 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1650 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1651 else
1652 {
1653 /*
1654 * Now, just perform the locking and calculate the return address.
1655 */
1656 PPGMPAGEMAP pMap = pTlbe->pMap;
1657 if (pMap)
1658 pMap->cRefs++;
1659
1660 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1661 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1662 {
1663 if (cLocks == 0)
1664 pVM->pgm.s.cReadLockedPages++;
1665 PGM_PAGE_INC_READ_LOCKS(pPage);
1666 }
1667 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
1668 {
1669 PGM_PAGE_INC_READ_LOCKS(pPage);
1670 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
1671 if (pMap)
1672 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1673 }
1674
1675 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1676 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1677 pLock->pvMap = pMap;
1678 }
1679 }
1680
1681#endif /* IN_RING3 || IN_RING0 */
1682 pgmUnlock(pVM);
1683 return rc;
1684}
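
/*
 * A minimal usage sketch for the short-term mapping API above: map a guest
 * page read-only, copy a few bytes out of it, and release the lock again as
 * soon as possible.  The pVM handle, the GCPhys value and the destination
 * buffer are assumptions made for the example; the caller must also make
 * sure the copy stays within the mapped page.
 *
 *      uint8_t        abBuf[64];
 *      void const    *pvPage;
 *      PGMPAGEMAPLOCK Lock;
 *      int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvPage, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          memcpy(abBuf, pvPage, sizeof(abBuf));
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *      }
 */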
1685
1686
1687/**
1688 * Requests the mapping of a guest page given by virtual address into the current context.
1689 *
1690 * This API should only be used for very short periods of time, as it will consume
1691 * scarce resources (R0 and GC) in the mapping cache. When you're done
1692 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1693 *
1694 * This API will assume your intention is to write to the page, and will
1695 * therefore replace shared and zero pages. If you do not intend to modify
1696 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1697 *
1698 * @returns VBox status code.
1699 * @retval VINF_SUCCESS on success.
1700 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1701 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1702 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1703 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1704 *
1705 * @param pVCpu VMCPU handle.
1706 * @param GCPtr The guest virtual address of the page that should be mapped.
1707 * @param ppv Where to store the address corresponding to GCPtr.
1708 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1709 *
1710 * @remark Avoid calling this API from within critical sections (other than
1711 * the PGM one) because of the deadlock risk.
1712 * @thread EMT
1713 */
1714VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1715{
1716 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1717 RTGCPHYS GCPhys;
1718 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1719 if (RT_SUCCESS(rc))
1720 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1721 return rc;
1722}
1723
1724
1725/**
1726 * Requests the mapping of a guest page given by virtual address into the current context.
1727 *
1728 * This API should only be used for very short periods of time, as it will consume
1729 * scarce resources (R0 and GC) in the mapping cache. When you're done
1730 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1731 *
1732 * @returns VBox status code.
1733 * @retval VINF_SUCCESS on success.
1734 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1735 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1736 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1737 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1738 *
1739 * @param pVCpu VMCPU handle.
1740 * @param GCPtr The guest virtual address of the page that should be mapped.
1741 * @param ppv Where to store the address corresponding to GCPtr.
1742 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1743 *
1744 * @remark Avoid calling this API from within critical sections (other than
1745 * the PGM one) because of the deadlock risk.
1746 * @thread EMT
1747 */
1748VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1749{
1750 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1751 RTGCPHYS GCPhys;
1752 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1753 if (RT_SUCCESS(rc))
1754 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1755 return rc;
1756}
1757
1758
1759/**
1760 * Release the mapping of a guest page.
1761 *
1762 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1763 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1764 *
1765 * @param pVM The VM handle.
1766 * @param pLock The lock structure initialized by the mapping function.
1767 */
1768VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1769{
1770#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1771 Assert(pLock->pvPage != NULL);
1772 Assert(pLock->pVCpu == VMMGetCpu(pVM));
1773 PGM_DYNMAP_UNUSED_HINT(pLock->pVCpu, pLock->pvPage);
1774 pLock->pVCpu = NULL;
1775 pLock->pvPage = NULL;
1776
1777#else
1778 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1779 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1780 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1781
1782 pLock->uPageAndType = 0;
1783 pLock->pvMap = NULL;
1784
1785 pgmLock(pVM);
1786 if (fWriteLock)
1787 {
1788 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1789 Assert(cLocks > 0);
1790 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1791 {
1792 if (cLocks == 1)
1793 {
1794 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1795 pVM->pgm.s.cWriteLockedPages--;
1796 }
1797 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1798 }
1799
1800 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1801 {
1802 PGM_PAGE_SET_WRITTEN_TO(pPage);
1803 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1804 Assert(pVM->pgm.s.cMonitoredPages > 0);
1805 pVM->pgm.s.cMonitoredPages--;
1806 pVM->pgm.s.cWrittenToPages++;
1807 }
1808 }
1809 else
1810 {
1811 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1812 Assert(cLocks > 0);
1813 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1814 {
1815 if (cLocks == 1)
1816 {
1817 Assert(pVM->pgm.s.cReadLockedPages > 0);
1818 pVM->pgm.s.cReadLockedPages--;
1819 }
1820 PGM_PAGE_DEC_READ_LOCKS(pPage);
1821 }
1822 }
1823
1824 if (pMap)
1825 {
1826 Assert(pMap->cRefs >= 1);
1827 pMap->cRefs--;
1828 pMap->iAge = 0;
1829 }
1830 pgmUnlock(pVM);
1831#endif /* IN_RING3 */
1832}
1833
1834
1835/**
1836 * Converts a GC physical address to a HC ring-3 pointer.
1837 *
1838 * @returns VINF_SUCCESS on success.
1839 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1840 * page but has no physical backing.
1841 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1842 * GC physical address.
1843 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1844 * a dynamic ram chunk boundary.
1845 *
1846 * @param pVM The VM handle.
1847 * @param GCPhys The GC physical address to convert.
1848 * @param cbRange The size of the physical range, in bytes.
1849 * @param pR3Ptr Where to store the R3 pointer on success.
1850 *
1851 * @deprecated Avoid when possible!
1852 */
1853VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
1854{
1855/** @todo this is kind of hacky and needs some more work. */
1856#ifndef DEBUG_sandervl
1857 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1858#endif
1859
1860    Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): don't use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
1861#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1862 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1863#else
1864 pgmLock(pVM);
1865
1866 PPGMRAMRANGE pRam;
1867 PPGMPAGE pPage;
1868 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1869 if (RT_SUCCESS(rc))
1870 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
1871
1872 pgmUnlock(pVM);
1873 Assert(rc <= VINF_SUCCESS);
1874 return rc;
1875#endif
1876}
1877
1878
1879#ifdef VBOX_STRICT
1880/**
1881 * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
1882 *
1883 * @returns The R3Ptr, NIL_RTR3PTR on failure.
1884 * @param pVM The VM handle.
1885 * @param GCPhys The GC Physical address.
1886 * @param cbRange The size of the physical range, in bytes.
1887 *
1888 * @deprecated Avoid when possible.
1889 */
1890VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
1891{
1892 RTR3PTR R3Ptr;
1893 int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
1894 if (RT_SUCCESS(rc))
1895 return R3Ptr;
1896 return NIL_RTR3PTR;
1897}
1898#endif /* VBOX_STRICT */
1899
1900
1901/**
1902 * Converts a guest pointer to a GC physical address.
1903 *
1904 * This uses the current CR3/CR0/CR4 of the guest.
1905 *
1906 * @returns VBox status code.
1907 * @param pVCpu The VMCPU Handle
1908 * @param GCPtr The guest pointer to convert.
1909 * @param pGCPhys Where to store the GC physical address.
1910 */
1911VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1912{
1913 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1914 if (pGCPhys && RT_SUCCESS(rc))
1915 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1916 return rc;
1917}
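
/*
 * A tiny sketch of how the translation helper above is typically used; the
 * pVCpu handle and the guest pointer GCPtrGuest are assumptions for the
 * example.  On success the page offset is already merged into the returned
 * physical address.
 *
 *      RTGCPHYS GCPhys;
 *      int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtrGuest, &GCPhys);
 *      if (RT_SUCCESS(rc))
 *          Log(("GCPtr %RGv -> GCPhys %RGp\n", GCPtrGuest, GCPhys));
 */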
1918
1919
1920/**
1921 * Converts a guest pointer to a HC physical address.
1922 *
1923 * This uses the current CR3/CR0/CR4 of the guest.
1924 *
1925 * @returns VBox status code.
1926 * @param pVCpu The VMCPU Handle
1927 * @param GCPtr The guest pointer to convert.
1928 * @param pHCPhys Where to store the HC physical address.
1929 */
1930VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
1931{
1932 PVM pVM = pVCpu->CTX_SUFF(pVM);
1933 RTGCPHYS GCPhys;
1934 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1935 if (RT_SUCCESS(rc))
1936 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
1937 return rc;
1938}
1939
1940
1941
1942#undef LOG_GROUP
1943#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
1944
1945
1946#ifdef IN_RING3
1947/**
1948 * Cache PGMPhys memory access
1949 *
1950 * @param pVM VM Handle.
1951 * @param pCache Cache structure pointer
1952 * @param GCPhys GC physical address
1953 * @param pbR3 Ring-3 pointer corresponding to the physical page.
1954 *
1955 * @thread EMT.
1956 */
1957static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
1958{
1959 uint32_t iCacheIndex;
1960
1961 Assert(VM_IS_EMT(pVM));
1962
1963 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
1964 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
1965
1966 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
1967
1968 ASMBitSet(&pCache->aEntries, iCacheIndex);
1969
1970 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
1971 pCache->Entry[iCacheIndex].pbR3 = pbR3;
1972}
1973#endif /* IN_RING3 */
1974
1975
1976/**
1977 * Deals with reading from a page with one or more ALL access handlers.
1978 *
1979 * @returns VBox status code. Can be ignored in ring-3.
1980 * @retval VINF_SUCCESS.
1981 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1982 *
1983 * @param pVM The VM handle.
1984 * @param pPage The page descriptor.
1985 * @param GCPhys The physical address to start reading at.
1986 * @param pvBuf Where to put the bits we read.
1987 * @param cb How much to read - less or equal to a page.
1988 */
1989static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
1990{
1991 /*
1992 * The most frequent access here is MMIO and shadowed ROM.
1993 * The current code ASSUMES all these access handlers cover full pages!
1994 */
1995
1996 /*
1997 * Whatever we do we need the source page, map it first.
1998 */
1999 const void *pvSrc = NULL;
2000 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
2001 if (RT_FAILURE(rc))
2002 {
2003 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2004 GCPhys, pPage, rc));
2005 memset(pvBuf, 0xff, cb);
2006 return VINF_SUCCESS;
2007 }
2008 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2009
2010 /*
2011 * Deal with any physical handlers.
2012 */
2013 PPGMPHYSHANDLER pPhys = NULL;
2014 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
2015 {
2016#ifdef IN_RING3
2017 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2018 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2019 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
2020 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
2021 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2022 Assert(pPhys->CTX_SUFF(pfnHandler));
2023
2024 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2025 void *pvUser = pPhys->CTX_SUFF(pvUser);
2026
2027 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
2028 STAM_PROFILE_START(&pPhys->Stat, h);
2029 Assert(PGMIsLockOwner(pVM));
2030 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2031 pgmUnlock(pVM);
2032 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
2033 pgmLock(pVM);
2034# ifdef VBOX_WITH_STATISTICS
2035 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2036 if (pPhys)
2037 STAM_PROFILE_STOP(&pPhys->Stat, h);
2038# else
2039 pPhys = NULL; /* might not be valid anymore. */
2040# endif
2041 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
2042#else
2043 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2044 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2045 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2046#endif
2047 }
2048
2049 /*
2050 * Deal with any virtual handlers.
2051 */
2052 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
2053 {
2054 unsigned iPage;
2055 PPGMVIRTHANDLER pVirt;
2056
2057 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
2058 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
2059 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
2060 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2061 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
2062
2063#ifdef IN_RING3
2064 if (pVirt->pfnHandlerR3)
2065 {
2066 if (!pPhys)
2067 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2068 else
2069 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
2070 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2071 + (iPage << PAGE_SHIFT)
2072 + (GCPhys & PAGE_OFFSET_MASK);
2073
2074 STAM_PROFILE_START(&pVirt->Stat, h);
2075 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
2076 STAM_PROFILE_STOP(&pVirt->Stat, h);
2077 if (rc2 == VINF_SUCCESS)
2078 rc = VINF_SUCCESS;
2079 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
2080 }
2081 else
2082 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2083#else
2084 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2085 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2086 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2087#endif
2088 }
2089
2090 /*
2091 * Take the default action.
2092 */
2093 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2094 memcpy(pvBuf, pvSrc, cb);
2095 return rc;
2096}
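
/*
 * Illustrative sketch of a ring-3 ALL access handler that cooperates with
 * the read path above by returning VINF_PGM_HANDLER_DO_DEFAULT, which makes
 * pgmPhysReadHandler fall back to the plain memcpy.  The handler name is
 * made up and the signature is modelled on the pfnHandler invocation above;
 * this is an example only, not a handler registered anywhere.
 *
 *      static DECLCALLBACK(int) exampleReadHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys,
 *                                                  void *pvBuf, size_t cbBuf,
 *                                                  PGMACCESSTYPE enmAccessType, void *pvUser)
 *      {
 *          Log(("exampleReadHandler: GCPhys=%RGp cb=%zu type=%d\n", GCPhys, cbBuf, enmAccessType));
 *          return VINF_PGM_HANDLER_DO_DEFAULT; // let PGM copy from the backing page
 *      }
 */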
2097
2098
2099/**
2100 * Read physical memory.
2101 *
2102 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2103 * want to ignore those.
2104 *
2105 * @returns VBox status code. Can be ignored in ring-3.
2106 * @retval VINF_SUCCESS.
2107 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2108 *
2109 * @param pVM VM Handle.
2110 * @param GCPhys Physical address start reading from.
2111 * @param pvBuf Where to put the read bits.
2112 * @param cbRead How many bytes to read.
2113 */
2114VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
2115{
2116 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2117 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2118
2119 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysRead));
2120 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2121
2122 pgmLock(pVM);
2123
2124 /*
2125 * Copy loop on ram ranges.
2126 */
2127 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2128 for (;;)
2129 {
2130 /* Inside range or not? */
2131 if (pRam && GCPhys >= pRam->GCPhys)
2132 {
2133 /*
2134 * Must work our way thru this page by page.
2135 */
2136 RTGCPHYS off = GCPhys - pRam->GCPhys;
2137 while (off < pRam->cb)
2138 {
2139 unsigned iPage = off >> PAGE_SHIFT;
2140 PPGMPAGE pPage = &pRam->aPages[iPage];
2141 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2142 if (cb > cbRead)
2143 cb = cbRead;
2144
2145 /*
2146 * Any ALL access handlers?
2147 */
2148 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
2149 {
2150 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2151 if (RT_FAILURE(rc))
2152 {
2153 pgmUnlock(pVM);
2154 return rc;
2155 }
2156 }
2157 else
2158 {
2159 /*
2160 * Get the pointer to the page.
2161 */
2162 const void *pvSrc;
2163 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
2164 if (RT_SUCCESS(rc))
2165 memcpy(pvBuf, pvSrc, cb);
2166 else
2167 {
2168 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2169 pRam->GCPhys + off, pPage, rc));
2170 memset(pvBuf, 0xff, cb);
2171 }
2172 }
2173
2174 /* next page */
2175 if (cb >= cbRead)
2176 {
2177 pgmUnlock(pVM);
2178 return VINF_SUCCESS;
2179 }
2180 cbRead -= cb;
2181 off += cb;
2182 pvBuf = (char *)pvBuf + cb;
2183 } /* walk pages in ram range. */
2184
2185 GCPhys = pRam->GCPhysLast + 1;
2186 }
2187 else
2188 {
2189 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2190
2191 /*
2192 * Unassigned address space.
2193 */
2194 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2195 if (cb >= cbRead)
2196 {
2197 memset(pvBuf, 0xff, cbRead);
2198 break;
2199 }
2200 memset(pvBuf, 0xff, cb);
2201
2202 cbRead -= cb;
2203 pvBuf = (char *)pvBuf + cb;
2204 GCPhys += cb;
2205 }
2206
2207 /* Advance range if necessary. */
2208 while (pRam && GCPhys > pRam->GCPhysLast)
2209 pRam = pRam->CTX_SUFF(pNext);
2210 } /* Ram range walk */
2211
2212 pgmUnlock(pVM);
2213 return VINF_SUCCESS;
2214}
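
/*
 * Short usage sketch for PGMPhysRead: pull a small structure out of guest
 * RAM while respecting access handlers and MMIO.  The pVM handle, address
 * and buffer size are invented for the example; as noted above, the status
 * code may be ignored in ring-3.
 *
 *      uint8_t  abDesc[32];
 *      RTGCPHYS GCPhysDesc = UINT32_C(0x000fe000);   // example address only
 *      int rc = PGMPhysRead(pVM, GCPhysDesc, &abDesc[0], sizeof(abDesc));
 *      AssertLogRelRC(rc);                           // can only fail in R0/RC
 */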
2215
2216
2217/**
2218 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2219 *
2220 * @returns VBox status code. Can be ignored in ring-3.
2221 * @retval VINF_SUCCESS.
2222 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2223 *
2224 * @param pVM The VM handle.
2225 * @param pPage The page descriptor.
2226 * @param GCPhys The physical address to start writing at.
2227 * @param pvBuf What to write.
2228 * @param cbWrite How much to write - less or equal to a page.
2229 */
2230static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
2231{
2232 void *pvDst = NULL;
2233 int rc;
2234
2235 /*
2236 * Give priority to physical handlers (like #PF does).
2237 *
2238 * Hope for a lonely physical handler first that covers the whole
2239 * write area. This should be a pretty frequent case with MMIO and
2240 * the heavy usage of full page handlers in the page pool.
2241 */
2242 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
2243 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
2244 {
2245 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2246 if (pCur)
2247 {
2248 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2249 Assert(pCur->CTX_SUFF(pfnHandler));
2250
2251 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2252 if (cbRange > cbWrite)
2253 cbRange = cbWrite;
2254
2255#ifndef IN_RING3
2256 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2257 NOREF(cbRange);
2258 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2259 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2260
2261#else /* IN_RING3 */
2262 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2263 if (!PGM_PAGE_IS_MMIO(pPage))
2264 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2265 else
2266 rc = VINF_SUCCESS;
2267 if (RT_SUCCESS(rc))
2268 {
2269 PFNPGMR3PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
2270 void *pvUser = pCur->CTX_SUFF(pvUser);
2271
2272 STAM_PROFILE_START(&pCur->Stat, h);
2273 Assert(PGMIsLockOwner(pVM));
2274 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2275 pgmUnlock(pVM);
2276 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2277 pgmLock(pVM);
2278# ifdef VBOX_WITH_STATISTICS
2279 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2280 if (pCur)
2281 STAM_PROFILE_STOP(&pCur->Stat, h);
2282# else
2283 pCur = NULL; /* might not be valid anymore. */
2284# endif
2285 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2286 memcpy(pvDst, pvBuf, cbRange);
2287 else
2288 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
2289 }
2290 else
2291 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2292 GCPhys, pPage, rc), rc);
2293 if (RT_LIKELY(cbRange == cbWrite))
2294 return VINF_SUCCESS;
2295
2296 /* more fun to be had below */
2297 cbWrite -= cbRange;
2298 GCPhys += cbRange;
2299 pvBuf = (uint8_t *)pvBuf + cbRange;
2300 pvDst = (uint8_t *)pvDst + cbRange;
2301#endif /* IN_RING3 */
2302 }
2303 /* else: the handler is somewhere else in the page, deal with it below. */
2304 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2305 }
2306 /*
2307 * A virtual handler without any interfering physical handlers.
2308 * Hopefully it'll convert the whole write.
2309 */
2310 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2311 {
2312 unsigned iPage;
2313 PPGMVIRTHANDLER pCur;
2314 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
2315 if (RT_SUCCESS(rc))
2316 {
2317 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2318 if (cbRange > cbWrite)
2319 cbRange = cbWrite;
2320
2321#ifndef IN_RING3
2322 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2323 NOREF(cbRange);
2324 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2325 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2326
2327#else /* IN_RING3 */
2328
2329 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2330 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2331 if (RT_SUCCESS(rc))
2332 {
2333 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2334 if (pCur->pfnHandlerR3)
2335 {
2336 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
2337 + (iPage << PAGE_SHIFT)
2338 + (GCPhys & PAGE_OFFSET_MASK);
2339
2340 STAM_PROFILE_START(&pCur->Stat, h);
2341 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2342 STAM_PROFILE_STOP(&pCur->Stat, h);
2343 }
2344 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2345 memcpy(pvDst, pvBuf, cbRange);
2346 else
2347 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2348 }
2349 else
2350 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2351 GCPhys, pPage, rc), rc);
2352 if (RT_LIKELY(cbRange == cbWrite))
2353 return VINF_SUCCESS;
2354
2355 /* more fun to be had below */
2356 cbWrite -= cbRange;
2357 GCPhys += cbRange;
2358 pvBuf = (uint8_t *)pvBuf + cbRange;
2359 pvDst = (uint8_t *)pvDst + cbRange;
2360#endif
2361 }
2362 /* else: the handler is somewhere else in the page, deal with it below. */
2363 }
2364
2365 /*
2366 * Deal with all the odd ends.
2367 */
2368
2369 /* We need a writable destination page. */
2370 if (!pvDst)
2371 {
2372 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2373 AssertLogRelMsgReturn(RT_SUCCESS(rc),
2374 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2375 GCPhys, pPage, rc), rc);
2376 }
2377
2378 /* The loop state (big + ugly). */
2379 unsigned iVirtPage = 0;
2380 PPGMVIRTHANDLER pVirt = NULL;
2381 uint32_t offVirt = PAGE_SIZE;
2382 uint32_t offVirtLast = PAGE_SIZE;
2383 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2384
2385 PPGMPHYSHANDLER pPhys = NULL;
2386 uint32_t offPhys = PAGE_SIZE;
2387 uint32_t offPhysLast = PAGE_SIZE;
2388 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2389
2390 /* The loop. */
2391 for (;;)
2392 {
2393 /*
2394 * Find the closest handler at or above GCPhys.
2395 */
2396 if (fMoreVirt && !pVirt)
2397 {
2398 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
2399 if (RT_SUCCESS(rc))
2400 {
2401 offVirt = 0;
2402 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2403 }
2404 else
2405 {
2406 PPGMPHYS2VIRTHANDLER pVirtPhys;
2407 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2408 GCPhys, true /* fAbove */);
2409 if ( pVirtPhys
2410 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2411 {
2412 /* ASSUME that pVirtPhys only covers one page. */
2413 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2414 Assert(pVirtPhys->Core.Key > GCPhys);
2415
2416 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2417 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2418 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2419 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2420 }
2421 else
2422 {
2423 pVirt = NULL;
2424 fMoreVirt = false;
2425 offVirt = offVirtLast = PAGE_SIZE;
2426 }
2427 }
2428 }
2429
2430 if (fMorePhys && !pPhys)
2431 {
2432 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2433 if (pPhys)
2434 {
2435 offPhys = 0;
2436 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2437 }
2438 else
2439 {
2440 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2441 GCPhys, true /* fAbove */);
2442 if ( pPhys
2443 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2444 {
2445 offPhys = pPhys->Core.Key - GCPhys;
2446 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2447 }
2448 else
2449 {
2450 pPhys = NULL;
2451 fMorePhys = false;
2452 offPhys = offPhysLast = PAGE_SIZE;
2453 }
2454 }
2455 }
2456
2457 /*
2458 * Handle access to space without handlers (that's easy).
2459 */
2460 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2461 uint32_t cbRange = (uint32_t)cbWrite;
2462 if (offPhys && offVirt)
2463 {
2464 if (cbRange > offPhys)
2465 cbRange = offPhys;
2466 if (cbRange > offVirt)
2467 cbRange = offVirt;
2468 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2469 }
2470 /*
2471 * Physical handler.
2472 */
2473 else if (!offPhys && offVirt)
2474 {
2475 if (cbRange > offPhysLast + 1)
2476 cbRange = offPhysLast + 1;
2477 if (cbRange > offVirt)
2478 cbRange = offVirt;
2479#ifdef IN_RING3
2480 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2481 void *pvUser = pPhys->CTX_SUFF(pvUser);
2482
2483 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2484 STAM_PROFILE_START(&pPhys->Stat, h);
2485 Assert(PGMIsLockOwner(pVM));
2486 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2487 pgmUnlock(pVM);
2488 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2489 pgmLock(pVM);
2490# ifdef VBOX_WITH_STATISTICS
2491 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2492 if (pPhys)
2493 STAM_PROFILE_STOP(&pPhys->Stat, h);
2494# else
2495 pPhys = NULL; /* might not be valid anymore. */
2496# endif
2497 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2498#else
2499 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2500 NOREF(cbRange);
2501 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2502 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2503#endif
2504 }
2505 /*
2506 * Virtual handler.
2507 */
2508 else if (offPhys && !offVirt)
2509 {
2510 if (cbRange > offVirtLast + 1)
2511 cbRange = offVirtLast + 1;
2512 if (cbRange > offPhys)
2513 cbRange = offPhys;
2514#ifdef IN_RING3
2515 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2516 if (pVirt->pfnHandlerR3)
2517 {
2518 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2519 + (iVirtPage << PAGE_SHIFT)
2520 + (GCPhys & PAGE_OFFSET_MASK);
2521 STAM_PROFILE_START(&pVirt->Stat, h);
2522 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2523 STAM_PROFILE_STOP(&pVirt->Stat, h);
2524 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2525 }
2526 pVirt = NULL;
2527#else
2528 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2529 NOREF(cbRange);
2530 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2531 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2532#endif
2533 }
2534 /*
2535 * Both... give the physical one priority.
2536 */
2537 else
2538 {
2539 Assert(!offPhys && !offVirt);
2540 if (cbRange > offVirtLast + 1)
2541 cbRange = offVirtLast + 1;
2542 if (cbRange > offPhysLast + 1)
2543 cbRange = offPhysLast + 1;
2544
2545#ifdef IN_RING3
2546 if (pVirt->pfnHandlerR3)
2547 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2548 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2549
2550 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2551 void *pvUser = pPhys->CTX_SUFF(pvUser);
2552
2553 STAM_PROFILE_START(&pPhys->Stat, h);
2554 Assert(PGMIsLockOwner(pVM));
2555 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2556 pgmUnlock(pVM);
2557 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2558 pgmLock(pVM);
2559# ifdef VBOX_WITH_STATISTICS
2560 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2561 if (pPhys)
2562 STAM_PROFILE_STOP(&pPhys->Stat, h);
2563# else
2564 pPhys = NULL; /* might not be valid anymore. */
2565# endif
2566 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2567 if (pVirt->pfnHandlerR3)
2568 {
2569
2570 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2571 + (iVirtPage << PAGE_SHIFT)
2572 + (GCPhys & PAGE_OFFSET_MASK);
2573 STAM_PROFILE_START(&pVirt->Stat, h2);
2574 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2575 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2576 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2577 rc = VINF_SUCCESS;
2578 else
2579 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2580 }
2581 pPhys = NULL;
2582 pVirt = NULL;
2583#else
2584 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2585 NOREF(cbRange);
2586 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2587 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2588#endif
2589 }
2590 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2591 memcpy(pvDst, pvBuf, cbRange);
2592
2593 /*
2594 * Advance if we've got more stuff to do.
2595 */
2596 if (cbRange >= cbWrite)
2597 return VINF_SUCCESS;
2598
2599 cbWrite -= cbRange;
2600 GCPhys += cbRange;
2601 pvBuf = (uint8_t *)pvBuf + cbRange;
2602 pvDst = (uint8_t *)pvDst + cbRange;
2603
2604 offPhys -= cbRange;
2605 offPhysLast -= cbRange;
2606 offVirt -= cbRange;
2607 offVirtLast -= cbRange;
2608 }
2609}
2610
2611
2612/**
2613 * Write to physical memory.
2614 *
2615 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2616 * want to ignore those.
2617 *
2618 * @returns VBox status code. Can be ignored in ring-3.
2619 * @retval VINF_SUCCESS.
2620 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2621 *
2622 * @param pVM VM Handle.
2623 * @param GCPhys Physical address to write to.
2624 * @param pvBuf What to write.
2625 * @param cbWrite How many bytes to write.
2626 */
2627VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2628{
2629 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2630 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2631 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2632
2633 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWrite));
2634 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2635
2636 pgmLock(pVM);
2637
2638 /*
2639 * Copy loop on ram ranges.
2640 */
2641 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2642 for (;;)
2643 {
2644 /* Inside range or not? */
2645 if (pRam && GCPhys >= pRam->GCPhys)
2646 {
2647 /*
2648 * Must work our way thru this page by page.
2649 */
2650 RTGCPTR off = GCPhys - pRam->GCPhys;
2651 while (off < pRam->cb)
2652 {
2653 RTGCPTR iPage = off >> PAGE_SHIFT;
2654 PPGMPAGE pPage = &pRam->aPages[iPage];
2655 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2656 if (cb > cbWrite)
2657 cb = cbWrite;
2658
2659 /*
2660 * Any active WRITE or ALL access handlers?
2661 */
2662 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2663 {
2664 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2665 if (RT_FAILURE(rc))
2666 {
2667 pgmUnlock(pVM);
2668 return rc;
2669 }
2670 }
2671 else
2672 {
2673 /*
2674 * Get the pointer to the page.
2675 */
2676 void *pvDst;
2677 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
2678 if (RT_SUCCESS(rc))
2679 {
2680 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2681 memcpy(pvDst, pvBuf, cb);
2682 }
2683 else
2684 /* Ignore writes to ballooned pages. */
2685 if (!PGM_PAGE_IS_BALLOONED(pPage))
2686 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2687 pRam->GCPhys + off, pPage, rc));
2688 }
2689
2690 /* next page */
2691 if (cb >= cbWrite)
2692 {
2693 pgmUnlock(pVM);
2694 return VINF_SUCCESS;
2695 }
2696
2697 cbWrite -= cb;
2698 off += cb;
2699 pvBuf = (const char *)pvBuf + cb;
2700 } /* walk pages in ram range */
2701
2702 GCPhys = pRam->GCPhysLast + 1;
2703 }
2704 else
2705 {
2706 /*
2707 * Unassigned address space, skip it.
2708 */
2709 if (!pRam)
2710 break;
2711 size_t cb = pRam->GCPhys - GCPhys;
2712 if (cb >= cbWrite)
2713 break;
2714 cbWrite -= cb;
2715 pvBuf = (const char *)pvBuf + cb;
2716 GCPhys += cb;
2717 }
2718
2719 /* Advance range if necessary. */
2720 while (pRam && GCPhys > pRam->GCPhysLast)
2721 pRam = pRam->CTX_SUFF(pNext);
2722 } /* Ram range walk */
2723
2724 pgmUnlock(pVM);
2725 return VINF_SUCCESS;
2726}
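
/*
 * Corresponding usage sketch for PGMPhysWrite: store a small, invented blob
 * at a guest physical address so that any WRITE/ALL handlers see the access
 * first.  The pVM handle and the GCPhysSig address are assumptions made for
 * the example.
 *
 *      static const uint8_t s_abSig[4] = { 'V', 'B', 'O', 'X' };
 *      int rc = PGMPhysWrite(pVM, GCPhysSig, &s_abSig[0], sizeof(s_abSig));
 *      AssertLogRelRC(rc);                           // can only fail in R0/RC
 */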
2727
2728
2729/**
2730 * Read from guest physical memory by GC physical address, bypassing
2731 * MMIO and access handlers.
2732 *
2733 * @returns VBox status.
2734 * @param pVM VM handle.
2735 * @param pvDst The destination address.
2736 * @param GCPhysSrc The source address (GC physical address).
2737 * @param cb The number of bytes to read.
2738 */
2739VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2740{
2741 /*
2742 * Treat the first page as a special case.
2743 */
2744 if (!cb)
2745 return VINF_SUCCESS;
2746
2747 /* map the 1st page */
2748 void const *pvSrc;
2749 PGMPAGEMAPLOCK Lock;
2750 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2751 if (RT_FAILURE(rc))
2752 return rc;
2753
2754 /* optimize for the case where access is completely within the first page. */
2755 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2756 if (RT_LIKELY(cb <= cbPage))
2757 {
2758 memcpy(pvDst, pvSrc, cb);
2759 PGMPhysReleasePageMappingLock(pVM, &Lock);
2760 return VINF_SUCCESS;
2761 }
2762
2763 /* copy to the end of the page. */
2764 memcpy(pvDst, pvSrc, cbPage);
2765 PGMPhysReleasePageMappingLock(pVM, &Lock);
2766 GCPhysSrc += cbPage;
2767 pvDst = (uint8_t *)pvDst + cbPage;
2768 cb -= cbPage;
2769
2770 /*
2771 * Page by page.
2772 */
2773 for (;;)
2774 {
2775 /* map the page */
2776 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2777 if (RT_FAILURE(rc))
2778 return rc;
2779
2780 /* last page? */
2781 if (cb <= PAGE_SIZE)
2782 {
2783 memcpy(pvDst, pvSrc, cb);
2784 PGMPhysReleasePageMappingLock(pVM, &Lock);
2785 return VINF_SUCCESS;
2786 }
2787
2788 /* copy the entire page and advance */
2789 memcpy(pvDst, pvSrc, PAGE_SIZE);
2790 PGMPhysReleasePageMappingLock(pVM, &Lock);
2791 GCPhysSrc += PAGE_SIZE;
2792 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2793 cb -= PAGE_SIZE;
2794 }
2795 /* won't ever get here. */
2796}
2797
2798
2799/**
2800 * Write memory to a GC physical address in guest physical memory.
2802 *
2803 * This will bypass MMIO and access handlers.
2804 *
2805 * @returns VBox status.
2806 * @param pVM VM handle.
2807 * @param GCPhysDst The GC physical address of the destination.
2808 * @param pvSrc The source buffer.
2809 * @param cb The number of bytes to write.
2810 */
2811VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2812{
2813 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2814
2815 /*
2816 * Treat the first page as a special case.
2817 */
2818 if (!cb)
2819 return VINF_SUCCESS;
2820
2821 /* map the 1st page */
2822 void *pvDst;
2823 PGMPAGEMAPLOCK Lock;
2824 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2825 if (RT_FAILURE(rc))
2826 return rc;
2827
2828 /* optimize for the case where access is completely within the first page. */
2829 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2830 if (RT_LIKELY(cb <= cbPage))
2831 {
2832 memcpy(pvDst, pvSrc, cb);
2833 PGMPhysReleasePageMappingLock(pVM, &Lock);
2834 return VINF_SUCCESS;
2835 }
2836
2837 /* copy to the end of the page. */
2838 memcpy(pvDst, pvSrc, cbPage);
2839 PGMPhysReleasePageMappingLock(pVM, &Lock);
2840 GCPhysDst += cbPage;
2841 pvSrc = (const uint8_t *)pvSrc + cbPage;
2842 cb -= cbPage;
2843
2844 /*
2845 * Page by page.
2846 */
2847 for (;;)
2848 {
2849 /* map the page */
2850 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2851 if (RT_FAILURE(rc))
2852 return rc;
2853
2854 /* last page? */
2855 if (cb <= PAGE_SIZE)
2856 {
2857 memcpy(pvDst, pvSrc, cb);
2858 PGMPhysReleasePageMappingLock(pVM, &Lock);
2859 return VINF_SUCCESS;
2860 }
2861
2862 /* copy the entire page and advance */
2863 memcpy(pvDst, pvSrc, PAGE_SIZE);
2864 PGMPhysReleasePageMappingLock(pVM, &Lock);
2865 GCPhysDst += PAGE_SIZE;
2866 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2867 cb -= PAGE_SIZE;
2868 }
2869 /* won't ever get here. */
2870}
2871
2872
2873/**
2874 * Read from guest physical memory referenced by GC pointer.
2875 *
2876 * This function uses the current CR3/CR0/CR4 of the guest and will
2877 * bypass access handlers and not set any accessed bits.
2878 *
2879 * @returns VBox status.
2880 * @param pVCpu The VMCPU handle.
2881 * @param pvDst The destination address.
2882 * @param GCPtrSrc The source address (GC pointer).
2883 * @param cb The number of bytes to read.
2884 */
2885VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2886{
2887 PVM pVM = pVCpu->CTX_SUFF(pVM);
2888
2889 /*
2890 * Treat the first page as a special case.
2891 */
2892 if (!cb)
2893 return VINF_SUCCESS;
2894
2895 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleRead));
2896 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
2897
2898 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
2899 * when many VCPUs are fighting for the lock.
2900 */
2901 pgmLock(pVM);
2902
2903 /* map the 1st page */
2904 void const *pvSrc;
2905 PGMPAGEMAPLOCK Lock;
2906 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2907 if (RT_FAILURE(rc))
2908 {
2909 pgmUnlock(pVM);
2910 return rc;
2911 }
2912
2913 /* optimize for the case where access is completely within the first page. */
2914 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2915 if (RT_LIKELY(cb <= cbPage))
2916 {
2917 memcpy(pvDst, pvSrc, cb);
2918 PGMPhysReleasePageMappingLock(pVM, &Lock);
2919 pgmUnlock(pVM);
2920 return VINF_SUCCESS;
2921 }
2922
2923 /* copy to the end of the page. */
2924 memcpy(pvDst, pvSrc, cbPage);
2925 PGMPhysReleasePageMappingLock(pVM, &Lock);
2926 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
2927 pvDst = (uint8_t *)pvDst + cbPage;
2928 cb -= cbPage;
2929
2930 /*
2931 * Page by page.
2932 */
2933 for (;;)
2934 {
2935 /* map the page */
2936 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2937 if (RT_FAILURE(rc))
2938 {
2939 pgmUnlock(pVM);
2940 return rc;
2941 }
2942
2943 /* last page? */
2944 if (cb <= PAGE_SIZE)
2945 {
2946 memcpy(pvDst, pvSrc, cb);
2947 PGMPhysReleasePageMappingLock(pVM, &Lock);
2948 pgmUnlock(pVM);
2949 return VINF_SUCCESS;
2950 }
2951
2952 /* copy the entire page and advance */
2953 memcpy(pvDst, pvSrc, PAGE_SIZE);
2954 PGMPhysReleasePageMappingLock(pVM, &Lock);
2955 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
2956 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2957 cb -= PAGE_SIZE;
2958 }
2959 /* won't ever get here. */
2960}
2961
2962
2963/**
2964 * Write to guest physical memory referenced by GC pointer.
2965 *
2966 * This function uses the current CR3/CR0/CR4 of the guest and will
2967 * bypass access handlers and not set dirty or accessed bits.
2968 *
2969 * @returns VBox status.
2970 * @param pVCpu The VMCPU handle.
2971 * @param GCPtrDst The destination address (GC pointer).
2972 * @param pvSrc The source address.
2973 * @param cb The number of bytes to write.
2974 */
2975VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2976{
2977 PVM pVM = pVCpu->CTX_SUFF(pVM);
2978
2979 /*
2980 * Treat the first page as a special case.
2981 */
2982 if (!cb)
2983 return VINF_SUCCESS;
2984
2985 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWrite));
2986 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
2987
2988 /* map the 1st page */
2989 void *pvDst;
2990 PGMPAGEMAPLOCK Lock;
2991 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2992 if (RT_FAILURE(rc))
2993 return rc;
2994
2995 /* optimize for the case where access is completely within the first page. */
2996 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2997 if (RT_LIKELY(cb <= cbPage))
2998 {
2999 memcpy(pvDst, pvSrc, cb);
3000 PGMPhysReleasePageMappingLock(pVM, &Lock);
3001 return VINF_SUCCESS;
3002 }
3003
3004 /* copy to the end of the page. */
3005 memcpy(pvDst, pvSrc, cbPage);
3006 PGMPhysReleasePageMappingLock(pVM, &Lock);
3007 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3008 pvSrc = (const uint8_t *)pvSrc + cbPage;
3009 cb -= cbPage;
3010
3011 /*
3012 * Page by page.
3013 */
3014 for (;;)
3015 {
3016 /* map the page */
3017 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3018 if (RT_FAILURE(rc))
3019 return rc;
3020
3021 /* last page? */
3022 if (cb <= PAGE_SIZE)
3023 {
3024 memcpy(pvDst, pvSrc, cb);
3025 PGMPhysReleasePageMappingLock(pVM, &Lock);
3026 return VINF_SUCCESS;
3027 }
3028
3029 /* copy the entire page and advance */
3030 memcpy(pvDst, pvSrc, PAGE_SIZE);
3031 PGMPhysReleasePageMappingLock(pVM, &Lock);
3032 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3033 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3034 cb -= PAGE_SIZE;
3035 }
3036 /* won't ever get here. */
3037}
3038
3039
3040/**
3041 * Write to guest physical memory referenced by GC pointer and update the PTE.
3042 *
3043 * This function uses the current CR3/CR0/CR4 of the guest and will
3044 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3045 *
3046 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3047 *
3048 * @returns VBox status.
3049 * @param pVCpu The VMCPU handle.
3050 * @param GCPtrDst The destination address (GC pointer).
3051 * @param pvSrc The source address.
3052 * @param cb The number of bytes to write.
3053 */
3054VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3055{
3056 PVM pVM = pVCpu->CTX_SUFF(pVM);
3057
3058 /*
3059 * Treat the first page as a special case.
3060     * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3061 */
3062 if (!cb)
3063 return VINF_SUCCESS;
3064
3065 /* map the 1st page */
3066 void *pvDst;
3067 PGMPAGEMAPLOCK Lock;
3068 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3069 if (RT_FAILURE(rc))
3070 return rc;
3071
3072 /* optimize for the case where access is completely within the first page. */
3073 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3074 if (RT_LIKELY(cb <= cbPage))
3075 {
3076 memcpy(pvDst, pvSrc, cb);
3077 PGMPhysReleasePageMappingLock(pVM, &Lock);
3078 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3079 return VINF_SUCCESS;
3080 }
3081
3082 /* copy to the end of the page. */
3083 memcpy(pvDst, pvSrc, cbPage);
3084 PGMPhysReleasePageMappingLock(pVM, &Lock);
3085 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3086 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3087 pvSrc = (const uint8_t *)pvSrc + cbPage;
3088 cb -= cbPage;
3089
3090 /*
3091 * Page by page.
3092 */
3093 for (;;)
3094 {
3095 /* map the page */
3096 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3097 if (RT_FAILURE(rc))
3098 return rc;
3099
3100 /* last page? */
3101 if (cb <= PAGE_SIZE)
3102 {
3103 memcpy(pvDst, pvSrc, cb);
3104 PGMPhysReleasePageMappingLock(pVM, &Lock);
3105 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3106 return VINF_SUCCESS;
3107 }
3108
3109 /* copy the entire page and advance */
3110 memcpy(pvDst, pvSrc, PAGE_SIZE);
3111 PGMPhysReleasePageMappingLock(pVM, &Lock);
3112 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3113 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3114 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3115 cb -= PAGE_SIZE;
3116 }
3117 /* won't ever get here. */
3118}
3119
3120
3121/**
3122 * Read from guest physical memory referenced by GC pointer.
3123 *
3124 * This function uses the current CR3/CR0/CR4 of the guest and will
3125 * respect access handlers and set accessed bits.
3126 *
3127 * @returns VBox status.
3128 * @param pVCpu The VMCPU handle.
3129 * @param pvDst The destination address.
3130 * @param GCPtrSrc The source address (GC pointer).
3131 * @param cb The number of bytes to read.
3132 * @thread The vCPU EMT.
3133 */
3134VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3135{
3136 RTGCPHYS GCPhys;
3137 uint64_t fFlags;
3138 int rc;
3139 PVM pVM = pVCpu->CTX_SUFF(pVM);
3140
3141 /*
3142 * Anything to do?
3143 */
3144 if (!cb)
3145 return VINF_SUCCESS;
3146
3147 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3148
3149 /*
3150 * Optimize reads within a single page.
3151 */
3152 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3153 {
3154 /* Convert virtual to physical address + flags */
3155 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3156 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3157 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3158
3159 /* mark the guest page as accessed. */
3160 if (!(fFlags & X86_PTE_A))
3161 {
3162 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3163 AssertRC(rc);
3164 }
3165
3166 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
3167 }
3168
3169 /*
3170 * Page by page.
3171 */
3172 for (;;)
3173 {
3174 /* Convert virtual to physical address + flags */
3175 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3176 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3177 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3178
3179 /* mark the guest page as accessed. */
3180 if (!(fFlags & X86_PTE_A))
3181 {
3182 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3183 AssertRC(rc);
3184 }
3185
3186 /* copy */
3187 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3188 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
3189 if (cbRead >= cb || RT_FAILURE(rc))
3190 return rc;
3191
3192 /* next */
3193 cb -= cbRead;
3194 pvDst = (uint8_t *)pvDst + cbRead;
3195 GCPtrSrc += cbRead;
3196 }
3197}
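
/*
 * Usage sketch for PGMPhysReadGCPtr: read a guest variable through its
 * virtual address, going through access handlers and setting the accessed
 * bit as documented above.  The pVCpu handle and GCPtrGuestVar are
 * assumptions for the example; use PGMPhysSimpleReadGCPtr instead when the
 * handlers and accessed bits must be left alone.  Must be called on the EMT.
 *
 *      uint64_t u64Val = 0;
 *      int rc = PGMPhysReadGCPtr(pVCpu, &u64Val, GCPtrGuestVar, sizeof(u64Val));
 *      AssertRC(rc);
 */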
3198
3199
3200/**
3201 * Write to guest physical memory referenced by GC pointer.
3202 *
3203 * This function uses the current CR3/CR0/CR4 of the guest and will
3204 * respect access handlers and set dirty and accessed bits.
3205 *
3206 * @returns VBox status.
3207 * @retval VINF_SUCCESS.
3208 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
3209 *
3210 * @param pVCpu The VMCPU handle.
3211 * @param GCPtrDst The destination address (GC pointer).
3212 * @param pvSrc The source address.
3213 * @param cb The number of bytes to write.
3214 */
3215VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3216{
3217 RTGCPHYS GCPhys;
3218 uint64_t fFlags;
3219 int rc;
3220 PVM pVM = pVCpu->CTX_SUFF(pVM);
3221
3222 /*
3223 * Anything to do?
3224 */
3225 if (!cb)
3226 return VINF_SUCCESS;
3227
3228 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3229
3230 /*
3231 * Optimize writes within a single page.
3232 */
3233 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3234 {
3235 /* Convert virtual to physical address + flags */
3236 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3237 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3238 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3239
3240 /* Mention when we ignore X86_PTE_RW... */
3241 if (!(fFlags & X86_PTE_RW))
3242            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3243
3244 /* Mark the guest page as accessed and dirty if necessary. */
3245 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3246 {
3247 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3248 AssertRC(rc);
3249 }
3250
3251 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
3252 }
3253
3254 /*
3255 * Page by page.
3256 */
3257 for (;;)
3258 {
3259 /* Convert virtual to physical address + flags */
3260 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3261 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3262 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3263
3264 /* Mention when we ignore X86_PTE_RW... */
3265 if (!(fFlags & X86_PTE_RW))
3266            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3267
3268 /* Mark the guest page as accessed and dirty if necessary. */
3269 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3270 {
3271 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3272 AssertRC(rc);
3273 }
3274
3275 /* copy */
3276 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3277 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
3278 if (cbWrite >= cb || RT_FAILURE(rc))
3279 return rc;
3280
3281 /* next */
3282 cb -= cbWrite;
3283 pvSrc = (uint8_t *)pvSrc + cbWrite;
3284 GCPtrDst += cbWrite;
3285 }
3286}
3287
3288
3289/**
3290 * Performs a read of guest virtual memory for instruction emulation.
3291 *
3292 * This will check permissions, raise exceptions and update the access bits.
3293 *
3294 * The current implementation will bypass all access handlers. It may later be
3295 * changed to at least respect MMIO.
3296 *
3297 *
3298 * @returns VBox status code suitable to scheduling.
3299 * @retval VINF_SUCCESS if the read was performed successfully.
3300 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3301 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3302 *
3303 * @param pVCpu The VMCPU handle.
3304 * @param pCtxCore The context core.
3305 * @param pvDst Where to put the bytes we've read.
3306 * @param GCPtrSrc The source address.
3307 * @param cb The number of bytes to read. Not more than a page.
3308 *
3309 * @remark This function will dynamically map physical pages in GC. This may unmap
3310 * mappings done by the caller. Be careful!
3311 */
3312VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3313{
3314 PVM pVM = pVCpu->CTX_SUFF(pVM);
3315 Assert(cb <= PAGE_SIZE);
3316
3317/** @todo r=bird: This isn't perfect!
3318 * -# It's not checking for reserved bits being 1.
3319 * -# It's not correctly dealing with the access bit.
3320 * -# It's not respecting MMIO memory or any other access handlers.
3321 */
3322 /*
3323 * 1. Translate virtual to physical. This may fault.
3324 * 2. Map the physical address.
3325 * 3. Do the read operation.
3326 * 4. Set access bits if required.
3327 */
3328 int rc;
3329 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3330 if (cb <= cb1)
3331 {
3332 /*
3333 * Not crossing pages.
3334 */
3335 RTGCPHYS GCPhys;
3336 uint64_t fFlags;
3337 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3338 if (RT_SUCCESS(rc))
3339 {
3340 /** @todo we should check reserved bits ... */
3341 void *pvSrc;
3342 rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, &pvSrc);
3343 switch (rc)
3344 {
3345 case VINF_SUCCESS:
3346 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3347 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3348 break;
3349 case VERR_PGM_PHYS_PAGE_RESERVED:
3350 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3351 memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
3352 break;
3353 default:
3354 return rc;
3355 }
3356
3357 /** @todo access bit emulation isn't 100% correct. */
3358 if (!(fFlags & X86_PTE_A))
3359 {
3360 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3361 AssertRC(rc);
3362 }
3363 return VINF_SUCCESS;
3364 }
3365 }
3366 else
3367 {
3368 /*
3369 * Crosses pages.
3370 */
3371 size_t cb2 = cb - cb1;
3372 uint64_t fFlags1;
3373 RTGCPHYS GCPhys1;
3374 uint64_t fFlags2;
3375 RTGCPHYS GCPhys2;
3376 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3377 if (RT_SUCCESS(rc))
3378 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3379 if (RT_SUCCESS(rc))
3380 {
3381 /** @todo we should check reserved bits ... */
3382 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3383 void *pvSrc1;
3384 rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys1, &pvSrc1);
3385 switch (rc)
3386 {
3387 case VINF_SUCCESS:
3388 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3389 break;
3390 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3391 memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
3392 break;
3393 default:
3394 return rc;
3395 }
3396
3397 void *pvSrc2;
3398 rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys2, &pvSrc2);
3399 switch (rc)
3400 {
3401 case VINF_SUCCESS:
3402 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3403 break;
3404 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3405 memset((uint8_t *)pvDst + cb1, 0, cb2); /** @todo this is wrong, it should be 0xff */
3406 break;
3407 default:
3408 return rc;
3409 }
3410
3411 if (!(fFlags1 & X86_PTE_A))
3412 {
3413 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3414 AssertRC(rc);
3415 }
3416 if (!(fFlags2 & X86_PTE_A))
3417 {
3418 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3419 AssertRC(rc);
3420 }
3421 return VINF_SUCCESS;
3422 }
3423 }
3424
3425 /*
3426 * Raise a #PF.
3427 */
3428 uint32_t uErr;
3429
3430 /* Get the current privilege level. */
3431 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3432 switch (rc)
3433 {
3434 case VINF_SUCCESS:
3435 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3436 break;
3437
3438 case VERR_PAGE_NOT_PRESENT:
3439 case VERR_PAGE_TABLE_NOT_PRESENT:
3440 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3441 break;
3442
3443 default:
3444 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3445 return rc;
3446 }
3447 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3448 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3449}
3450
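/*
 * Editorial sketch (not part of the original file): one way an instruction
 * emulator might fetch a 16-bit operand through PGMPhysInterpretedRead above.
 * The helper name and calling context are hypothetical; only the
 * PGMPhysInterpretedRead call and its status codes are taken from this file.
 */
#if 0 /* illustration only, not built */
static int emSketchFetchU16(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrSrc, uint16_t *pu16Value)
{
    /* Either copies the bytes, or raises/dispatches a #PF and returns an informational status. */
    int rc = PGMPhysInterpretedRead(pVCpu, pCtxCore, pu16Value, GCPtrSrc, sizeof(*pu16Value));
    AssertMsg(   rc == VINF_SUCCESS
              || rc == VINF_EM_RAW_GUEST_TRAP
              || rc == VINF_TRPM_XCPT_DISPATCHED
              || RT_FAILURE(rc), ("%Rrc\n", rc));
    /* On anything but VINF_SUCCESS the caller must not use *pu16Value and
       should propagate the status so the trap/error gets handled. */
    return rc;
}
#endif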
3451
3452/**
3453 * Performs a read of guest virtual memory for instruction emulation.
3454 *
3455 * This will check permissions, raise exceptions and update the access bits.
3456 *
3457 * The current implementation will bypass all access handlers. It may later be
3458 * changed to at least respect MMIO.
3459 *
3460 *
3461 * @returns VBox status code suitable for scheduling.
3462 * @retval VINF_SUCCESS if the read was performed successfully.
3463 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3464 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3465 *
3466 * @param pVCpu The VMCPU handle.
3467 * @param pCtxCore The context core.
3468 * @param pvDst Where to put the bytes we've read.
3469 * @param GCPtrSrc The source address.
3470 * @param cb The number of bytes to read. Not more than a page.
3471 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3472 *                    an appropriate error status will be returned (no
3473 *                    informational status codes at all).
3474 *
3475 *
3476 * @remarks Takes the PGM lock.
3477 * @remarks A page fault on the 2nd page of the access will be raised without
3478 *          reading from or updating the first page, since we're ASSUMING that the
3479 * caller is emulating an instruction access.
3480 * @remarks This function will dynamically map physical pages in GC. This may
3481 * unmap mappings done by the caller. Be careful!
3482 */
3483VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
3484{
3485 PVM pVM = pVCpu->CTX_SUFF(pVM);
3486 Assert(cb <= PAGE_SIZE);
3487
3488 /*
3489 * 1. Translate virtual to physical. This may fault.
3490 * 2. Map the physical address.
3491 * 3. Do the read operation.
3492 * 4. Set access bits if required.
3493 */
3494 int rc;
3495 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3496 if (cb <= cb1)
3497 {
3498 /*
3499 * Not crossing pages.
3500 */
3501 RTGCPHYS GCPhys;
3502 uint64_t fFlags;
3503 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3504 if (RT_SUCCESS(rc))
3505 {
3506 if (1) /** @todo we should check reserved bits ... */
3507 {
3508 const void *pvSrc;
3509 PGMPAGEMAPLOCK Lock;
3510 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3511 switch (rc)
3512 {
3513 case VINF_SUCCESS:
3514 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3515 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3516 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3517 PGMPhysReleasePageMappingLock(pVM, &Lock);
3518 break;
3519 case VERR_PGM_PHYS_PAGE_RESERVED:
3520 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3521 memset(pvDst, 0xff, cb);
3522 break;
3523 default:
3524 AssertMsgFailed(("%Rrc\n", rc));
3525 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3526 return rc;
3527 }
3528
3529 if (!(fFlags & X86_PTE_A))
3530 {
3531 /** @todo access bit emulation isn't 100% correct. */
3532 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3533 AssertRC(rc);
3534 }
3535 return VINF_SUCCESS;
3536 }
3537 }
3538 }
3539 else
3540 {
3541 /*
3542 * Crosses pages.
3543 */
3544 size_t cb2 = cb - cb1;
3545 uint64_t fFlags1;
3546 RTGCPHYS GCPhys1;
3547 uint64_t fFlags2;
3548 RTGCPHYS GCPhys2;
3549 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3550 if (RT_SUCCESS(rc))
3551 {
3552 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3553 if (RT_SUCCESS(rc))
3554 {
3555 if (1) /** @todo we should check reserved bits ... */
3556 {
3557 const void *pvSrc;
3558 PGMPAGEMAPLOCK Lock;
3559 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3560 switch (rc)
3561 {
3562 case VINF_SUCCESS:
3563 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3564 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3565 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3566 PGMPhysReleasePageMappingLock(pVM, &Lock);
3567 break;
3568 case VERR_PGM_PHYS_PAGE_RESERVED:
3569 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3570 memset(pvDst, 0xff, cb1);
3571 break;
3572 default:
3573 AssertMsgFailed(("%Rrc\n", rc));
3574 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3575 return rc;
3576 }
3577
3578 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3579 switch (rc)
3580 {
3581 case VINF_SUCCESS:
3582 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3583 PGMPhysReleasePageMappingLock(pVM, &Lock);
3584 break;
3585 case VERR_PGM_PHYS_PAGE_RESERVED:
3586 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3587 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3588 break;
3589 default:
3590 AssertMsgFailed(("%Rrc\n", rc));
3591 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3592 return rc;
3593 }
3594
3595 if (!(fFlags1 & X86_PTE_A))
3596 {
3597 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3598 AssertRC(rc);
3599 }
3600 if (!(fFlags2 & X86_PTE_A))
3601 {
3602 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3603 AssertRC(rc);
3604 }
3605 return VINF_SUCCESS;
3606 }
3607 /* sort out which page */
3608 }
3609 else
3610 GCPtrSrc += cb1; /* fault on 2nd page */
3611 }
3612 }
3613
3614 /*
3615 * Raise a #PF if we're allowed to do that.
3616 */
3617 /* Calc the error bits. */
3618 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3619 uint32_t uErr;
3620 switch (rc)
3621 {
3622 case VINF_SUCCESS:
3623 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3624 rc = VERR_ACCESS_DENIED;
3625 break;
3626
3627 case VERR_PAGE_NOT_PRESENT:
3628 case VERR_PAGE_TABLE_NOT_PRESENT:
3629 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3630 break;
3631
3632 default:
3633 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3634 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3635 return rc;
3636 }
3637 if (fRaiseTrap)
3638 {
3639 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3640 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3641 }
3642 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3643 return rc;
3644}
3645
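/*
 * Editorial sketch (not part of the original file): peeking at guest memory
 * with fRaiseTrap = false, so that a translation failure comes back as an
 * error status (VERR_PAGE_NOT_PRESENT, VERR_PAGE_TABLE_NOT_PRESENT,
 * VERR_ACCESS_DENIED, ...) instead of a #PF being raised for the guest.
 * The helper name and scenario are hypothetical.
 */
#if 0 /* illustration only, not built */
static bool emSketchPeekBytes(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrSrc, void *pvDst, size_t cb)
{
    Assert(cb <= PAGE_SIZE);
    int rc = PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, false /*fRaiseTrap*/);
    /* With fRaiseTrap clear no informational status is returned, so a plain
       success check tells us whether pvDst was filled in. */
    return RT_SUCCESS(rc);
}
#endif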
3646
3647/**
3648 * Performs a write to guest virtual memory for instruction emulation.
3649 *
3650 * This will check permissions, raise exceptions and update the dirty and access
3651 * bits.
3652 *
3653 * @returns VBox status code suitable for scheduling.
3654 * @retval VINF_SUCCESS if the write was performed successfully.
3655 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3656 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3657 *
3658 * @param pVCpu The VMCPU handle.
3659 * @param pCtxCore The context core.
3660 * @param GCPtrDst The destination address.
3661 * @param pvSrc What to write.
3662 * @param cb The number of bytes to write. Not more than a page.
3663 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3664 *                    an appropriate error status will be returned (no
3665 *                    informational status codes at all).
3666 *
3667 * @remarks Takes the PGM lock.
3668 * @remarks A page fault on the 2nd page of the access will be raised without
3669 * writing the bits on the first page since we're ASSUMING that the
3670 * caller is emulating an instruction access.
3671 * @remarks This function will dynamically map physical pages in GC. This may
3672 * unmap mappings done by the caller. Be careful!
3673 */
3674VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
3675{
3676 Assert(cb <= PAGE_SIZE);
3677 PVM pVM = pVCpu->CTX_SUFF(pVM);
3678
3679 /*
3680 * 1. Translate virtual to physical. This may fault.
3681 * 2. Map the physical address.
3682 * 3. Do the write operation.
3683 * 4. Set access bits if required.
3684 */
3685 /** @todo Since this method is frequently used by EMInterpret or IOM
3686  * upon a write fault to a write-access monitored page, we can
3687 * reuse the guest page table walking from the \#PF code. */
3688 int rc;
3689 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3690 if (cb <= cb1)
3691 {
3692 /*
3693 * Not crossing pages.
3694 */
3695 RTGCPHYS GCPhys;
3696 uint64_t fFlags;
3697 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3698 if (RT_SUCCESS(rc))
3699 {
3700 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3701 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3702 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3703 {
3704 void *pvDst;
3705 PGMPAGEMAPLOCK Lock;
3706 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3707 switch (rc)
3708 {
3709 case VINF_SUCCESS:
3710 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3711 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3712 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3713 PGMPhysReleasePageMappingLock(pVM, &Lock);
3714 break;
3715 case VERR_PGM_PHYS_PAGE_RESERVED:
3716 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3717 /* bit bucket */
3718 break;
3719 default:
3720 AssertMsgFailed(("%Rrc\n", rc));
3721 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3722 return rc;
3723 }
3724
3725 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3726 {
3727 /** @todo dirty & access bit emulation isn't 100% correct. */
3728 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3729 AssertRC(rc);
3730 }
3731 return VINF_SUCCESS;
3732 }
3733 rc = VERR_ACCESS_DENIED;
3734 }
3735 }
3736 else
3737 {
3738 /*
3739 * Crosses pages.
3740 */
3741 size_t cb2 = cb - cb1;
3742 uint64_t fFlags1;
3743 RTGCPHYS GCPhys1;
3744 uint64_t fFlags2;
3745 RTGCPHYS GCPhys2;
3746 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3747 if (RT_SUCCESS(rc))
3748 {
3749 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3750 if (RT_SUCCESS(rc))
3751 {
3752 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3753 && (fFlags2 & X86_PTE_RW))
3754 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3755 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) )
3756 {
3757 void *pvDst;
3758 PGMPAGEMAPLOCK Lock;
3759 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3760 switch (rc)
3761 {
3762 case VINF_SUCCESS:
3763 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3764 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3765 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3766 PGMPhysReleasePageMappingLock(pVM, &Lock);
3767 break;
3768 case VERR_PGM_PHYS_PAGE_RESERVED:
3769 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3770 /* bit bucket */
3771 break;
3772 default:
3773 AssertMsgFailed(("%Rrc\n", rc));
3774 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3775 return rc;
3776 }
3777
3778 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3779 switch (rc)
3780 {
3781 case VINF_SUCCESS:
3782 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3783 PGMPhysReleasePageMappingLock(pVM, &Lock);
3784 break;
3785 case VERR_PGM_PHYS_PAGE_RESERVED:
3786 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3787 /* bit bucket */
3788 break;
3789 default:
3790 AssertMsgFailed(("%Rrc\n", rc));
3791 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3792 return rc;
3793 }
3794
3795 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3796 {
3797 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3798 AssertRC(rc);
3799 }
3800 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3801 {
3802 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3803 AssertRC(rc);
3804 }
3805 return VINF_SUCCESS;
3806 }
3807 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3808 GCPtrDst += cb1; /* fault on the 2nd page. */
3809 rc = VERR_ACCESS_DENIED;
3810 }
3811 else
3812 GCPtrDst += cb1; /* fault on the 2nd page. */
3813 }
3814 }
3815
3816 /*
3817 * Raise a #PF if we're allowed to do that.
3818 */
3819 /* Calc the error bits. */
3820 uint32_t uErr;
3821 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3822 switch (rc)
3823 {
3824 case VINF_SUCCESS:
3825 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3826 rc = VERR_ACCESS_DENIED;
3827 break;
3828
3829 case VERR_ACCESS_DENIED:
3830 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3831 break;
3832
3833 case VERR_PAGE_NOT_PRESENT:
3834 case VERR_PAGE_TABLE_NOT_PRESENT:
3835 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3836 break;
3837
3838 default:
3839 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3840 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3841 return rc;
3842 }
3843 if (fRaiseTrap)
3844 {
3845 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3846 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3847 }
3848 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3849 return rc;
3850}
3851
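/*
 * Editorial sketch (not part of the original file): an emulated store going
 * through PGMPhysInterpretedWriteNoHandlers with fRaiseTrap = true, so that a
 * not-present or write-protected page results in a #PF for the guest just
 * like the real instruction would cause.  The helper name is hypothetical.
 */
#if 0 /* illustration only, not built */
static int emSketchStoreU32(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, uint32_t u32Value)
{
    /* VINF_SUCCESS: written, accessed/dirty bits updated as needed.
       VINF_EM_RAW_GUEST_TRAP / VINF_TRPM_XCPT_DISPATCHED: a #PF was raised and
       nothing was written, so the caller must not retire the instruction. */
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, &u32Value, sizeof(u32Value), true /*fRaiseTrap*/);
}
#endif
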
3852/**
3853 * Returns the page type of the specified physical address.
3854 * @returns The page type (PGMPAGETYPE_INVALID if no page exists at the address).
3855 * @param pVM VM Handle.
3856 * @param GCPhys Guest physical address.
3857 */
3858VMMDECL(PGMPAGETYPE) PGMPhysGetPageType(PVM pVM, RTGCPHYS GCPhys)
3859{
3860 PPGMPAGE pPage;
3861
3862 pPage = pgmPhysGetPage(pVM, GCPhys);
3863 if (pPage)
3864 return (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
3865
3866 return PGMPAGETYPE_INVALID;
3867}
3868
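/*
 * Editorial sketch (not part of the original file): using PGMPhysGetPageType
 * to check whether any page (RAM, MMIO, ROM, ...) is registered at a guest
 * physical address.  Only PGMPAGETYPE_INVALID is relied on here; it is what
 * the function above returns when no page exists at the address.
 */
#if 0 /* illustration only, not built */
static bool pgmSketchIsGCPhysBacked(PVM pVM, RTGCPHYS GCPhys)
{
    return PGMPhysGetPageType(pVM, GCPhys) != PGMPAGETYPE_INVALID;
}
#endif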