VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 24700

Last change on this file since 24700 was 24700, checked in by vboxsync, 15 years ago: Paranoia

1/* $Id: PGMAllPhys.cpp 24700 2009-11-16 15:22:05Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM_PHYS
26#include <VBox/pgm.h>
27#include <VBox/trpm.h>
28#include <VBox/vmm.h>
29#include <VBox/iom.h>
30#include <VBox/em.h>
31#include <VBox/rem.h>
32#include "PGMInternal.h"
33#include <VBox/vm.h>
34#include <VBox/param.h>
35#include <VBox/err.h>
36#include <iprt/assert.h>
37#include <iprt/string.h>
38#include <iprt/asm.h>
39#include <VBox/log.h>
40#ifdef IN_RING3
41# include <iprt/thread.h>
42#endif
43
44
45
46#ifndef IN_RING3
47
48/**
49 * \#PF Handler callback for Guest ROM range write access.
50 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
51 *
52 * @returns VBox status code (appropriate for trap handling and GC return).
53 * @param pVM VM Handle.
54 * @param uErrorCode CPU Error code.
55 * @param pRegFrame Trap register frame.
56 * @param pvFault The fault address (cr2).
57 * @param GCPhysFault The GC physical address corresponding to pvFault.
58 * @param pvUser User argument. Pointer to the ROM range structure.
59 */
60VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
61{
62 int rc;
63 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
64 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
65 PVMCPU pVCpu = VMMGetCpu(pVM);
66
67 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
68 switch (pRom->aPages[iPage].enmProt)
69 {
70 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
71 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
72 {
73 /*
74 * If it's a simple instruction which doesn't change the cpu state
75 * we will simply skip it. Otherwise we'll have to defer it to REM.
76 */
77 uint32_t cbOp;
78 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
79 rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, &cbOp);
80 if ( RT_SUCCESS(rc)
81 && pDis->mode == CPUMODE_32BIT /** @todo why does this matter? */
82 && !(pDis->prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
83 {
84 switch (pDis->opcode)
85 {
86 /** @todo Find other instructions we can safely skip, possibly
87 * adding this kind of detection to DIS or EM. */
88 case OP_MOV:
89 pRegFrame->rip += cbOp;
90 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteHandled);
91 return VINF_SUCCESS;
92 }
93 }
94 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
95 return rc;
96 break;
97 }
98
99 case PGMROMPROT_READ_RAM_WRITE_RAM:
100 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
101 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
102 AssertRC(rc);
103 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
104
105 case PGMROMPROT_READ_ROM_WRITE_RAM:
106 /* Handle it in ring-3 because it's *way* easier there. */
107 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
108 break;
109
110 default:
111 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
112 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
113 VERR_INTERNAL_ERROR);
114 }
115
116 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteUnhandled);
117 return VINF_EM_RAW_EMULATE_INSTR;
118}
119
120#endif /* !IN_RING3 */
121
122/**
123 * Checks if Address Gate 20 is enabled or not.
124 *
125 * @returns true if enabled.
126 * @returns false if disabled.
127 * @param pVCpu VMCPU handle.
128 */
129VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
130{
131 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
132 return pVCpu->pgm.s.fA20Enabled;
133}
134
135
136/**
137 * Validates a GC physical address.
138 *
139 * @returns true if valid.
140 * @returns false if invalid.
141 * @param pVM The VM handle.
142 * @param GCPhys The physical address to validate.
143 */
144VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
145{
146 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
147 return pPage != NULL;
148}
149
150
151/**
152 * Checks if a GC physical address is a normal page,
153 * i.e. not ROM, MMIO or reserved.
154 *
155 * @returns true if normal.
156 * @returns false if invalid, ROM, MMIO or reserved page.
157 * @param pVM The VM handle.
158 * @param GCPhys The physical address to check.
159 */
160VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
161{
162 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
163 return pPage
164 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
165}
166
167
168/**
169 * Converts a GC physical address to a HC physical address.
170 *
171 * @returns VINF_SUCCESS on success.
172 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
173 * page but has no physical backing.
174 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
175 * GC physical address.
176 *
177 * @param pVM The VM handle.
178 * @param GCPhys The GC physical address to convert.
179 * @param pHCPhys Where to store the HC physical address on success.
180 */
181VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
182{
183 pgmLock(pVM);
184 PPGMPAGE pPage;
185 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
186 if (RT_SUCCESS(rc))
187 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
188 pgmUnlock(pVM);
189 return rc;
190}
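
/* For illustration, a typical caller that only needs the host-physical address
 * of a guest page might do the following (the context and names are assumed,
 * not taken from this file):
 *
 *      RTHCPHYS HCPhys;
 *      int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
 *      if (RT_SUCCESS(rc))
 *          Log(("GCPhys %RGp -> HCPhys %RHp\n", GCPhys, HCPhys));
 */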
191
192
193/**
194 * Invalidates all the page mapping TLBs.
195 *
196 * @param pVM The VM handle.
197 */
198VMMDECL(void) PGMPhysInvalidatePageMapTLB(PVM pVM)
199{
200 pgmLock(pVM);
201 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
202 {
203 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
204 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
205 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
206 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
207 }
208 /* @todo clear the RC TLB whenever we add it. */
209 pgmUnlock(pVM);
210}
211
212/**
213 * Makes sure that there is at least one handy page ready for use.
214 *
215 * This will also take the appropriate actions when reaching water-marks.
216 *
217 * @returns VBox status code.
218 * @retval VINF_SUCCESS on success.
219 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
220 *
221 * @param pVM The VM handle.
222 *
223 * @remarks Must be called from within the PGM critical section. It may
224 * nip back to ring-3/0 in some cases.
225 */
226static int pgmPhysEnsureHandyPage(PVM pVM)
227{
228 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
229
230 /*
231 * Do we need to do anything special?
232 */
233#ifdef IN_RING3
234 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
235#else
236 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
237#endif
238 {
239 /*
240 * Allocate pages only if we're out of them or, in ring-3, almost out.
241 */
242#ifdef IN_RING3
243 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
244#else
245 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
246#endif
247 {
248 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
249 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
250#ifdef IN_RING3
251 int rc = PGMR3PhysAllocateHandyPages(pVM);
252#else
253 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
254#endif
255 if (RT_UNLIKELY(rc != VINF_SUCCESS))
256 {
257 if (RT_FAILURE(rc))
258 return rc;
259 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
260 if (!pVM->pgm.s.cHandyPages)
261 {
262 LogRel(("PGM: no more handy pages!\n"));
263 return VERR_EM_NO_MEMORY;
264 }
265 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
266 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
267#ifdef IN_RING3
268 REMR3NotifyFF(pVM);
269#else
270 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
271#endif
272 }
273 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
274 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
275 ("%u\n", pVM->pgm.s.cHandyPages),
276 VERR_INTERNAL_ERROR);
277 }
278 else
279 {
280 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
281 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
282#ifndef IN_RING3
283 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
284 {
285 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
286 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
287 }
288#endif
289 }
290 }
291
292 return VINF_SUCCESS;
293}
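
/* In short, the watermarks above are nested: at PGM_HANDY_PAGES_SET_FF the
 * VM_FF_PGM_NEED_HANDY_PAGES force flag is raised, at PGM_HANDY_PAGES_RZ_TO_R3
 * the EMT is additionally told to return to ring-3, and only at
 * PGM_HANDY_PAGES_R3_ALLOC / PGM_HANDY_PAGES_RZ_ALLOC is an actual allocation
 * (PGMR3PhysAllocateHandyPages or the ring-3 call) attempted. */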
294
295
296/**
297 * Replaces a zero or shared page with a new page that we can write to.
298 *
299 * @returns The following VBox status codes.
300 * @retval VINF_SUCCESS on success, pPage is modified.
301 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
302 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
303 *
304 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
305 *
306 * @param pVM The VM address.
307 * @param pPage The physical page tracking structure. This will
308 * be modified on success.
309 * @param GCPhys The address of the page.
310 *
311 * @remarks Must be called from within the PGM critical section. It may
312 * nip back to ring-3/0 in some cases.
313 *
314 * @remarks This function shouldn't really fail, however if it does
315 * it probably means we've screwed up the size of handy pages and/or
316 * the low-water mark. Or, that some device I/O is causing a lot of
317 * pages to be allocated while the host is in a low-memory
318 * condition. This latter should be handled elsewhere and in a more
319 * controlled manner, it's on the @bugref{3170} todo list...
320 */
321int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
322{
323 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
324
325 /*
326 * Prereqs.
327 */
328 Assert(PGMIsLocked(pVM));
329 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
330 Assert(!PGM_PAGE_IS_MMIO(pPage));
331
332
333 /*
334 * Flush any shadow page table mappings of the page.
335 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
336 */
337 bool fFlushTLBs = false;
338 int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
339 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
340
341 /*
342 * Ensure that we've got a page handy, take it and use it.
343 */
344 int rc2 = pgmPhysEnsureHandyPage(pVM);
345 if (RT_FAILURE(rc2))
346 {
347 if (fFlushTLBs)
348 PGM_INVL_ALL_VCPU_TLBS(pVM);
349 Assert(rc2 == VERR_EM_NO_MEMORY);
350 return rc2;
351 }
352 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
353 Assert(PGMIsLocked(pVM));
354 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
355 Assert(!PGM_PAGE_IS_MMIO(pPage));
356
357 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
358 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
359 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
360 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
361 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
362 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
363
364 /*
365 * There are one or two actions to be taken the next time we allocate handy pages:
366 * - Tell the GMM (global memory manager) what the page is being used for.
367 * (Speeds up replacement operations - sharing and defragmenting.)
368 * - If the current backing is shared, it must be freed.
369 */
370 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
371 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
372
373 if (PGM_PAGE_IS_SHARED(pPage))
374 {
375 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
376 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
377 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
378
379 Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
380 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
381 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageReplaceShared));
382 pVM->pgm.s.cSharedPages--;
383 AssertMsgFailed(("TODO: copy shared page content")); /** @todo err.. what about copying the page content? */
384 }
385 else
386 {
387 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
388 STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
389 pVM->pgm.s.cZeroPages--;
390 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
391 }
392
393 /*
394 * Do the PGMPAGE modifications.
395 */
396 pVM->pgm.s.cPrivatePages++;
397 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
398 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
399 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
400
401 if ( fFlushTLBs
402 && rc != VINF_PGM_GCPHYS_ALIASED)
403 PGM_INVL_ALL_VCPU_TLBS(pVM);
404 return rc;
405}
406
407
408/**
409 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
410 *
411 * @returns VBox strict status code.
412 * @retval VINF_SUCCESS on success.
413 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
414 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
415 *
416 * @param pVM The VM address.
417 * @param pPage The physical page tracking structure.
418 * @param GCPhys The address of the page.
419 *
420 * @remarks Called from within the PGM critical section.
421 */
422int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
423{
424 switch (PGM_PAGE_GET_STATE(pPage))
425 {
426 case PGM_PAGE_STATE_WRITE_MONITORED:
427 PGM_PAGE_SET_WRITTEN_TO(pPage);
428 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
429 Assert(pVM->pgm.s.cMonitoredPages > 0);
430 pVM->pgm.s.cMonitoredPages--;
431 pVM->pgm.s.cWrittenToPages++;
432 /* fall thru */
433 default: /* to shut up GCC */
434 case PGM_PAGE_STATE_ALLOCATED:
435 return VINF_SUCCESS;
436
437 /*
438 * Zero pages can be dummy pages for MMIO or reserved memory,
439 * so we need to check the flags before joining cause with
440 * shared page replacement.
441 */
442 case PGM_PAGE_STATE_ZERO:
443 if (PGM_PAGE_IS_MMIO(pPage))
444 return VERR_PGM_PHYS_PAGE_RESERVED;
445 /* fall thru */
446 case PGM_PAGE_STATE_SHARED:
447 return pgmPhysAllocPage(pVM, pPage, GCPhys);
448 }
449}
450
451
452/**
453 * Wrapper for pgmPhysPageMakeWritable which enters the critsect.
454 *
455 * @returns VBox strict status code.
456 * @retval VINF_SUCCESS on success.
457 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
458 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
459 *
460 * @param pVM The VM address.
461 * @param pPage The physical page tracking structure.
462 * @param GCPhys The address of the page.
463 */
464int pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
465{
466 int rc = pgmLock(pVM);
467 if (RT_SUCCESS(rc))
468 {
469 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
470 pgmUnlock(pVM);
471 }
472 return rc;
473}
474
475
476/**
477 * Internal usage: Map the page specified by its GMM ID.
478 *
479 * This is similar to pgmPhysPageMap.
480 *
481 * @returns VBox status code.
482 *
483 * @param pVM The VM handle.
484 * @param idPage The Page ID.
485 * @param HCPhys The physical address (for RC).
486 * @param ppv Where to store the mapping address.
487 *
488 * @remarks Called from within the PGM critical section. The mapping is only
489 * valid while you're inside this section.
490 */
491int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
492{
493 /*
494 * Validation.
495 */
496 Assert(PGMIsLocked(pVM));
497 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
498 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
499 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
500
501#ifdef IN_RC
502 /*
503 * Map it by HCPhys.
504 */
505 return PGMDynMapHCPage(pVM, HCPhys, ppv);
506
507#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
508 /*
509 * Map it by HCPhys.
510 */
511 return pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
512
513#else
514 /*
515 * Find/make Chunk TLB entry for the mapping chunk.
516 */
517 PPGMCHUNKR3MAP pMap;
518 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
519 if (pTlbe->idChunk == idChunk)
520 {
521 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
522 pMap = pTlbe->pChunk;
523 }
524 else
525 {
526 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
527
528 /*
529 * Find the chunk, map it if necessary.
530 */
531 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
532 if (!pMap)
533 {
534# ifdef IN_RING0
535 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
536 AssertRCReturn(rc, rc);
537 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
538 Assert(pMap);
539# else
540 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
541 if (RT_FAILURE(rc))
542 return rc;
543# endif
544 }
545
546 /*
547 * Enter it into the Chunk TLB.
548 */
549 pTlbe->idChunk = idChunk;
550 pTlbe->pChunk = pMap;
551 pMap->iAge = 0;
552 }
553
554 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
555 return VINF_SUCCESS;
556#endif
557}
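
/* A GMM page ID encodes both the chunk and the page within it: the high bits
 * (idPage >> GMM_CHUNKID_SHIFT) select the mapping chunk and the low bits
 * (idPage & GMM_PAGEID_IDX_MASK) the page index inside that chunk, so once the
 * chunk is mapped the address is simply pMap->pv + (index << PAGE_SHIFT), as
 * computed above. */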
558
559
560/**
561 * Maps a page into the current virtual address space so it can be accessed.
562 *
563 * @returns VBox status code.
564 * @retval VINF_SUCCESS on success.
565 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
566 *
567 * @param pVM The VM address.
568 * @param pPage The physical page tracking structure.
569 * @param GCPhys The address of the page.
570 * @param ppMap Where to store the address of the mapping tracking structure.
571 * @param ppv Where to store the mapping address of the page. The page
572 * offset is masked off!
573 *
574 * @remarks Called from within the PGM critical section.
575 */
576static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
577{
578 Assert(PGMIsLocked(pVM));
579
580#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
581 /*
582 * Just some sketchy GC/R0-darwin code.
583 */
584 *ppMap = NULL;
585 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
586 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
587# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
588 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
589# else
590 PGMDynMapHCPage(pVM, HCPhys, ppv);
591# endif
592 return VINF_SUCCESS;
593
594#else /* IN_RING3 || IN_RING0 */
595
596
597 /*
598 * Special case: ZERO and MMIO2 pages.
599 */
600 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
601 if (idChunk == NIL_GMM_CHUNKID)
602 {
603 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
604 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
605 {
606 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
607 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
608 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
609 *ppv = (void *)((uintptr_t)pRam->pvR3 + (GCPhys - pRam->GCPhys));
610 }
611 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
612 {
613 /** @todo deal with aliased MMIO2 pages somehow...
614 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
615 * them, that would also avoid this mess. It would actually be kind of
616 * elegant... */
617 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
618 }
619 else
620 {
621 /** @todo handle MMIO2 */
622 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
623 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
624 ("pPage=%R[pgmpage]\n", pPage),
625 VERR_INTERNAL_ERROR_2);
626 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
627 }
628 *ppMap = NULL;
629 return VINF_SUCCESS;
630 }
631
632 /*
633 * Find/make Chunk TLB entry for the mapping chunk.
634 */
635 PPGMCHUNKR3MAP pMap;
636 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
637 if (pTlbe->idChunk == idChunk)
638 {
639 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
640 pMap = pTlbe->pChunk;
641 }
642 else
643 {
644 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
645
646 /*
647 * Find the chunk, map it if necessary.
648 */
649 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
650 if (!pMap)
651 {
652#ifdef IN_RING0
653 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
654 AssertRCReturn(rc, rc);
655 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
656 Assert(pMap);
657#else
658 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
659 if (RT_FAILURE(rc))
660 return rc;
661#endif
662 }
663
664 /*
665 * Enter it into the Chunk TLB.
666 */
667 pTlbe->idChunk = idChunk;
668 pTlbe->pChunk = pMap;
669 pMap->iAge = 0;
670 }
671
672 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
673 *ppMap = pMap;
674 return VINF_SUCCESS;
675#endif /* IN_RING3 */
676}
677
678
679/**
680 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
681 *
682 * This is typically used in paths where we cannot use the TLB methods (like ROM
683 * pages) or where there is no point in using them since we won't get many hits.
684 *
685 * @returns VBox strict status code.
686 * @retval VINF_SUCCESS on success.
687 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
688 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
689 *
690 * @param pVM The VM address.
691 * @param pPage The physical page tracking structure.
692 * @param GCPhys The address of the page.
693 * @param ppv Where to store the mapping address of the page. The page
694 * offset is masked off!
695 *
696 * @remarks Called from within the PGM critical section. The mapping is only
697 * valid while you're inside this section.
698 */
699int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
700{
701 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
702 if (RT_SUCCESS(rc))
703 {
704 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
705 PPGMPAGEMAP pMapIgnore;
706 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
707 if (RT_FAILURE(rc2)) /* preserve rc */
708 rc = rc2;
709 }
710 return rc;
711}
712
713
714/**
715 * Maps a page into the current virtual address space so it can be accessed for
716 * both writing and reading.
717 *
718 * This is typically used in paths where we cannot use the TLB methods (like ROM
719 * pages) or where there is no point in using them since we won't get many hits.
720 *
721 * @returns VBox status code.
722 * @retval VINF_SUCCESS on success.
723 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
724 *
725 * @param pVM The VM address.
726 * @param pPage The physical page tracking structure. Must be in the
727 * allocated state.
728 * @param GCPhys The address of the page.
729 * @param ppv Where to store the mapping address of the page. The page
730 * offset is masked off!
731 *
732 * @remarks Called from within the PGM critical section. The mapping is only
733 * valid while you're inside this section.
734 */
735int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
736{
737 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
738 PPGMPAGEMAP pMapIgnore;
739 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
740}
741
742
743/**
744 * Maps a page into the current virtual address space so it can be accessed for
745 * reading.
746 *
747 * This is typically used in paths where we cannot use the TLB methods (like ROM
748 * pages) or where there is no point in using them since we won't get many hits.
749 *
750 * @returns VBox status code.
751 * @retval VINF_SUCCESS on success.
752 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
753 *
754 * @param pVM The VM address.
755 * @param pPage The physical page tracking structure.
756 * @param GCPhys The address of the page.
757 * @param ppv Where to store the mapping address of the page. The page
758 * offset is masked off!
759 *
760 * @remarks Called from within the PGM critical section. The mapping is only
761 * valid while you're inside this section.
762 */
763int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
764{
765 PPGMPAGEMAP pMapIgnore;
766 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
767}
768
769
770#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
771/**
772 * Load a guest page into the ring-3 physical TLB.
773 *
774 * @returns VBox status code.
775 * @retval VINF_SUCCESS on success
776 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
777 * @param pPGM The PGM instance pointer.
778 * @param GCPhys The guest physical address in question.
779 */
780int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
781{
782 Assert(PGMIsLocked(PGM2VM(pPGM)));
783 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
784
785 /*
786 * Find the ram range.
787 * 99.8% of requests are expected to be in the first range.
788 */
789 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
790 RTGCPHYS off = GCPhys - pRam->GCPhys;
791 if (RT_UNLIKELY(off >= pRam->cb))
792 {
793 do
794 {
795 pRam = pRam->CTX_SUFF(pNext);
796 if (!pRam)
797 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
798 off = GCPhys - pRam->GCPhys;
799 } while (off >= pRam->cb);
800 }
801
802 /*
803 * Map the page.
804 * Make a special case for the zero page as it is kind of special.
805 */
806 PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
807 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
808 if (!PGM_PAGE_IS_ZERO(pPage))
809 {
810 void *pv;
811 PPGMPAGEMAP pMap;
812 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
813 if (RT_FAILURE(rc))
814 return rc;
815 pTlbe->pMap = pMap;
816 pTlbe->pv = pv;
817 }
818 else
819 {
820 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
821 pTlbe->pMap = NULL;
822 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
823 }
824 pTlbe->GCPhys = (GCPhys & X86_PTE_PAE_PG_MASK);
825 pTlbe->pPage = pPage;
826 return VINF_SUCCESS;
827}
828
829
830/**
831 * Load a guest page into the ring-3 physical TLB.
832 *
833 * @returns VBox status code.
834 * @retval VINF_SUCCESS on success
835 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
836 *
837 * @param pPGM The PGM instance pointer.
838 * @param pPage Pointer to the PGMPAGE structure corresponding to
839 * GCPhys.
840 * @param GCPhys The guest physical address in question.
841 */
842int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys)
843{
844 Assert(PGMIsLocked(PGM2VM(pPGM)));
845 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
846
847 /*
848 * Map the page.
849 * Make a special case for the zero page as it is kind of special.
850 */
851 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
852 if (!PGM_PAGE_IS_ZERO(pPage))
853 {
854 void *pv;
855 PPGMPAGEMAP pMap;
856 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
857 if (RT_FAILURE(rc))
858 return rc;
859 pTlbe->pMap = pMap;
860 pTlbe->pv = pv;
861 }
862 else
863 {
864 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
865 pTlbe->pMap = NULL;
866 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
867 }
868 pTlbe->GCPhys = (GCPhys & X86_PTE_PAE_PG_MASK);
869 pTlbe->pPage = pPage;
870 return VINF_SUCCESS;
871}
872#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
873
874
875/**
876 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
877 * own the PGM lock and therefore not need to lock the mapped page.
878 *
879 * @returns VBox status code.
880 * @retval VINF_SUCCESS on success.
881 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
882 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
883 *
884 * @param pVM The VM handle.
885 * @param GCPhys The guest physical address of the page that should be mapped.
886 * @param pPage Pointer to the PGMPAGE structure for the page.
887 * @param ppv Where to store the address corresponding to GCPhys.
888 *
889 * @internal
890 */
891int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
892{
893 int rc;
894 AssertReturn(pPage, VERR_INTERNAL_ERROR);
895 Assert(PGMIsLocked(pVM));
896
897 /*
898 * Make sure the page is writable.
899 */
900 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
901 {
902 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
903 if (RT_FAILURE(rc))
904 return rc;
905 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
906 }
907 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
908
909 /*
910 * Get the mapping address.
911 */
912#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
913 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
914#else
915 PPGMPAGEMAPTLBE pTlbe;
916 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
917 if (RT_FAILURE(rc))
918 return rc;
919 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
920#endif
921 return VINF_SUCCESS;
922}
923
924
925/**
926 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
927 * own the PGM lock and therefore not need to lock the mapped page.
928 *
929 * @returns VBox status code.
930 * @retval VINF_SUCCESS on success.
931 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
932 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
933 *
934 * @param pVM The VM handle.
935 * @param GCPhys The guest physical address of the page that should be mapped.
936 * @param pPage Pointer to the PGMPAGE structure for the page.
937 * @param ppv Where to store the address corresponding to GCPhys.
938 *
939 * @internal
940 */
941int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
942{
943 AssertReturn(pPage, VERR_INTERNAL_ERROR);
944 Assert(PGMIsLocked(pVM));
945 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
946
947 /*
948 * Get the mapping address.
949 */
950#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
951 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
952#else
953 PPGMPAGEMAPTLBE pTlbe;
954 int rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
955 if (RT_FAILURE(rc))
956 return rc;
957 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
958#endif
959 return VINF_SUCCESS;
960}
961
962
963/**
964 * Requests the mapping of a guest page into the current context.
965 *
966 * This API should only be used for very short term, as it will consume
967 * scarce resources (R0 and GC) in the mapping cache. When you're done
968 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
969 *
970 * This API will assume your intention is to write to the page, and will
971 * therefore replace shared and zero pages. If you do not intend to modify
972 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
973 *
974 * @returns VBox status code.
975 * @retval VINF_SUCCESS on success.
976 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
977 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
978 *
979 * @param pVM The VM handle.
980 * @param GCPhys The guest physical address of the page that should be mapped.
981 * @param ppv Where to store the address corresponding to GCPhys.
982 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
983 *
984 * @remarks The caller is responsible for dealing with access handlers.
985 * @todo Add an informational return code for pages with access handlers?
986 *
987 * @remark Avoid calling this API from within critical sections (other than the
988 * PGM one) because of the deadlock risk. External threads may need to
989 * delegate jobs to the EMTs.
990 * @thread Any thread.
991 */
992VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
993{
994#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
995
996 /*
997 * Find the page and make sure it's writable.
998 */
999 PPGMPAGE pPage;
1000 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1001 if (RT_SUCCESS(rc))
1002 {
1003 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1004 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1005 if (RT_SUCCESS(rc))
1006 {
1007 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1008# if 0
1009 pLock->pvMap = 0;
1010 pLock->pvPage = pPage;
1011# else
1012 pLock->u32Dummy = UINT32_MAX;
1013# endif
1014 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1015 rc = VINF_SUCCESS;
1016 }
1017 }
1018
1019#else /* IN_RING3 || IN_RING0 */
1020 int rc = pgmLock(pVM);
1021 AssertRCReturn(rc, rc);
1022
1023 /*
1024 * Query the Physical TLB entry for the page (may fail).
1025 */
1026 PPGMPAGEMAPTLBE pTlbe;
1027 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1028 if (RT_SUCCESS(rc))
1029 {
1030 /*
1031 * If the page is shared, the zero page, or being write monitored
1032 * it must be converted to a page that's writable if possible.
1033 */
1034 PPGMPAGE pPage = pTlbe->pPage;
1035 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1036 {
1037 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1038 if (RT_SUCCESS(rc))
1039 {
1040 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1041 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1042 }
1043 }
1044 if (RT_SUCCESS(rc))
1045 {
1046 /*
1047 * Now, just perform the locking and calculate the return address.
1048 */
1049 PPGMPAGEMAP pMap = pTlbe->pMap;
1050 if (pMap)
1051 pMap->cRefs++;
1052
1053 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1054 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1055 {
1056 if (cLocks == 0)
1057 pVM->pgm.s.cWriteLockedPages++;
1058 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1059 }
1060 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
1061 {
1062 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1063 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
1064 if (pMap)
1065 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1066 }
1067
1068 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1069 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1070 pLock->pvMap = pMap;
1071 }
1072 }
1073
1074 pgmUnlock(pVM);
1075#endif /* IN_RING3 || IN_RING0 */
1076 return rc;
1077}
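
/* A sketch of the intended usage pattern, paired with
 * PGMPhysReleasePageMappingLock() further down (the buffer, address and size
 * are illustrative; error handling trimmed):
 *
 *      PGMPAGEMAPLOCK Lock;
 *      void          *pv;
 *      int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          memcpy(pv, pvData, cbData);     // must stay within the mapped page
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *      }
 */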
1078
1079
1080/**
1081 * Requests the mapping of a guest page into the current context.
1082 *
1083 * This API should only be used for very short term, as it will consume
1084 * scarce resources (R0 and GC) in the mapping cache. When you're done
1085 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1086 *
1087 * @returns VBox status code.
1088 * @retval VINF_SUCCESS on success.
1089 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1090 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1091 *
1092 * @param pVM The VM handle.
1093 * @param GCPhys The guest physical address of the page that should be mapped.
1094 * @param ppv Where to store the address corresponding to GCPhys.
1095 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1096 *
1097 * @remarks The caller is responsible for dealing with access handlers.
1098 * @todo Add an informational return code for pages with access handlers?
1099 *
1100 * @remark Avoid calling this API from within critical sections (other than
1101 * the PGM one) because of the deadlock risk.
1102 * @thread Any thread.
1103 */
1104VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1105{
1106#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1107
1108 /*
1109 * Find the page and make sure it's readable.
1110 */
1111 PPGMPAGE pPage;
1112 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1113 if (RT_SUCCESS(rc))
1114 {
1115 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1116 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1117 else
1118 {
1119 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1120# if 0
1121 pLock->pvMap = 0;
1122 pLock->pvPage = pPage;
1123# else
1124 pLock->u32Dummy = UINT32_MAX;
1125# endif
1126 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1127 rc = VINF_SUCCESS;
1128 }
1129 }
1130
1131#else /* IN_RING3 || IN_RING0 */
1132 int rc = pgmLock(pVM);
1133 AssertRCReturn(rc, rc);
1134
1135 /*
1136 * Query the Physical TLB entry for the page (may fail).
1137 */
1138 PPGMPAGEMAPTLBE pTlbe;
1139 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1140 if (RT_SUCCESS(rc))
1141 {
1142 /* MMIO pages don't have any readable backing. */
1143 PPGMPAGE pPage = pTlbe->pPage;
1144 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1145 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1146 else
1147 {
1148 /*
1149 * Now, just perform the locking and calculate the return address.
1150 */
1151 PPGMPAGEMAP pMap = pTlbe->pMap;
1152 if (pMap)
1153 pMap->cRefs++;
1154
1155 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1156 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1157 {
1158 if (cLocks == 0)
1159 pVM->pgm.s.cReadLockedPages++;
1160 PGM_PAGE_INC_READ_LOCKS(pPage);
1161 }
1162 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
1163 {
1164 PGM_PAGE_INC_READ_LOCKS(pPage);
1165 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
1166 if (pMap)
1167 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1168 }
1169
1170 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1171 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1172 pLock->pvMap = pMap;
1173 }
1174 }
1175
1176 pgmUnlock(pVM);
1177#endif /* IN_RING3 || IN_RING0 */
1178 return rc;
1179}
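
/* The read-only variant follows the same lock/release pattern (sketch,
 * illustrative names):
 *
 *      PGMPAGEMAPLOCK Lock;
 *      void const    *pv;
 *      int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          memcpy(abDst, pv, cbToRead);    // within the mapped page only
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *      }
 */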
1180
1181
1182/**
1183 * Requests the mapping of a guest page given by virtual address into the current context.
1184 *
1185 * This API should only be used for very short term, as it will consume
1186 * scarce resources (R0 and GC) in the mapping cache. When you're done
1187 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1188 *
1189 * This API will assume your intention is to write to the page, and will
1190 * therefore replace shared and zero pages. If you do not intend to modify
1191 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1192 *
1193 * @returns VBox status code.
1194 * @retval VINF_SUCCESS on success.
1195 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1196 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1197 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1198 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1199 *
1200 * @param pVCpu VMCPU handle.
1201 * @param GCPtr The guest virtual address of the page that should be mapped.
1202 * @param ppv Where to store the address corresponding to GCPtr.
1203 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1204 *
1205 * @remark Avoid calling this API from within critical sections (other than
1206 * the PGM one) because of the deadlock risk.
1207 * @thread EMT
1208 */
1209VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1210{
1211 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1212 RTGCPHYS GCPhys;
1213 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1214 if (RT_SUCCESS(rc))
1215 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1216 return rc;
1217}
1218
1219
1220/**
1221 * Requests the mapping of a guest page given by virtual address into the current context.
1222 *
1223 * This API should only be used for very short term, as it will consume
1224 * scarce resources (R0 and GC) in the mapping cache. When you're done
1225 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1226 *
1227 * @returns VBox status code.
1228 * @retval VINF_SUCCESS on success.
1229 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1230 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1231 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1232 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1233 *
1234 * @param pVCpu VMCPU handle.
1235 * @param GCPtr The guest virtual address of the page that should be mapped.
1236 * @param ppv Where to store the address corresponding to GCPtr.
1237 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1238 *
1239 * @remark Avoid calling this API from within critical sections (other than
1240 * the PGM one) because of the deadlock risk.
1241 * @thread EMT
1242 */
1243VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1244{
1245 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1246 RTGCPHYS GCPhys;
1247 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1248 if (RT_SUCCESS(rc))
1249 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1250 return rc;
1251}
1252
1253
1254/**
1255 * Release the mapping of a guest page.
1256 *
1257 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1258 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1259 *
1260 * @param pVM The VM handle.
1261 * @param pLock The lock structure initialized by the mapping function.
1262 */
1263VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1264{
1265#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1266 /* currently nothing to do here. */
1267 Assert(pLock->u32Dummy == UINT32_MAX);
1268 pLock->u32Dummy = 0;
1269
1270#else /* IN_RING3 */
1271 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1272 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1273 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1274
1275 pLock->uPageAndType = 0;
1276 pLock->pvMap = NULL;
1277
1278 pgmLock(pVM);
1279 if (fWriteLock)
1280 {
1281 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1282 Assert(cLocks > 0);
1283 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1284 {
1285 if (cLocks == 1)
1286 {
1287 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1288 pVM->pgm.s.cWriteLockedPages--;
1289 }
1290 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1291 }
1292
1293 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1294 {
1295 PGM_PAGE_SET_WRITTEN_TO(pPage);
1296 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1297 Assert(pVM->pgm.s.cMonitoredPages > 0);
1298 pVM->pgm.s.cMonitoredPages--;
1299 pVM->pgm.s.cWrittenToPages++;
1300 }
1301 }
1302 else
1303 {
1304 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1305 Assert(cLocks > 0);
1306 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1307 {
1308 if (cLocks == 1)
1309 {
1310 Assert(pVM->pgm.s.cReadLockedPages > 0);
1311 pVM->pgm.s.cReadLockedPages--;
1312 }
1313 PGM_PAGE_DEC_READ_LOCKS(pPage);
1314 }
1315 }
1316
1317 if (pMap)
1318 {
1319 Assert(pMap->cRefs >= 1);
1320 pMap->cRefs--;
1321 pMap->iAge = 0;
1322 }
1323 pgmUnlock(pVM);
1324#endif /* IN_RING3 */
1325}
1326
1327
1328/**
1329 * Converts a GC physical address to a HC ring-3 pointer.
1330 *
1331 * @returns VINF_SUCCESS on success.
1332 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1333 * page but has no physical backing.
1334 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1335 * GC physical address.
1336 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1337 * a dynamic ram chunk boundary.
1338 *
1339 * @param pVM The VM handle.
1340 * @param GCPhys The GC physical address to convert.
1341 * @param cbRange Physical range
1342 * @param pR3Ptr Where to store the R3 pointer on success.
1343 *
1344 * @deprecated Avoid when possible!
1345 */
1346VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
1347{
1348/** @todo this is kind of hacky and needs some more work. */
1349#ifndef DEBUG_sandervl
1350 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1351#endif
1352
1353 Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): dont use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
1354#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1355 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1356#else
1357 pgmLock(pVM);
1358
1359 PPGMRAMRANGE pRam;
1360 PPGMPAGE pPage;
1361 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
1362 if (RT_SUCCESS(rc))
1363 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
1364
1365 pgmUnlock(pVM);
1366 Assert(rc <= VINF_SUCCESS);
1367 return rc;
1368#endif
1369}
1370
1371
1372#ifdef VBOX_STRICT
1373/**
1374 * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
1375 *
1376 * @returns The R3Ptr, NIL_RTR3PTR on failure.
1377 * @param pVM The VM handle.
1378 * @param GCPhys The GC Physical address.
1379 * @param cbRange Physical range.
1380 *
1381 * @deprecated Avoid when possible.
1382 */
1383VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
1384{
1385 RTR3PTR R3Ptr;
1386 int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
1387 if (RT_SUCCESS(rc))
1388 return R3Ptr;
1389 return NIL_RTR3PTR;
1390}
1391#endif /* VBOX_STRICT */
1392
1393
1394/**
1395 * Converts a guest pointer to a GC physical address.
1396 *
1397 * This uses the current CR3/CR0/CR4 of the guest.
1398 *
1399 * @returns VBox status code.
1400 * @param pVCpu The VMCPU Handle
1401 * @param GCPtr The guest pointer to convert.
1402 * @param pGCPhys Where to store the GC physical address.
1403 */
1404VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1405{
1406 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1407 if (pGCPhys && RT_SUCCESS(rc))
1408 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1409 return rc;
1410}
1411
1412
1413/**
1414 * Converts a guest pointer to a HC physical address.
1415 *
1416 * This uses the current CR3/CR0/CR4 of the guest.
1417 *
1418 * @returns VBox status code.
1419 * @param pVCpu The VMCPU Handle
1420 * @param GCPtr The guest pointer to convert.
1421 * @param pHCPhys Where to store the HC physical address.
1422 */
1423VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
1424{
1425 PVM pVM = pVCpu->CTX_SUFF(pVM);
1426 RTGCPHYS GCPhys;
1427 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1428 if (RT_SUCCESS(rc))
1429 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
1430 return rc;
1431}
1432
1433
1434/**
1435 * Converts a guest pointer to a R3 pointer.
1436 *
1437 * This uses the current CR3/CR0/CR4 of the guest.
1438 *
1439 * @returns VBox status code.
1440 * @param pVCpu The VMCPU Handle
1441 * @param GCPtr The guest pointer to convert.
1442 * @param pR3Ptr Where to store the R3 virtual address.
1443 *
1444 * @deprecated Don't use this.
1445 */
1446VMMDECL(int) PGMPhysGCPtr2R3Ptr(PVMCPU pVCpu, RTGCPTR GCPtr, PRTR3PTR pR3Ptr)
1447{
1448 PVM pVM = pVCpu->CTX_SUFF(pVM);
1449 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1450 RTGCPHYS GCPhys;
1451 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1452 if (RT_SUCCESS(rc))
1453 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pR3Ptr);
1454 return rc;
1455}
1456
1457
1458
1459#undef LOG_GROUP
1460#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
1461
1462
1463#ifdef IN_RING3
1464/**
1465 * Cache PGMPhys memory access
1466 *
1467 * @param pVM VM Handle.
1468 * @param pCache Cache structure pointer
1469 * @param GCPhys GC physical address
1470 * @param pbR3 R3 pointer corresponding to the physical page
1471 *
1472 * @thread EMT.
1473 */
1474static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
1475{
1476 uint32_t iCacheIndex;
1477
1478 Assert(VM_IS_EMT(pVM));
1479
1480 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
1481 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
1482
1483 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
1484
1485 ASMBitSet(&pCache->aEntries, iCacheIndex);
1486
1487 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
1488 pCache->Entry[iCacheIndex].pbR3 = pbR3;
1489}
1490#endif /* IN_RING3 */
1491
1492
1493/**
1494 * Deals with reading from a page with one or more ALL access handlers.
1495 *
1496 * @returns VBox status code. Can be ignored in ring-3.
1497 * @retval VINF_SUCCESS.
1498 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1499 *
1500 * @param pVM The VM handle.
1501 * @param pPage The page descriptor.
1502 * @param GCPhys The physical address to start reading at.
1503 * @param pvBuf Where to put the bits we read.
1504 * @param cb How much to read - less or equal to a page.
1505 */
1506static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
1507{
1508 /*
1509 * The most frequent access here is MMIO and shadowed ROM.
1510 * The current code ASSUMES all these access handlers cover full pages!
1511 */
1512
1513 /*
1514 * Whatever we do, we need the source page, so map it first.
1515 */
1516 const void *pvSrc = NULL;
1517 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
1518 if (RT_FAILURE(rc))
1519 {
1520 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1521 GCPhys, pPage, rc));
1522 memset(pvBuf, 0xff, cb);
1523 return VINF_SUCCESS;
1524 }
1525 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1526
1527 /*
1528 * Deal with any physical handlers.
1529 */
1530 PPGMPHYSHANDLER pPhys = NULL;
1531 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
1532 {
1533#ifdef IN_RING3
1534 PPGMPHYSHANDLER pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1535 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1536 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
1537 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
1538 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1539 Assert(pPhys->CTX_SUFF(pfnHandler));
1540
1541 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
1542 void *pvUser = pPhys->CTX_SUFF(pvUser);
1543
1544 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
1545 STAM_PROFILE_START(&pPhys->Stat, h);
1546 Assert(PGMIsLockOwner(pVM));
1547 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1548 pgmUnlock(pVM);
1549 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
1550 pgmLock(pVM);
1551# ifdef VBOX_WITH_STATISTICS
1552 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1553 if (pPhys)
1554 STAM_PROFILE_STOP(&pPhys->Stat, h);
1555# else
1556 pPhys = NULL; /* might not be valid anymore. */
1557# endif
1558 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
1559#else
1560 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1561 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1562 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1563#endif
1564 }
1565
1566 /*
1567 * Deal with any virtual handlers.
1568 */
1569 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
1570 {
1571 unsigned iPage;
1572 PPGMVIRTHANDLER pVirt;
1573
1574 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
1575 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
1576 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
1577 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1578 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
1579
1580#ifdef IN_RING3
1581 if (pVirt->pfnHandlerR3)
1582 {
1583 if (!pPhys)
1584 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1585 else
1586 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
1587 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1588 + (iPage << PAGE_SHIFT)
1589 + (GCPhys & PAGE_OFFSET_MASK);
1590
1591 STAM_PROFILE_START(&pVirt->Stat, h);
1592 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
1593 STAM_PROFILE_STOP(&pVirt->Stat, h);
1594 if (rc2 == VINF_SUCCESS)
1595 rc = VINF_SUCCESS;
1596 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
1597 }
1598 else
1599 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1600#else
1601 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1602 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1603 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1604#endif
1605 }
1606
1607 /*
1608 * Take the default action.
1609 */
1610 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1611 memcpy(pvBuf, pvSrc, cb);
1612 return rc;
1613}
1614
1615
1616/**
1617 * Read physical memory.
1618 *
1619 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
1620 * want to ignore those.
1621 *
1622 * @returns VBox status code. Can be ignored in ring-3.
1623 * @retval VINF_SUCCESS.
1624 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1625 *
1626 * @param pVM VM Handle.
1627 * @param GCPhys Physical address start reading from.
1628 * @param pvBuf Where to put the read bits.
1629 * @param cbRead How many bytes to read.
1630 */
1631VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1632{
1633 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
1634 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
1635
1636 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysRead));
1637 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
1638
1639 pgmLock(pVM);
1640
1641 /*
1642 * Copy loop on ram ranges.
1643 */
1644 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1645 for (;;)
1646 {
1647 /* Find range. */
1648 while (pRam && GCPhys > pRam->GCPhysLast)
1649 pRam = pRam->CTX_SUFF(pNext);
1650 /* Inside range or not? */
1651 if (pRam && GCPhys >= pRam->GCPhys)
1652 {
1653 /*
1654 * Must work our way thru this page by page.
1655 */
1656 RTGCPHYS off = GCPhys - pRam->GCPhys;
1657 while (off < pRam->cb)
1658 {
1659 unsigned iPage = off >> PAGE_SHIFT;
1660 PPGMPAGE pPage = &pRam->aPages[iPage];
1661 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1662 if (cb > cbRead)
1663 cb = cbRead;
1664
1665 /*
1666 * Any ALL access handlers?
1667 */
1668 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
1669 {
1670 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
1671 if (RT_FAILURE(rc))
1672 {
1673 pgmUnlock(pVM);
1674 return rc;
1675 }
1676 }
1677 else
1678 {
1679 /*
1680 * Get the pointer to the page.
1681 */
1682 const void *pvSrc;
1683 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
1684 if (RT_SUCCESS(rc))
1685 memcpy(pvBuf, pvSrc, cb);
1686 else
1687 {
1688 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1689 pRam->GCPhys + off, pPage, rc));
1690 memset(pvBuf, 0xff, cb);
1691 }
1692 }
1693
1694 /* next page */
1695 if (cb >= cbRead)
1696 {
1697 pgmUnlock(pVM);
1698 return VINF_SUCCESS;
1699 }
1700 cbRead -= cb;
1701 off += cb;
1702 pvBuf = (char *)pvBuf + cb;
1703 } /* walk pages in ram range. */
1704
1705 GCPhys = pRam->GCPhysLast + 1;
1706 }
1707 else
1708 {
1709 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
1710
1711 /*
1712 * Unassigned address space.
1713 */
1714 if (!pRam)
1715 break;
1716 size_t cb = pRam->GCPhys - GCPhys;
1717 if (cb >= cbRead)
1718 {
1719 memset(pvBuf, 0xff, cbRead);
1720 break;
1721 }
1722 memset(pvBuf, 0xff, cb);
1723
1724 cbRead -= cb;
1725 pvBuf = (char *)pvBuf + cb;
1726 GCPhys += cb;
1727 }
1728 } /* Ram range walk */
1729
1730 pgmUnlock(pVM);
1731 return VINF_SUCCESS;
1732}
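/*
 * Illustrative usage sketch (not part of the original file): a ring-3 caller
 * reading a small block of guest physical memory through the handler-aware
 * API above. The helper name, the address argument and the buffer size are
 * assumptions made purely for the example.
 */
#if 0 /* example only */
static int pgmR3ExamplePhysRead(PVM pVM, RTGCPHYS GCPhysSrc)
{
    uint8_t abBuf[64];
    int rc = PGMPhysRead(pVM, GCPhysSrc, abBuf, sizeof(abBuf));
    /* In ring-3 this never fails with VERR_PGM_PHYS_WR_HIT_HANDLER; unassigned
       address space is returned as 0xff bytes. */
    return rc;
}
#endif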
1733
1734
1735/**
1736 * Deals with writing to a page with one or more WRITE or ALL access handlers.
1737 *
1738 * @returns VBox status code. Can be ignored in ring-3.
1739 * @retval VINF_SUCCESS.
1740 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1741 *
1742 * @param pVM The VM handle.
1743 * @param pPage The page descriptor.
1744 * @param GCPhys The physical address to start writing at.
1745 * @param pvBuf What to write.
1746 * @param cbWrite How much to write - less or equal to a page.
1747 */
1748static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
1749{
1750 void *pvDst = NULL;
1751 int rc;
1752
1753 /*
1754 * Give priority to physical handlers (like #PF does).
1755 *
1756 * Hope for a lonely physical handler first that covers the whole
1757 * write area. This should be a pretty frequent case with MMIO and
1758 * the heavy usage of full page handlers in the page pool.
1759 */
1760 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
1761 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
1762 {
1763 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1764 if (pCur)
1765 {
1766 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1767 Assert(pCur->CTX_SUFF(pfnHandler));
1768
1769 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
1770 if (cbRange > cbWrite)
1771 cbRange = cbWrite;
1772
1773#ifndef IN_RING3
1774 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1775 NOREF(cbRange);
1776 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1777 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1778
1779#else /* IN_RING3 */
1780 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1781 if (!PGM_PAGE_IS_MMIO(pPage))
1782 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1783 else
1784 rc = VINF_SUCCESS;
1785 if (RT_SUCCESS(rc))
1786 {
1787 PFNPGMR3PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
1788 void *pvUser = pCur->CTX_SUFF(pvUser);
1789
1790 STAM_PROFILE_START(&pCur->Stat, h);
1791 Assert(PGMIsLockOwner(pVM));
1792 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1793 pgmUnlock(pVM);
1794 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
1795 pgmLock(pVM);
1796# ifdef VBOX_WITH_STATISTICS
1797 pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1798 if (pCur)
1799 STAM_PROFILE_STOP(&pCur->Stat, h);
1800# else
1801 pCur = NULL; /* might not be valid anymore. */
1802# endif
1803 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1804 memcpy(pvDst, pvBuf, cbRange);
1805 else
1806 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
1807 }
1808 else
1809 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1810 GCPhys, pPage, rc), rc);
1811 if (RT_LIKELY(cbRange == cbWrite))
1812 return VINF_SUCCESS;
1813
1814 /* more fun to be had below */
1815 cbWrite -= cbRange;
1816 GCPhys += cbRange;
1817 pvBuf = (uint8_t *)pvBuf + cbRange;
1818 pvDst = (uint8_t *)pvDst + cbRange;
1819#endif /* IN_RING3 */
1820 }
1821 /* else: the handler is somewhere else in the page, deal with it below. */
1822 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
1823 }
1824 /*
1825 * A virtual handler without any interfering physical handlers.
1826 * Hopefully it'll cover the whole write.
1827 */
1828 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
1829 {
1830 unsigned iPage;
1831 PPGMVIRTHANDLER pCur;
1832 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
1833 if (RT_SUCCESS(rc))
1834 {
1835 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
1836 if (cbRange > cbWrite)
1837 cbRange = cbWrite;
1838
1839#ifndef IN_RING3
1840 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1841 NOREF(cbRange);
1842 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1843 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1844
1845#else /* IN_RING3 */
1846
1847 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1848 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1849 if (RT_SUCCESS(rc))
1850 {
1851 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1852 if (pCur->pfnHandlerR3)
1853 {
1854 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
1855 + (iPage << PAGE_SHIFT)
1856 + (GCPhys & PAGE_OFFSET_MASK);
1857
1858 STAM_PROFILE_START(&pCur->Stat, h);
1859 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
1860 STAM_PROFILE_STOP(&pCur->Stat, h);
1861 }
1862 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1863 memcpy(pvDst, pvBuf, cbRange);
1864 else
1865 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
1866 }
1867 else
1868 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1869 GCPhys, pPage, rc), rc);
1870 if (RT_LIKELY(cbRange == cbWrite))
1871 return VINF_SUCCESS;
1872
1873 /* more fun to be had below */
1874 cbWrite -= cbRange;
1875 GCPhys += cbRange;
1876 pvBuf = (uint8_t *)pvBuf + cbRange;
1877 pvDst = (uint8_t *)pvDst + cbRange;
1878#endif
1879 }
1880 /* else: the handler is somewhere else in the page, deal with it below. */
1881 }
1882
1883 /*
1884 * Deal with all the odd ends.
1885 */
1886
1887 /* We need a writable destination page. */
1888 if (!pvDst)
1889 {
1890 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1891 AssertLogRelMsgReturn(RT_SUCCESS(rc),
1892 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1893 GCPhys, pPage, rc), rc);
1894 }
1895
1896 /* The loop state (big + ugly). */
1897 unsigned iVirtPage = 0;
1898 PPGMVIRTHANDLER pVirt = NULL;
1899 uint32_t offVirt = PAGE_SIZE;
1900 uint32_t offVirtLast = PAGE_SIZE;
1901 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
1902
1903 PPGMPHYSHANDLER pPhys = NULL;
1904 uint32_t offPhys = PAGE_SIZE;
1905 uint32_t offPhysLast = PAGE_SIZE;
1906 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
1907
1908 /* The loop. */
1909 for (;;)
1910 {
1911 /*
1912 * Find the closest handler at or above GCPhys.
1913 */
1914 if (fMoreVirt && !pVirt)
1915 {
1916 int rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
1917 if (RT_SUCCESS(rc))
1918 {
1919 offVirt = 0;
1920 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1921 }
1922 else
1923 {
1924 PPGMPHYS2VIRTHANDLER pVirtPhys;
1925 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
1926 GCPhys, true /* fAbove */);
1927 if ( pVirtPhys
1928 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
1929 {
1930 /* ASSUME that pVirtPhys only covers one page. */
1931 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
1932 Assert(pVirtPhys->Core.Key > GCPhys);
1933
1934 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
1935 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
1936 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1937 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1938 }
1939 else
1940 {
1941 pVirt = NULL;
1942 fMoreVirt = false;
1943 offVirt = offVirtLast = PAGE_SIZE;
1944 }
1945 }
1946 }
1947
1948 if (fMorePhys && !pPhys)
1949 {
1950 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1951 if (pPhys)
1952 {
1953 offPhys = 0;
1954 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
1955 }
1956 else
1957 {
1958 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
1959 GCPhys, true /* fAbove */);
1960 if ( pPhys
1961 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
1962 {
1963 offPhys = pPhys->Core.Key - GCPhys;
1964 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
1965 }
1966 else
1967 {
1968 pPhys = NULL;
1969 fMorePhys = false;
1970 offPhys = offPhysLast = PAGE_SIZE;
1971 }
1972 }
1973 }
1974
1975 /*
1976 * Handle access to space without handlers (that's easy).
1977 */
1978 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1979 uint32_t cbRange = (uint32_t)cbWrite;
1980 if (offPhys && offVirt)
1981 {
1982 if (cbRange > offPhys)
1983 cbRange = offPhys;
1984 if (cbRange > offVirt)
1985 cbRange = offVirt;
1986 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
1987 }
1988 /*
1989 * Physical handler.
1990 */
1991 else if (!offPhys && offVirt)
1992 {
1993 if (cbRange > offPhysLast + 1)
1994 cbRange = offPhysLast + 1;
1995 if (cbRange > offVirt)
1996 cbRange = offVirt;
1997#ifdef IN_RING3
1998 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
1999 void *pvUser = pPhys->CTX_SUFF(pvUser);
2000
2001 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2002 STAM_PROFILE_START(&pPhys->Stat, h);
2003 Assert(PGMIsLockOwner(pVM));
2004 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2005 pgmUnlock(pVM);
2006 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2007 pgmLock(pVM);
2008# ifdef VBOX_WITH_STATISTICS
2009 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2010 if (pPhys)
2011 STAM_PROFILE_STOP(&pPhys->Stat, h);
2012# else
2013 pPhys = NULL; /* might not be valid anymore. */
2014# endif
2015 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2016#else
2017 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2018 NOREF(cbRange);
2019 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2020 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2021#endif
2022 }
2023 /*
2024 * Virtual handler.
2025 */
2026 else if (offPhys && !offVirt)
2027 {
2028 if (cbRange > offVirtLast + 1)
2029 cbRange = offVirtLast + 1;
2030 if (cbRange > offPhys)
2031 cbRange = offPhys;
2032#ifdef IN_RING3
2033 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2034 if (pVirt->pfnHandlerR3)
2035 {
2036 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2037 + (iVirtPage << PAGE_SHIFT)
2038 + (GCPhys & PAGE_OFFSET_MASK);
2039 STAM_PROFILE_START(&pVirt->Stat, h);
2040 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2041 STAM_PROFILE_STOP(&pVirt->Stat, h);
2042 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2043 }
2044 pVirt = NULL;
2045#else
2046 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2047 NOREF(cbRange);
2048 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2049 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2050#endif
2051 }
2052 /*
2053 * Both... give the physical one priority.
2054 */
2055 else
2056 {
2057 Assert(!offPhys && !offVirt);
2058 if (cbRange > offVirtLast + 1)
2059 cbRange = offVirtLast + 1;
2060 if (cbRange > offPhysLast + 1)
2061 cbRange = offPhysLast + 1;
2062
2063#ifdef IN_RING3
2064 if (pVirt->pfnHandlerR3)
2065 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2066 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2067
2068 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2069 void *pvUser = pPhys->CTX_SUFF(pvUser);
2070
2071 STAM_PROFILE_START(&pPhys->Stat, h);
2072 Assert(PGMIsLockOwner(pVM));
2073 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2074 pgmUnlock(pVM);
2075 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2076 pgmLock(pVM);
2077# ifdef VBOX_WITH_STATISTICS
2078 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2079 if (pPhys)
2080 STAM_PROFILE_STOP(&pPhys->Stat, h);
2081# else
2082 pPhys = NULL; /* might not be valid anymore. */
2083# endif
2084 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2085 if (pVirt->pfnHandlerR3)
2086 {
2087
2088 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2089 + (iVirtPage << PAGE_SHIFT)
2090 + (GCPhys & PAGE_OFFSET_MASK);
2091 STAM_PROFILE_START(&pVirt->Stat, h);
2092 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2093 STAM_PROFILE_STOP(&pVirt->Stat, h);
2094 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2095 rc = VINF_SUCCESS;
2096 else
2097 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2098 }
2099 pPhys = NULL;
2100 pVirt = NULL;
2101#else
2102 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2103 NOREF(cbRange);
2104 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2105 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2106#endif
2107 }
2108 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2109 memcpy(pvDst, pvBuf, cbRange);
2110
2111 /*
2112 * Advance if we've got more stuff to do.
2113 */
2114 if (cbRange >= cbWrite)
2115 return VINF_SUCCESS;
2116
2117 cbWrite -= cbRange;
2118 GCPhys += cbRange;
2119 pvBuf = (uint8_t *)pvBuf + cbRange;
2120 pvDst = (uint8_t *)pvDst + cbRange;
2121
2122 offPhys -= cbRange;
2123 offPhysLast -= cbRange;
2124 offVirt -= cbRange;
2125 offVirtLast -= cbRange;
2126 }
2127}
2128
2129
2130/**
2131 * Write to physical memory.
2132 *
2133 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2134 * want to ignore those.
2135 *
2136 * @returns VBox status code. Can be ignored in ring-3.
2137 * @retval VINF_SUCCESS.
2138 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2139 *
2140 * @param pVM VM Handle.
2141 * @param GCPhys Physical address to write to.
2142 * @param pvBuf What to write.
2143 * @param cbWrite How many bytes to write.
2144 */
2145VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2146{
2147 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2148 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2149 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2150
2151 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWrite));
2152 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2153
2154 pgmLock(pVM);
2155
2156 /*
2157 * Copy loop on ram ranges.
2158 */
2159 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2160 for (;;)
2161 {
2162 /* Find range. */
2163 while (pRam && GCPhys > pRam->GCPhysLast)
2164 pRam = pRam->CTX_SUFF(pNext);
2165 /* Inside range or not? */
2166 if (pRam && GCPhys >= pRam->GCPhys)
2167 {
2168 /*
2169 * Must work our way thru this page by page.
2170 */
2171 RTGCPTR off = GCPhys - pRam->GCPhys;
2172 while (off < pRam->cb)
2173 {
2174 RTGCPTR iPage = off >> PAGE_SHIFT;
2175 PPGMPAGE pPage = &pRam->aPages[iPage];
2176 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2177 if (cb > cbWrite)
2178 cb = cbWrite;
2179
2180 /*
2181 * Any active WRITE or ALL access handlers?
2182 */
2183 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2184 {
2185 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2186 if (RT_FAILURE(rc))
2187 {
2188 pgmUnlock(pVM);
2189 return rc;
2190 }
2191 }
2192 else
2193 {
2194 /*
2195 * Get the pointer to the page.
2196 */
2197 void *pvDst;
2198 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
2199 if (RT_SUCCESS(rc))
2200 memcpy(pvDst, pvBuf, cb);
2201 else
2202 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2203 pRam->GCPhys + off, pPage, rc));
2204 }
2205
2206 /* next page */
2207 if (cb >= cbWrite)
2208 {
2209 pgmUnlock(pVM);
2210 return VINF_SUCCESS;
2211 }
2212
2213 cbWrite -= cb;
2214 off += cb;
2215 pvBuf = (const char *)pvBuf + cb;
2216 } /* walk pages in ram range */
2217
2218 GCPhys = pRam->GCPhysLast + 1;
2219 }
2220 else
2221 {
2222 /*
2223 * Unassigned address space, skip it.
2224 */
2225 if (!pRam)
2226 break;
2227 size_t cb = pRam->GCPhys - GCPhys;
2228 if (cb >= cbWrite)
2229 break;
2230 cbWrite -= cb;
2231 pvBuf = (const char *)pvBuf + cb;
2232 GCPhys += cb;
2233 }
2234 } /* Ram range walk */
2235
2236 pgmUnlock(pVM);
2237 return VINF_SUCCESS;
2238}
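/*
 * Illustrative usage sketch (not part of the original file): writing guest
 * physical memory through the handler-aware API above from R0/RC code and
 * deferring to ring-3 when a handler is hit. The helper name and the payload
 * are assumptions for the example.
 */
#if 0 /* example only */
static int pgmExamplePhysWriteU32(PVM pVM, RTGCPHYS GCPhysDst, uint32_t u32Value)
{
    int rc = PGMPhysWrite(pVM, GCPhysDst, &u32Value, sizeof(u32Value));
    if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)
    {
        /* Only possible in R0/RC: the write touched a page with an access
           handler, so the caller has to retry the operation in ring-3. */
    }
    return rc;
}
#endif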
2239
2240
2241/**
2242 * Read from guest physical memory by GC physical address, bypassing
2243 * MMIO and access handlers.
2244 *
2245 * @returns VBox status.
2246 * @param pVM VM handle.
2247 * @param pvDst The destination address.
2248 * @param GCPhysSrc The source address (GC physical address).
2249 * @param cb The number of bytes to read.
2250 */
2251VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2252{
2253 /*
2254 * Treat the first page as a special case.
2255 */
2256 if (!cb)
2257 return VINF_SUCCESS;
2258
2259 /* map the 1st page */
2260 void const *pvSrc;
2261 PGMPAGEMAPLOCK Lock;
2262 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2263 if (RT_FAILURE(rc))
2264 return rc;
2265
2266 /* optimize for the case where access is completely within the first page. */
2267 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2268 if (RT_LIKELY(cb <= cbPage))
2269 {
2270 memcpy(pvDst, pvSrc, cb);
2271 PGMPhysReleasePageMappingLock(pVM, &Lock);
2272 return VINF_SUCCESS;
2273 }
2274
2275 /* copy to the end of the page. */
2276 memcpy(pvDst, pvSrc, cbPage);
2277 PGMPhysReleasePageMappingLock(pVM, &Lock);
2278 GCPhysSrc += cbPage;
2279 pvDst = (uint8_t *)pvDst + cbPage;
2280 cb -= cbPage;
2281
2282 /*
2283 * Page by page.
2284 */
2285 for (;;)
2286 {
2287 /* map the page */
2288 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2289 if (RT_FAILURE(rc))
2290 return rc;
2291
2292 /* last page? */
2293 if (cb <= PAGE_SIZE)
2294 {
2295 memcpy(pvDst, pvSrc, cb);
2296 PGMPhysReleasePageMappingLock(pVM, &Lock);
2297 return VINF_SUCCESS;
2298 }
2299
2300 /* copy the entire page and advance */
2301 memcpy(pvDst, pvSrc, PAGE_SIZE);
2302 PGMPhysReleasePageMappingLock(pVM, &Lock);
2303 GCPhysSrc += PAGE_SIZE;
2304 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2305 cb -= PAGE_SIZE;
2306 }
2307 /* won't ever get here. */
2308}
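/*
 * Illustrative usage sketch (not part of the original file): reading guest
 * physical memory while deliberately bypassing MMIO and access handlers,
 * e.g. for debugger or saved-state style access. The names are assumptions.
 */
#if 0 /* example only */
static int pgmExampleSimplePhysRead(PVM pVM, RTGCPHYS GCPhysSrc)
{
    uint64_t u64;
    int rc = PGMPhysSimpleReadGCPhys(pVM, &u64, GCPhysSrc, sizeof(u64));
    /* No handlers are invoked; the read can still fail if the address cannot
       be mapped (e.g. VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS). */
    return rc;
}
#endif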
2309
2310
2311/**
2312 * Write to guest physical memory referenced by GC physical address,
2313 * i.e. write memory to a GC physical address in guest physical memory.
2314 *
2315 * This will bypass MMIO and access handlers.
2316 *
2317 * @returns VBox status.
2318 * @param pVM VM handle.
2319 * @param GCPhysDst The GC physical address of the destination.
2320 * @param pvSrc The source buffer.
2321 * @param cb The number of bytes to write.
2322 */
2323VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2324{
2325 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2326
2327 /*
2328 * Treat the first page as a special case.
2329 */
2330 if (!cb)
2331 return VINF_SUCCESS;
2332
2333 /* map the 1st page */
2334 void *pvDst;
2335 PGMPAGEMAPLOCK Lock;
2336 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2337 if (RT_FAILURE(rc))
2338 return rc;
2339
2340 /* optimize for the case where access is completely within the first page. */
2341 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2342 if (RT_LIKELY(cb <= cbPage))
2343 {
2344 memcpy(pvDst, pvSrc, cb);
2345 PGMPhysReleasePageMappingLock(pVM, &Lock);
2346 return VINF_SUCCESS;
2347 }
2348
2349 /* copy to the end of the page. */
2350 memcpy(pvDst, pvSrc, cbPage);
2351 PGMPhysReleasePageMappingLock(pVM, &Lock);
2352 GCPhysDst += cbPage;
2353 pvSrc = (const uint8_t *)pvSrc + cbPage;
2354 cb -= cbPage;
2355
2356 /*
2357 * Page by page.
2358 */
2359 for (;;)
2360 {
2361 /* map the page */
2362 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2363 if (RT_FAILURE(rc))
2364 return rc;
2365
2366 /* last page? */
2367 if (cb <= PAGE_SIZE)
2368 {
2369 memcpy(pvDst, pvSrc, cb);
2370 PGMPhysReleasePageMappingLock(pVM, &Lock);
2371 return VINF_SUCCESS;
2372 }
2373
2374 /* copy the entire page and advance */
2375 memcpy(pvDst, pvSrc, PAGE_SIZE);
2376 PGMPhysReleasePageMappingLock(pVM, &Lock);
2377 GCPhysDst += PAGE_SIZE;
2378 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2379 cb -= PAGE_SIZE;
2380 }
2381 /* won't ever get here. */
2382}
2383
2384
2385/**
2386 * Read from guest physical memory referenced by GC pointer.
2387 *
2388 * This function uses the current CR3/CR0/CR4 of the guest and will
2389 * bypass access handlers and not set any accessed bits.
2390 *
2391 * @returns VBox status.
2392 * @param pVCpu The VMCPU handle.
2393 * @param pvDst The destination address.
2394 * @param GCPtrSrc The source address (GC pointer).
2395 * @param cb The number of bytes to read.
2396 */
2397VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2398{
2399 PVM pVM = pVCpu->CTX_SUFF(pVM);
2400
2401 /*
2402 * Treat the first page as a special case.
2403 */
2404 if (!cb)
2405 return VINF_SUCCESS;
2406
2407 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleRead));
2408 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
2409
2410 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
2411 * when many VCPUs are fighting for the lock.
2412 */
2413 pgmLock(pVM);
2414
2415 /* map the 1st page */
2416 void const *pvSrc;
2417 PGMPAGEMAPLOCK Lock;
2418 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2419 if (RT_FAILURE(rc))
2420 {
2421 pgmUnlock(pVM);
2422 return rc;
2423 }
2424
2425 /* optimize for the case where access is completely within the first page. */
2426 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2427 if (RT_LIKELY(cb <= cbPage))
2428 {
2429 memcpy(pvDst, pvSrc, cb);
2430 PGMPhysReleasePageMappingLock(pVM, &Lock);
2431 pgmUnlock(pVM);
2432 return VINF_SUCCESS;
2433 }
2434
2435 /* copy to the end of the page. */
2436 memcpy(pvDst, pvSrc, cbPage);
2437 PGMPhysReleasePageMappingLock(pVM, &Lock);
2438 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
2439 pvDst = (uint8_t *)pvDst + cbPage;
2440 cb -= cbPage;
2441
2442 /*
2443 * Page by page.
2444 */
2445 for (;;)
2446 {
2447 /* map the page */
2448 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2449 if (RT_FAILURE(rc))
2450 {
2451 pgmUnlock(pVM);
2452 return rc;
2453 }
2454
2455 /* last page? */
2456 if (cb <= PAGE_SIZE)
2457 {
2458 memcpy(pvDst, pvSrc, cb);
2459 PGMPhysReleasePageMappingLock(pVM, &Lock);
2460 pgmUnlock(pVM);
2461 return VINF_SUCCESS;
2462 }
2463
2464 /* copy the entire page and advance */
2465 memcpy(pvDst, pvSrc, PAGE_SIZE);
2466 PGMPhysReleasePageMappingLock(pVM, &Lock);
2467 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
2468 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2469 cb -= PAGE_SIZE;
2470 }
2471 /* won't ever get here. */
2472}
2473
2474
2475/**
2476 * Write to guest physical memory referenced by GC pointer.
2477 *
2478 * This function uses the current CR3/CR0/CR4 of the guest and will
2479 * bypass access handlers and not set dirty or accessed bits.
2480 *
2481 * @returns VBox status.
2482 * @param pVCpu The VMCPU handle.
2483 * @param GCPtrDst The destination address (GC pointer).
2484 * @param pvSrc The source address.
2485 * @param cb The number of bytes to write.
2486 */
2487VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2488{
2489 PVM pVM = pVCpu->CTX_SUFF(pVM);
2490
2491 /*
2492 * Treat the first page as a special case.
2493 */
2494 if (!cb)
2495 return VINF_SUCCESS;
2496
2497 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWrite));
2498 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
2499
2500 /* map the 1st page */
2501 void *pvDst;
2502 PGMPAGEMAPLOCK Lock;
2503 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2504 if (RT_FAILURE(rc))
2505 return rc;
2506
2507 /* optimize for the case where access is completely within the first page. */
2508 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2509 if (RT_LIKELY(cb <= cbPage))
2510 {
2511 memcpy(pvDst, pvSrc, cb);
2512 PGMPhysReleasePageMappingLock(pVM, &Lock);
2513 return VINF_SUCCESS;
2514 }
2515
2516 /* copy to the end of the page. */
2517 memcpy(pvDst, pvSrc, cbPage);
2518 PGMPhysReleasePageMappingLock(pVM, &Lock);
2519 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2520 pvSrc = (const uint8_t *)pvSrc + cbPage;
2521 cb -= cbPage;
2522
2523 /*
2524 * Page by page.
2525 */
2526 for (;;)
2527 {
2528 /* map the page */
2529 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2530 if (RT_FAILURE(rc))
2531 return rc;
2532
2533 /* last page? */
2534 if (cb <= PAGE_SIZE)
2535 {
2536 memcpy(pvDst, pvSrc, cb);
2537 PGMPhysReleasePageMappingLock(pVM, &Lock);
2538 return VINF_SUCCESS;
2539 }
2540
2541 /* copy the entire page and advance */
2542 memcpy(pvDst, pvSrc, PAGE_SIZE);
2543 PGMPhysReleasePageMappingLock(pVM, &Lock);
2544 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2545 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2546 cb -= PAGE_SIZE;
2547 }
2548 /* won't ever get here. */
2549}
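/*
 * Illustrative usage sketch (not part of the original file): writing through
 * a guest virtual address using the current CR3, without touching access
 * handlers or the A/D bits. The helper name and the payload are assumptions.
 */
#if 0 /* example only */
static int pgmExampleSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst)
{
    uint32_t const u32Magic = UINT32_C(0x12345678); /* arbitrary payload */
    /* Translation uses the guest's current paging mode; no handlers fire and
       no accessed/dirty bits are set in the guest PTE. */
    return PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrDst, &u32Magic, sizeof(u32Magic));
}
#endif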
2550
2551
2552/**
2553 * Write to guest physical memory referenced by GC pointer and update the PTE.
2554 *
2555 * This function uses the current CR3/CR0/CR4 of the guest and will
2556 * bypass access handlers but will set any dirty and accessed bits in the PTE.
2557 *
2558 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
2559 *
2560 * @returns VBox status.
2561 * @param pVCpu The VMCPU handle.
2562 * @param GCPtrDst The destination address (GC pointer).
2563 * @param pvSrc The source address.
2564 * @param cb The number of bytes to write.
2565 */
2566VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2567{
2568 PVM pVM = pVCpu->CTX_SUFF(pVM);
2569
2570 /*
2571 * Treat the first page as a special case.
2572 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
2573 */
2574 if (!cb)
2575 return VINF_SUCCESS;
2576
2577 /* map the 1st page */
2578 void *pvDst;
2579 PGMPAGEMAPLOCK Lock;
2580 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2581 if (RT_FAILURE(rc))
2582 return rc;
2583
2584 /* optimize for the case where access is completely within the first page. */
2585 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2586 if (RT_LIKELY(cb <= cbPage))
2587 {
2588 memcpy(pvDst, pvSrc, cb);
2589 PGMPhysReleasePageMappingLock(pVM, &Lock);
2590 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2591 return VINF_SUCCESS;
2592 }
2593
2594 /* copy to the end of the page. */
2595 memcpy(pvDst, pvSrc, cbPage);
2596 PGMPhysReleasePageMappingLock(pVM, &Lock);
2597 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2598 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2599 pvSrc = (const uint8_t *)pvSrc + cbPage;
2600 cb -= cbPage;
2601
2602 /*
2603 * Page by page.
2604 */
2605 for (;;)
2606 {
2607 /* map the page */
2608 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2609 if (RT_FAILURE(rc))
2610 return rc;
2611
2612 /* last page? */
2613 if (cb <= PAGE_SIZE)
2614 {
2615 memcpy(pvDst, pvSrc, cb);
2616 PGMPhysReleasePageMappingLock(pVM, &Lock);
2617 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2618 return VINF_SUCCESS;
2619 }
2620
2621 /* copy the entire page and advance */
2622 memcpy(pvDst, pvSrc, PAGE_SIZE);
2623 PGMPhysReleasePageMappingLock(pVM, &Lock);
2624 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2625 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2626 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2627 cb -= PAGE_SIZE;
2628 }
2629 /* won't ever get here. */
2630}
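/*
 * Illustrative usage sketch (not part of the original file): the dirty-bit
 * variant is the one an instruction emulator would typically pick, since it
 * also marks the guest PTE accessed and dirty. Names are assumptions.
 */
#if 0 /* example only */
static int pgmExampleEmulatedStore(PVMCPU pVCpu, RTGCPTR GCPtrDst, uint8_t bValue)
{
    /* Same copy semantics as PGMPhysSimpleWriteGCPtr, but X86_PTE_A and
       X86_PTE_D are set on every page touched by the write. */
    return PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, &bValue, sizeof(bValue));
}
#endif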
2631
2632
2633/**
2634 * Read from guest physical memory referenced by GC pointer.
2635 *
2636 * This function uses the current CR3/CR0/CR4 of the guest and will
2637 * respect access handlers and set accessed bits.
2638 *
2639 * @returns VBox status.
2640 * @param pVCpu The VMCPU handle.
2641 * @param pvDst The destination address.
2642 * @param GCPtrSrc The source address (GC pointer).
2643 * @param cb The number of bytes to read.
2644 * @thread The vCPU EMT.
2645 */
2646VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2647{
2648 RTGCPHYS GCPhys;
2649 uint64_t fFlags;
2650 int rc;
2651 PVM pVM = pVCpu->CTX_SUFF(pVM);
2652
2653 /*
2654 * Anything to do?
2655 */
2656 if (!cb)
2657 return VINF_SUCCESS;
2658
2659 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
2660
2661 /*
2662 * Optimize reads within a single page.
2663 */
2664 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2665 {
2666 /* Convert virtual to physical address + flags */
2667 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2668 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2669 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2670
2671 /* mark the guest page as accessed. */
2672 if (!(fFlags & X86_PTE_A))
2673 {
2674 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2675 AssertRC(rc);
2676 }
2677
2678 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
2679 }
2680
2681 /*
2682 * Page by page.
2683 */
2684 for (;;)
2685 {
2686 /* Convert virtual to physical address + flags */
2687 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2688 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2689 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2690
2691 /* mark the guest page as accessed. */
2692 if (!(fFlags & X86_PTE_A))
2693 {
2694 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2695 AssertRC(rc);
2696 }
2697
2698 /* copy */
2699 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2700 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
2701 if (cbRead >= cb || RT_FAILURE(rc))
2702 return rc;
2703
2704 /* next */
2705 cb -= cbRead;
2706 pvDst = (uint8_t *)pvDst + cbRead;
2707 GCPtrSrc += cbRead;
2708 }
2709}
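/*
 * Illustrative usage sketch (not part of the original file): a handler-aware
 * read through a guest virtual address on the owning EMT. The helper name and
 * the buffer are assumptions for the example.
 */
#if 0 /* example only */
static int pgmExampleReadGuestBytes(PVMCPU pVCpu, RTGCPTR GCPtrSrc, void *pvDst, size_t cbDst)
{
    /* Translates page by page with the current CR3/CR0/CR4, marks the pages
       accessed and then goes through PGMPhysRead, so access handlers fire. */
    return PGMPhysReadGCPtr(pVCpu, pvDst, GCPtrSrc, cbDst);
}
#endif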
2710
2711
2712/**
2713 * Write to guest physical memory referenced by GC pointer.
2714 *
2715 * This function uses the current CR3/CR0/CR4 of the guest and will
2716 * respect access handlers and set dirty and accessed bits.
2717 *
2718 * @returns VBox status.
2719 * @retval VINF_SUCCESS.
2720 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2721 *
2722 * @param pVCpu The VMCPU handle.
2723 * @param GCPtrDst The destination address (GC pointer).
2724 * @param pvSrc The source address.
2725 * @param cb The number of bytes to write.
2726 */
2727VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2728{
2729 RTGCPHYS GCPhys;
2730 uint64_t fFlags;
2731 int rc;
2732 PVM pVM = pVCpu->CTX_SUFF(pVM);
2733
2734 /*
2735 * Anything to do?
2736 */
2737 if (!cb)
2738 return VINF_SUCCESS;
2739
2740 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
2741
2742 /*
2743 * Optimize writes within a single page.
2744 */
2745 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2746 {
2747 /* Convert virtual to physical address + flags */
2748 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2749 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2750 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2751
2752 /* Mention when we ignore X86_PTE_RW... */
2753 if (!(fFlags & X86_PTE_RW))
2754 Log(("PGMPhysGCPtr2GCPhys: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2755
2756 /* Mark the guest page as accessed and dirty if necessary. */
2757 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2758 {
2759 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2760 AssertRC(rc);
2761 }
2762
2763 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
2764 }
2765
2766 /*
2767 * Page by page.
2768 */
2769 for (;;)
2770 {
2771 /* Convert virtual to physical address + flags */
2772 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2773 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2774 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2775
2776 /* Mention when we ignore X86_PTE_RW... */
2777 if (!(fFlags & X86_PTE_RW))
2778 Log(("PGMPhysGCPtr2GCPhys: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2779
2780 /* Mark the guest page as accessed and dirty if necessary. */
2781 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2782 {
2783 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2784 AssertRC(rc);
2785 }
2786
2787 /* copy */
2788 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2789 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
2790 if (cbWrite >= cb || RT_FAILURE(rc))
2791 return rc;
2792
2793 /* next */
2794 cb -= cbWrite;
2795 pvSrc = (uint8_t *)pvSrc + cbWrite;
2796 GCPtrDst += cbWrite;
2797 }
2798}
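/*
 * Illustrative usage sketch (not part of the original file): the write
 * counterpart; in R0/RC a handler hit surfaces as VERR_PGM_PHYS_WR_HIT_HANDLER
 * and should be punted to ring-3. Names are assumptions for the example.
 */
#if 0 /* example only */
static int pgmExampleWriteGuestU16(PVMCPU pVCpu, RTGCPTR GCPtrDst, uint16_t u16)
{
    int rc = PGMPhysWriteGCPtr(pVCpu, GCPtrDst, &u16, sizeof(u16));
    /* Accessed/dirty bits are set on the guest PTE(s); read-only pages are
       written anyway, with a log entry noting that X86_PTE_RW was ignored. */
    return rc;
}
#endif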
2799
2800
2801/**
2802 * Performs a read of guest virtual memory for instruction emulation.
2803 *
2804 * This will check permissions, raise exceptions and update the access bits.
2805 *
2806 * The current implementation will bypass all access handlers. It may later be
2807 * changed to at least respect MMIO.
2808 *
2809 *
2810 * @returns VBox status code suitable to scheduling.
2811 * @retval VINF_SUCCESS if the read was performed successfully.
2812 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2813 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2814 *
2815 * @param pVCpu The VMCPU handle.
2816 * @param pCtxCore The context core.
2817 * @param pvDst Where to put the bytes we've read.
2818 * @param GCPtrSrc The source address.
2819 * @param cb The number of bytes to read. Not more than a page.
2820 *
2821 * @remark This function will dynamically map physical pages in GC. This may unmap
2822 * mappings done by the caller. Be careful!
2823 */
2824VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
2825{
2826 PVM pVM = pVCpu->CTX_SUFF(pVM);
2827 Assert(cb <= PAGE_SIZE);
2828
2829/** @todo r=bird: This isn't perfect!
2830 * -# It's not checking for reserved bits being 1.
2831 * -# It's not correctly dealing with the access bit.
2832 * -# It's not respecting MMIO memory or any other access handlers.
2833 */
2834 /*
2835 * 1. Translate virtual to physical. This may fault.
2836 * 2. Map the physical address.
2837 * 3. Do the read operation.
2838 * 4. Set access bits if required.
2839 */
2840 int rc;
2841 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
2842 if (cb <= cb1)
2843 {
2844 /*
2845 * Not crossing pages.
2846 */
2847 RTGCPHYS GCPhys;
2848 uint64_t fFlags;
2849 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
2850 if (RT_SUCCESS(rc))
2851 {
2852 /** @todo we should check reserved bits ... */
2853 void *pvSrc;
2854 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
2855 switch (rc)
2856 {
2857 case VINF_SUCCESS:
2858 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
2859 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
2860 break;
2861 case VERR_PGM_PHYS_PAGE_RESERVED:
2862 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2863 memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
2864 break;
2865 default:
2866 return rc;
2867 }
2868
2869 /** @todo access bit emulation isn't 100% correct. */
2870 if (!(fFlags & X86_PTE_A))
2871 {
2872 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2873 AssertRC(rc);
2874 }
2875 return VINF_SUCCESS;
2876 }
2877 }
2878 else
2879 {
2880 /*
2881 * Crosses pages.
2882 */
2883 size_t cb2 = cb - cb1;
2884 uint64_t fFlags1;
2885 RTGCPHYS GCPhys1;
2886 uint64_t fFlags2;
2887 RTGCPHYS GCPhys2;
2888 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
2889 if (RT_SUCCESS(rc))
2890 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
2891 if (RT_SUCCESS(rc))
2892 {
2893 /** @todo we should check reserved bits ... */
2894 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
2895 void *pvSrc1;
2896 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
2897 switch (rc)
2898 {
2899 case VINF_SUCCESS:
2900 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
2901 break;
2902 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2903 memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
2904 break;
2905 default:
2906 return rc;
2907 }
2908
2909 void *pvSrc2;
2910 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
2911 switch (rc)
2912 {
2913 case VINF_SUCCESS:
2914 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
2915 break;
2916 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2917 memset((uint8_t *)pvDst + cb1, 0, cb2); /** @todo this is wrong, it should be 0xff */
2918 break;
2919 default:
2920 return rc;
2921 }
2922
2923 if (!(fFlags1 & X86_PTE_A))
2924 {
2925 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2926 AssertRC(rc);
2927 }
2928 if (!(fFlags2 & X86_PTE_A))
2929 {
2930 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2931 AssertRC(rc);
2932 }
2933 return VINF_SUCCESS;
2934 }
2935 }
2936
2937 /*
2938 * Raise a #PF.
2939 */
2940 uint32_t uErr;
2941
2942 /* Get the current privilege level. */
2943 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
2944 switch (rc)
2945 {
2946 case VINF_SUCCESS:
2947 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
2948 break;
2949
2950 case VERR_PAGE_NOT_PRESENT:
2951 case VERR_PAGE_TABLE_NOT_PRESENT:
2952 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
2953 break;
2954
2955 default:
2956 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
2957 return rc;
2958 }
2959 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
2960 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
2961}
2962
2963
2964/**
2965 * Performs a read of guest virtual memory for instruction emulation.
2966 *
2967 * This will check permissions, raise exceptions and update the access bits.
2968 *
2969 * The current implementation will bypass all access handlers. It may later be
2970 * changed to at least respect MMIO.
2971 *
2972 *
2973 * @returns VBox status code suitable to scheduling.
2974 * @retval VINF_SUCCESS if the read was performed successfully.
2975 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2976 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2977 *
2978 * @param pVCpu The VMCPU handle.
2979 * @param pCtxCore The context core.
2980 * @param pvDst Where to put the bytes we've read.
2981 * @param GCPtrSrc The source address.
2982 * @param cb The number of bytes to read. Not more than a page.
2983 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
2984 * an appropriate error status will be returned (no
2985 * informational status at all).
2986 *
2987 *
2988 * @remarks Takes the PGM lock.
2989 * @remarks A page fault on the 2nd page of the access will be raised without
2990 * writing the bits on the first page since we're ASSUMING that the
2991 * caller is emulating an instruction access.
2992 * @remarks This function will dynamically map physical pages in GC. This may
2993 * unmap mappings done by the caller. Be careful!
2994 */
2995VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
2996{
2997 PVM pVM = pVCpu->CTX_SUFF(pVM);
2998 Assert(cb <= PAGE_SIZE);
2999
3000 /*
3001 * 1. Translate virtual to physical. This may fault.
3002 * 2. Map the physical address.
3003 * 3. Do the read operation.
3004 * 4. Set access bits if required.
3005 */
3006 int rc;
3007 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3008 if (cb <= cb1)
3009 {
3010 /*
3011 * Not crossing pages.
3012 */
3013 RTGCPHYS GCPhys;
3014 uint64_t fFlags;
3015 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3016 if (RT_SUCCESS(rc))
3017 {
3018 if (1) /** @todo we should check reserved bits ... */
3019 {
3020 const void *pvSrc;
3021 PGMPAGEMAPLOCK Lock;
3022 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3023 switch (rc)
3024 {
3025 case VINF_SUCCESS:
3026 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3027 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3028 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3029 break;
3030 case VERR_PGM_PHYS_PAGE_RESERVED:
3031 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3032 memset(pvDst, 0xff, cb);
3033 break;
3034 default:
3035 AssertMsgFailed(("%Rrc\n", rc));
3036 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3037 return rc;
3038 }
3039 PGMPhysReleasePageMappingLock(pVM, &Lock);
3040
3041 if (!(fFlags & X86_PTE_A))
3042 {
3043 /** @todo access bit emulation isn't 100% correct. */
3044 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3045 AssertRC(rc);
3046 }
3047 return VINF_SUCCESS;
3048 }
3049 }
3050 }
3051 else
3052 {
3053 /*
3054 * Crosses pages.
3055 */
3056 size_t cb2 = cb - cb1;
3057 uint64_t fFlags1;
3058 RTGCPHYS GCPhys1;
3059 uint64_t fFlags2;
3060 RTGCPHYS GCPhys2;
3061 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3062 if (RT_SUCCESS(rc))
3063 {
3064 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3065 if (RT_SUCCESS(rc))
3066 {
3067 if (1) /** @todo we should check reserved bits ... */
3068 {
3069 const void *pvSrc;
3070 PGMPAGEMAPLOCK Lock;
3071 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3072 switch (rc)
3073 {
3074 case VINF_SUCCESS:
3075 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3076 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3077 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3078 PGMPhysReleasePageMappingLock(pVM, &Lock);
3079 break;
3080 case VERR_PGM_PHYS_PAGE_RESERVED:
3081 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3082 memset(pvDst, 0xff, cb1);
3083 break;
3084 default:
3085 AssertMsgFailed(("%Rrc\n", rc));
3086 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3087 return rc;
3088 }
3089
3090 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3091 switch (rc)
3092 {
3093 case VINF_SUCCESS:
3094 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3095 PGMPhysReleasePageMappingLock(pVM, &Lock);
3096 break;
3097 case VERR_PGM_PHYS_PAGE_RESERVED:
3098 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3099 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3100 break;
3101 default:
3102 AssertMsgFailed(("%Rrc\n", rc));
3103 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3104 return rc;
3105 }
3106
3107 if (!(fFlags1 & X86_PTE_A))
3108 {
3109 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3110 AssertRC(rc);
3111 }
3112 if (!(fFlags2 & X86_PTE_A))
3113 {
3114 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3115 AssertRC(rc);
3116 }
3117 return VINF_SUCCESS;
3118 }
3119 /* sort out which page */
3120 }
3121 else
3122 GCPtrSrc += cb1; /* fault on 2nd page */
3123 }
3124 }
3125
3126 /*
3127 * Raise a #PF if we're allowed to do that.
3128 */
3129 /* Calc the error bits. */
3130 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3131 uint32_t uErr;
3132 switch (rc)
3133 {
3134 case VINF_SUCCESS:
3135 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3136 rc = VERR_ACCESS_DENIED;
3137 break;
3138
3139 case VERR_PAGE_NOT_PRESENT:
3140 case VERR_PAGE_TABLE_NOT_PRESENT:
3141 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3142 break;
3143
3144 default:
3145 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3146 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3147 return rc;
3148 }
3149 if (fRaiseTrap)
3150 {
3151 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3152 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3153 }
3154 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3155 return rc;
3156}
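/*
 * Illustrative usage sketch (not part of the original file): how an
 * instruction emulator might fetch operand bytes and let this helper raise
 * the guest #PF itself. The helper name and the size check are assumptions.
 */
#if 0 /* example only */
static int pgmExampleFetchOperand(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrSrc, void *pvDst, size_t cb)
{
    Assert(cb <= PAGE_SIZE);
    /* fRaiseTrap=true: on a translation failure the guest #PF is dispatched
       and VINF_EM_RAW_GUEST_TRAP / VINF_TRPM_XCPT_DISPATCHED is returned. */
    return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, true /*fRaiseTrap*/);
}
#endif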
3157
3158
3159/**
3160 * Performs a write to guest virtual memory for instruction emulation.
3161 *
3162 * This will check permissions, raise exceptions and update the dirty and access
3163 * bits.
3164 *
3165 * @returns VBox status code suitable to scheduling.
3166 * @retval VINF_SUCCESS if the read was performed successfully.
3167 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3168 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3169 *
3170 * @param pVCpu The VMCPU handle.
3171 * @param pCtxCore The context core.
3172 * @param GCPtrDst The destination address.
3173 * @param pvSrc What to write.
3174 * @param cb The number of bytes to write. Not more than a page.
3175 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3176 * an appropriate error status will be returned (no
3177 * informational status at all).
3178 *
3179 * @remarks Takes the PGM lock.
3180 * @remarks A page fault on the 2nd page of the access will be raised without
3181 * writing the bits on the first page since we're ASSUMING that the
3182 * caller is emulating an instruction access.
3183 * @remarks This function will dynamically map physical pages in GC. This may
3184 * unmap mappings done by the caller. Be careful!
3185 */
3186VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
3187{
3188 Assert(cb <= PAGE_SIZE);
3189 PVM pVM = pVCpu->CTX_SUFF(pVM);
3190
3191 /*
3192 * 1. Translate virtual to physical. This may fault.
3193 * 2. Map the physical address.
3194 * 3. Do the write operation.
3195 * 4. Set access bits if required.
3196 */
3197 int rc;
3198 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3199 if (cb <= cb1)
3200 {
3201 /*
3202 * Not crossing pages.
3203 */
3204 RTGCPHYS GCPhys;
3205 uint64_t fFlags;
3206 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3207 if (RT_SUCCESS(rc))
3208 {
3209 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3210 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3211 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3212 {
3213 void *pvDst;
3214 PGMPAGEMAPLOCK Lock;
3215 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3216 switch (rc)
3217 {
3218 case VINF_SUCCESS:
3219 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3220 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3221 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3222 PGMPhysReleasePageMappingLock(pVM, &Lock);
3223 break;
3224 case VERR_PGM_PHYS_PAGE_RESERVED:
3225 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3226 /* bit bucket */
3227 break;
3228 default:
3229 AssertMsgFailed(("%Rrc\n", rc));
3230 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3231 return rc;
3232 }
3233
3234 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3235 {
3236 /** @todo dirty & access bit emulation isn't 100% correct. */
3237 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3238 AssertRC(rc);
3239 }
3240 return VINF_SUCCESS;
3241 }
3242 rc = VERR_ACCESS_DENIED;
3243 }
3244 }
3245 else
3246 {
3247 /*
3248 * Crosses pages.
3249 */
3250 size_t cb2 = cb - cb1;
3251 uint64_t fFlags1;
3252 RTGCPHYS GCPhys1;
3253 uint64_t fFlags2;
3254 RTGCPHYS GCPhys2;
3255 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3256 if (RT_SUCCESS(rc))
3257 {
3258 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3259 if (RT_SUCCESS(rc))
3260 {
3261 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3262 && (fFlags2 & X86_PTE_RW))
3263 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3264 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) )
3265 {
3266 void *pvDst;
3267 PGMPAGEMAPLOCK Lock;
3268 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3269 switch (rc)
3270 {
3271 case VINF_SUCCESS:
3272 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3273 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3274 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3275 PGMPhysReleasePageMappingLock(pVM, &Lock);
3276 break;
3277 case VERR_PGM_PHYS_PAGE_RESERVED:
3278 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3279 /* bit bucket */
3280 break;
3281 default:
3282 AssertMsgFailed(("%Rrc\n", rc));
3283 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3284 return rc;
3285 }
3286
3287 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3288 switch (rc)
3289 {
3290 case VINF_SUCCESS:
3291 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3292 PGMPhysReleasePageMappingLock(pVM, &Lock);
3293 break;
3294 case VERR_PGM_PHYS_PAGE_RESERVED:
3295 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3296 /* bit bucket */
3297 break;
3298 default:
3299 AssertMsgFailed(("%Rrc\n", rc));
3300 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3301 return rc;
3302 }
3303
3304 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3305 {
3306 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3307 AssertRC(rc);
3308 }
3309 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3310 {
3311 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3312 AssertRC(rc);
3313 }
3314 return VINF_SUCCESS;
3315 }
3316 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3317 GCPtrDst += cb1; /* fault on the 2nd page. */
3318 rc = VERR_ACCESS_DENIED;
3319 }
3320 else
3321 GCPtrDst += cb1; /* fault on the 2nd page. */
3322 }
3323 }
3324
3325 /*
3326 * Raise a #PF if we're allowed to do that.
3327 */
3328 /* Calc the error bits. */
3329 uint32_t uErr;
3330 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3331 switch (rc)
3332 {
3333 case VINF_SUCCESS:
3334 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3335 rc = VERR_ACCESS_DENIED;
3336 break;
3337
3338 case VERR_ACCESS_DENIED:
3339 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3340 break;
3341
3342 case VERR_PAGE_NOT_PRESENT:
3343 case VERR_PAGE_TABLE_NOT_PRESENT:
3344 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3345 break;
3346
3347 default:
3348 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3349 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3350 return rc;
3351 }
3352 if (fRaiseTrap)
3353 {
3354 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3355 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3356 }
3357 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3358 return rc;
3359}
3360
3361