VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 24702

Last change on this file since 24702 was 24702, checked in by vboxsync, 15 years ago

Disabled the physical TLB again; regressions

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 119.2 KB
1/* $Id: PGMAllPhys.cpp 24702 2009-11-16 15:30:06Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM_PHYS
26#include <VBox/pgm.h>
27#include <VBox/trpm.h>
28#include <VBox/vmm.h>
29#include <VBox/iom.h>
30#include <VBox/em.h>
31#include <VBox/rem.h>
32#include "PGMInternal.h"
33#include <VBox/vm.h>
34#include <VBox/param.h>
35#include <VBox/err.h>
36#include <iprt/assert.h>
37#include <iprt/string.h>
38#include <iprt/asm.h>
39#include <VBox/log.h>
40#ifdef IN_RING3
41# include <iprt/thread.h>
42#endif
43
44
45
46#ifndef IN_RING3
47
48/**
49 * \#PF Handler callback for Guest ROM range write access.
50 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
51 *
52 * @returns VBox status code (appropriate for trap handling and GC return).
53 * @param pVM VM Handle.
54 * @param uErrorCode CPU Error code.
55 * @param pRegFrame Trap register frame.
56 * @param pvFault The fault address (cr2).
57 * @param GCPhysFault The GC physical address corresponding to pvFault.
58 * @param pvUser User argument. Pointer to the ROM range structure.
59 */
60VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
61{
62 int rc;
63 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
64 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
65 PVMCPU pVCpu = VMMGetCpu(pVM);
66
67 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
68 switch (pRom->aPages[iPage].enmProt)
69 {
70 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
71 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
72 {
73 /*
74 * If it's a simple instruction which doesn't change the cpu state
75 * we will simply skip it. Otherwise we'll have to defer it to REM.
76 */
77 uint32_t cbOp;
78 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
79 rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, &cbOp);
80 if ( RT_SUCCESS(rc)
81 && pDis->mode == CPUMODE_32BIT /** @todo why does this matter? */
82 && !(pDis->prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
83 {
84 switch (pDis->opcode)
85 {
86 /** @todo Find other instructions we can safely skip, possibly
87 * adding this kind of detection to DIS or EM. */
88 case OP_MOV:
89 pRegFrame->rip += cbOp;
90 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteHandled);
91 return VINF_SUCCESS;
92 }
93 }
94 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
95 return rc;
96 break;
97 }
98
99 case PGMROMPROT_READ_RAM_WRITE_RAM:
100 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
101 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
102 AssertRC(rc);
103 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
104
105 case PGMROMPROT_READ_ROM_WRITE_RAM:
106 /* Handle it in ring-3 because it's *way* easier there. */
107 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
108 break;
109
110 default:
111 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
112 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
113 VERR_INTERNAL_ERROR);
114 }
115
116 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteUnhandled);
117 return VINF_EM_RAW_EMULATE_INSTR;
118}
119
120#endif /* !IN_RING3 */
121
122/**
123 * Checks if Address Gate 20 is enabled or not.
124 *
125 * @returns true if enabled.
126 * @returns false if disabled.
127 * @param pVCpu VMCPU handle.
128 */
129VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
130{
131 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
132 return pVCpu->pgm.s.fA20Enabled;
133}
134
135
136/**
137 * Validates a GC physical address.
138 *
139 * @returns true if valid.
140 * @returns false if invalid.
141 * @param pVM The VM handle.
142 * @param GCPhys The physical address to validate.
143 */
144VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
145{
146 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
147 return pPage != NULL;
148}
149
150
151/**
152 * Checks if a GC physical address is a normal page,
153 * i.e. not ROM, MMIO or reserved.
154 *
155 * @returns true if normal.
156 * @returns false if invalid, ROM, MMIO or reserved page.
157 * @param pVM The VM handle.
158 * @param GCPhys The physical address to check.
159 */
160VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
161{
162 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
163 return pPage
164 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
165}
166
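/**
 * Usage sketch (illustrative only, not part of the original file): combining
 * the two queries above before touching guest RAM. The helper name is made up
 * for the example; only APIs defined in this file are assumed.
 */
static bool pgmExampleIsPlainRamPage(PVM pVM, RTGCPHYS GCPhys)
{
    if (!PGMPhysIsGCPhysValid(pVM, GCPhys))
        return false;                       /* nothing mapped at this address */
    /* Valid, but it could still be ROM, MMIO or otherwise reserved. */
    return PGMPhysIsGCPhysNormal(pVM, GCPhys);
}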
167
168/**
169 * Converts a GC physical address to a HC physical address.
170 *
171 * @returns VINF_SUCCESS on success.
172 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
173 * page but has no physical backing.
174 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
175 * GC physical address.
176 *
177 * @param pVM The VM handle.
178 * @param GCPhys The GC physical address to convert.
179 * @param pHCPhys Where to store the HC physical address on success.
180 */
181VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
182{
183 pgmLock(pVM);
184 PPGMPAGE pPage;
185 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
186 if (RT_SUCCESS(rc))
187 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
188 pgmUnlock(pVM);
189 return rc;
190}
191
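/**
 * Usage sketch (illustrative only, not part of the original file): the error
 * handling a caller of PGMPhysGCPhys2HCPhys typically needs, distinguishing
 * the two failure codes documented above. The helper name is made up.
 */
static void pgmExampleLogHCPhys(PVM pVM, RTGCPHYS GCPhys)
{
    RTHCPHYS HCPhys;
    int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
    if (RT_SUCCESS(rc))
        Log(("GCPhys %RGp -> HCPhys %RHp\n", GCPhys, HCPhys));
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        Log(("GCPhys %RGp is valid but has no physical backing\n", GCPhys));
    else
        Log(("GCPhys %RGp is not a valid guest physical address (%Rrc)\n", GCPhys, rc));
}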
192
193/**
194 * Invalidates all the page mapping TLBs.
195 *
196 * @param pVM The VM handle.
197 */
198VMMDECL(void) PGMPhysInvalidatePageMapTLB(PVM pVM)
199{
200 pgmLock(pVM);
201 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
202 {
203 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
204 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
205 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
206 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
207 }
208 /* @todo clear the RC TLB whenever we add it. */
209 pgmUnlock(pVM);
210}
211
212/**
213 * Makes sure that there is at least one handy page ready for use.
214 *
215 * This will also take the appropriate actions when reaching water-marks.
216 *
217 * @returns VBox status code.
218 * @retval VINF_SUCCESS on success.
219 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
220 *
221 * @param pVM The VM handle.
222 *
223 * @remarks Must be called from within the PGM critical section. It may
224 * nip back to ring-3/0 in some cases.
225 */
226static int pgmPhysEnsureHandyPage(PVM pVM)
227{
228 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
229
230 /*
231 * Do we need to do anything special?
232 */
233#ifdef IN_RING3
234 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
235#else
236 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
237#endif
238 {
239 /*
240 * Allocate pages only if we're out of them, or in ring-3, almost out.
241 */
242#ifdef IN_RING3
243 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
244#else
245 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
246#endif
247 {
248 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
249 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
250#ifdef IN_RING3
251 int rc = PGMR3PhysAllocateHandyPages(pVM);
252#else
253 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
254#endif
255 if (RT_UNLIKELY(rc != VINF_SUCCESS))
256 {
257 if (RT_FAILURE(rc))
258 return rc;
259 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
260 if (!pVM->pgm.s.cHandyPages)
261 {
262 LogRel(("PGM: no more handy pages!\n"));
263 return VERR_EM_NO_MEMORY;
264 }
265 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
266 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
267#ifdef IN_RING3
268 REMR3NotifyFF(pVM);
269#else
270 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
271#endif
272 }
273 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
274 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
275 ("%u\n", pVM->pgm.s.cHandyPages),
276 VERR_INTERNAL_ERROR);
277 }
278 else
279 {
280 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
281 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
282#ifndef IN_RING3
283 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
284 {
285 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
286 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
287 }
288#endif
289 }
290 }
291
292 return VINF_SUCCESS;
293}
294
295
296/**
297 * Replaces a zero or shared page with a new page that we can write to.
298 *
299 * @returns The following VBox status codes.
300 * @retval VINF_SUCCESS on success, pPage is modified.
301 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
302 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
303 *
304 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
305 *
306 * @param pVM The VM address.
307 * @param pPage The physical page tracking structure. This will
308 * be modified on success.
309 * @param GCPhys The address of the page.
310 *
311 * @remarks Must be called from within the PGM critical section. It may
312 * nip back to ring-3/0 in some cases.
313 *
314 * @remarks This function shouldn't really fail, however if it does
315 * it probably means we've screwed up the size of handy pages and/or
316 * the low-water mark. Or, that some device I/O is causing a lot of
317 * pages to be allocated while the host is in a low-memory
318 * condition. This latter should be handled elsewhere and in a more
319 * controlled manner, it's on the @bugref{3170} todo list...
320 */
321int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
322{
323 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
324
325 /*
326 * Prereqs.
327 */
328 Assert(PGMIsLocked(pVM));
329 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
330 Assert(!PGM_PAGE_IS_MMIO(pPage));
331
332
333 /*
334 * Flush any shadow page table mappings of the page.
335 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
336 */
337 bool fFlushTLBs = false;
338 int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
339 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
340
341 /*
342 * Ensure that we've got a page handy, take it and use it.
343 */
344 int rc2 = pgmPhysEnsureHandyPage(pVM);
345 if (RT_FAILURE(rc2))
346 {
347 if (fFlushTLBs)
348 PGM_INVL_ALL_VCPU_TLBS(pVM);
349 Assert(rc2 == VERR_EM_NO_MEMORY);
350 return rc2;
351 }
352 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
353 Assert(PGMIsLocked(pVM));
354 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
355 Assert(!PGM_PAGE_IS_MMIO(pPage));
356
357 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
358 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
359 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
360 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
361 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
362 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
363
364 /*
365 * There are one or two actions to be taken the next time we allocate handy pages:
366 * - Tell the GMM (global memory manager) what the page is being used for.
367 * (Speeds up replacement operations - sharing and defragmenting.)
368 * - If the current backing is shared, it must be freed.
369 */
370 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
371 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
372
373 if (PGM_PAGE_IS_SHARED(pPage))
374 {
375 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
376 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
377 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
378
379 Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
380 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
381 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageReplaceShared));
382 pVM->pgm.s.cSharedPages--;
383 AssertMsgFailed(("TODO: copy shared page content")); /** @todo err.. what about copying the page content? */
384 }
385 else
386 {
387 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
388 STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
389 pVM->pgm.s.cZeroPages--;
390 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
391 }
392
393 /*
394 * Do the PGMPAGE modifications.
395 */
396 pVM->pgm.s.cPrivatePages++;
397 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
398 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
399 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
400
401 if ( fFlushTLBs
402 && rc != VINF_PGM_GCPHYS_ALIASED)
403 PGM_INVL_ALL_VCPU_TLBS(pVM);
404 return rc;
405}
406
407
408/**
409 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
410 *
411 * @returns VBox strict status code.
412 * @retval VINF_SUCCESS on success.
413 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
414 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
415 *
416 * @param pVM The VM address.
417 * @param pPage The physical page tracking structure.
418 * @param GCPhys The address of the page.
419 *
420 * @remarks Called from within the PGM critical section.
421 */
422int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
423{
424 switch (PGM_PAGE_GET_STATE(pPage))
425 {
426 case PGM_PAGE_STATE_WRITE_MONITORED:
427 PGM_PAGE_SET_WRITTEN_TO(pPage);
428 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
429 Assert(pVM->pgm.s.cMonitoredPages > 0);
430 pVM->pgm.s.cMonitoredPages--;
431 pVM->pgm.s.cWrittenToPages++;
432 /* fall thru */
433 default: /* to shut up GCC */
434 case PGM_PAGE_STATE_ALLOCATED:
435 return VINF_SUCCESS;
436
437 /*
438 * Zero pages can be dummy pages for MMIO or reserved memory,
439 * so we need to check the flags before joining cause with
440 * shared page replacement.
441 */
442 case PGM_PAGE_STATE_ZERO:
443 if (PGM_PAGE_IS_MMIO(pPage))
444 return VERR_PGM_PHYS_PAGE_RESERVED;
445 /* fall thru */
446 case PGM_PAGE_STATE_SHARED:
447 return pgmPhysAllocPage(pVM, pPage, GCPhys);
448 }
449}
450
451
452/**
453 * Wrapper for pgmPhysPageMakeWritable which enters the critsect.
454 *
455 * @returns VBox strict status code.
456 * @retval VINF_SUCCESS on success.
457 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
458 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
459 *
460 * @param pVM The VM address.
461 * @param pPage The physical page tracking structure.
462 * @param GCPhys The address of the page.
463 */
464int pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
465{
466 int rc = pgmLock(pVM);
467 if (RT_SUCCESS(rc))
468 {
469 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
470 pgmUnlock(pVM);
471 }
472 return rc;
473}
474
475
476/**
477 * Internal usage: Map the page specified by its GMM ID.
478 *
479 * This is similar to pgmPhysPageMap.
480 *
481 * @returns VBox status code.
482 *
483 * @param pVM The VM handle.
484 * @param idPage The Page ID.
485 * @param HCPhys The physical address (for RC).
486 * @param ppv Where to store the mapping address.
487 *
488 * @remarks Called from within the PGM critical section. The mapping is only
489 * valid while you're inside this section.
490 */
491int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
492{
493 /*
494 * Validation.
495 */
496 Assert(PGMIsLocked(pVM));
497 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
498 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
499 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
500
501#ifdef IN_RC
502 /*
503 * Map it by HCPhys.
504 */
505 return PGMDynMapHCPage(pVM, HCPhys, ppv);
506
507#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
508 /*
509 * Map it by HCPhys.
510 */
511 return pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
512
513#else
514 /*
515 * Find/make Chunk TLB entry for the mapping chunk.
516 */
517 PPGMCHUNKR3MAP pMap;
518 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
519 if (pTlbe->idChunk == idChunk)
520 {
521 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
522 pMap = pTlbe->pChunk;
523 }
524 else
525 {
526 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
527
528 /*
529 * Find the chunk, map it if necessary.
530 */
531 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
532 if (!pMap)
533 {
534# ifdef IN_RING0
535 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
536 AssertRCReturn(rc, rc);
537 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
538 Assert(pMap);
539# else
540 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
541 if (RT_FAILURE(rc))
542 return rc;
543# endif
544 }
545
546 /*
547 * Enter it into the Chunk TLB.
548 */
549 pTlbe->idChunk = idChunk;
550 pTlbe->pChunk = pMap;
551 pMap->iAge = 0;
552 }
553
554 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
555 return VINF_SUCCESS;
556#endif
557}
558
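/**
 * Worked example (illustrative only, not part of the original file): how a GMM
 * page ID decomposes into the chunk ID used for the chunk TLB lookup above and
 * the page index within that chunk. Uses only constants already referenced here.
 */
static void pgmExampleSplitPageId(uint32_t idPage, uint32_t *pidChunk, uint32_t *piPageInChunk)
{
    *pidChunk      = idPage >> GMM_CHUNKID_SHIFT;   /* selects the mapping chunk */
    *piPageInChunk = idPage & GMM_PAGEID_IDX_MASK;  /* page slot inside that chunk */
}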
559
560/**
561 * Maps a page into the current virtual address space so it can be accessed.
562 *
563 * @returns VBox status code.
564 * @retval VINF_SUCCESS on success.
565 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
566 *
567 * @param pVM The VM address.
568 * @param pPage The physical page tracking structure.
569 * @param GCPhys The address of the page.
570 * @param ppMap Where to store the address of the mapping tracking structure.
571 * @param ppv Where to store the mapping address of the page. The page
572 * offset is masked off!
573 *
574 * @remarks Called from within the PGM critical section.
575 */
576static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
577{
578 Assert(PGMIsLocked(pVM));
579
580#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
581 /*
582 * Just some sketchy GC/R0-darwin code.
583 */
584 *ppMap = NULL;
585 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
586 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
587# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
588 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
589# else
590 PGMDynMapHCPage(pVM, HCPhys, ppv);
591# endif
592 return VINF_SUCCESS;
593
594#else /* IN_RING3 || IN_RING0 */
595
596
597 /*
598 * Special case: ZERO and MMIO2 pages.
599 */
600 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
601 if (idChunk == NIL_GMM_CHUNKID)
602 {
603 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
604 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
605 {
606 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
607 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
608 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
609 *ppv = (void *)((uintptr_t)pRam->pvR3 + (GCPhys - pRam->GCPhys));
610 }
611 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
612 {
613 /** @todo deal with aliased MMIO2 pages somehow...
614 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
615 * them, that would also avoid this mess. It would actually be kind of
616 * elegant... */
617 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
618 }
619 else
620 {
621 /** @todo handle MMIO2 */
622 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
623 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
624 ("pPage=%R[pgmpage]\n", pPage),
625 VERR_INTERNAL_ERROR_2);
626 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
627 }
628 *ppMap = NULL;
629 return VINF_SUCCESS;
630 }
631
632 /*
633 * Find/make Chunk TLB entry for the mapping chunk.
634 */
635 PPGMCHUNKR3MAP pMap;
636 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
637 if (pTlbe->idChunk == idChunk)
638 {
639 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
640 pMap = pTlbe->pChunk;
641 }
642 else
643 {
644 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
645
646 /*
647 * Find the chunk, map it if necessary.
648 */
649 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
650 if (!pMap)
651 {
652#ifdef IN_RING0
653 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
654 AssertRCReturn(rc, rc);
655 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
656 Assert(pMap);
657#else
658 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
659 if (RT_FAILURE(rc))
660 return rc;
661#endif
662 }
663
664 /*
665 * Enter it into the Chunk TLB.
666 */
667 pTlbe->idChunk = idChunk;
668 pTlbe->pChunk = pMap;
669 pMap->iAge = 0;
670 }
671
672 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
673 *ppMap = pMap;
674 return VINF_SUCCESS;
675#endif /* IN_RING3 */
676}
677
678
679/**
680 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
681 *
682 * This is typically used in paths where we cannot use the TLB methods (like ROM
683 * pages) or where there is no point in using them since we won't get many hits.
684 *
685 * @returns VBox strict status code.
686 * @retval VINF_SUCCESS on success.
687 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
688 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
689 *
690 * @param pVM The VM address.
691 * @param pPage The physical page tracking structure.
692 * @param GCPhys The address of the page.
693 * @param ppv Where to store the mapping address of the page. The page
694 * offset is masked off!
695 *
696 * @remarks Called from within the PGM critical section. The mapping is only
697 * valid while you're inside this section.
698 */
699int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
700{
701 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
702 if (RT_SUCCESS(rc))
703 {
704 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
705 PPGMPAGEMAP pMapIgnore;
706 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
707 if (RT_FAILURE(rc2)) /* preserve rc */
708 rc = rc2;
709 }
710 return rc;
711}
712
713
714/**
715 * Maps a page into the current virtual address space so it can be accessed for
716 * both writing and reading.
717 *
718 * This is typically used in paths where we cannot use the TLB methods (like ROM
719 * pages) or where there is no point in using them since we won't get many hits.
720 *
721 * @returns VBox status code.
722 * @retval VINF_SUCCESS on success.
723 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
724 *
725 * @param pVM The VM address.
726 * @param pPage The physical page tracking structure. Must be in the
727 * allocated state.
728 * @param GCPhys The address of the page.
729 * @param ppv Where to store the mapping address of the page. The page
730 * offset is masked off!
731 *
732 * @remarks Called from within the PGM critical section. The mapping is only
733 * valid while you're inside this section.
734 */
735int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
736{
737 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
738 PPGMPAGEMAP pMapIgnore;
739 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
740}
741
742
743/**
744 * Maps a page into the current virtual address space so it can be accessed for
745 * reading.
746 *
747 * This is typically used in paths where we cannot use the TLB methods (like ROM
748 * pages) or where there is no point in using them since we won't get many hits.
749 *
750 * @returns VBox status code.
751 * @retval VINF_SUCCESS on success.
752 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
753 *
754 * @param pVM The VM address.
755 * @param pPage The physical page tracking structure.
756 * @param GCPhys The address of the page.
757 * @param ppv Where to store the mapping address of the page. The page
758 * offset is masked off!
759 *
760 * @remarks Called from within the PGM critical section. The mapping is only
761 * valid while you're inside this section.
762 */
763int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
764{
765 PPGMPAGEMAP pMapIgnore;
766 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
767}
768
769
770#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
771/**
772 * Load a guest page into the ring-3 physical TLB.
773 *
774 * @returns VBox status code.
775 * @retval VINF_SUCCESS on success
776 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
777 * @param pPGM The PGM instance pointer.
778 * @param GCPhys The guest physical address in question.
779 */
780int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
781{
782 Assert(PGMIsLocked(PGM2VM(pPGM)));
783 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
784
785 /*
786 * Find the ram range.
787 * 99.8% of requests are expected to be in the first range.
788 */
789 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
790 RTGCPHYS off = GCPhys - pRam->GCPhys;
791 if (RT_UNLIKELY(off >= pRam->cb))
792 {
793 do
794 {
795 pRam = pRam->CTX_SUFF(pNext);
796 if (!pRam)
797 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
798 off = GCPhys - pRam->GCPhys;
799 } while (off >= pRam->cb);
800 }
801
802 /*
803 * Map the page.
804 * Make a special case for the zero page as it is kind of special.
805 */
806 PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
807 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
808 if (!PGM_PAGE_IS_ZERO(pPage))
809 {
810 void *pv;
811 PPGMPAGEMAP pMap;
812 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
813 if (RT_FAILURE(rc))
814 return rc;
815 pTlbe->pMap = pMap;
816 pTlbe->pv = pv;
817 }
818 else
819 {
820 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
821 pTlbe->pMap = NULL;
822 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
823 }
824#if 0 /* broken */
825 pTlbe->GCPhys = (GCPhys & X86_PTE_PAE_PG_MASK);
826#endif
827 pTlbe->pPage = pPage;
828 return VINF_SUCCESS;
829}
830
831
832/**
833 * Load a guest page into the ring-3 physical TLB.
834 *
835 * @returns VBox status code.
836 * @retval VINF_SUCCESS on success
837 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
838 *
839 * @param pPGM The PGM instance pointer.
840 * @param pPage Pointer to the PGMPAGE structure corresponding to
841 * GCPhys.
842 * @param GCPhys The guest physical address in question.
843 */
844int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys)
845{
846 Assert(PGMIsLocked(PGM2VM(pPGM)));
847 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
848
849 /*
850 * Map the page.
851 * Make a special case for the zero page as it is kind of special.
852 */
853 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
854 if (!PGM_PAGE_IS_ZERO(pPage))
855 {
856 void *pv;
857 PPGMPAGEMAP pMap;
858 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
859 if (RT_FAILURE(rc))
860 return rc;
861 pTlbe->pMap = pMap;
862 pTlbe->pv = pv;
863 }
864 else
865 {
866 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
867 pTlbe->pMap = NULL;
868 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
869 }
870#if 0 /* broken */
871 pTlbe->GCPhys = (GCPhys & X86_PTE_PAE_PG_MASK);
872#endif
873 pTlbe->pPage = pPage;
874 return VINF_SUCCESS;
875}
876#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
877
878
879/**
880 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
881 * own the PGM lock and therefore does not need to lock the mapped page.
882 *
883 * @returns VBox status code.
884 * @retval VINF_SUCCESS on success.
885 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
886 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
887 *
888 * @param pVM The VM handle.
889 * @param GCPhys The guest physical address of the page that should be mapped.
890 * @param pPage Pointer to the PGMPAGE structure for the page.
891 * @param ppv Where to store the address corresponding to GCPhys.
892 *
893 * @internal
894 */
895int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
896{
897 int rc;
898 AssertReturn(pPage, VERR_INTERNAL_ERROR);
899 Assert(PGMIsLocked(pVM));
900
901 /*
902 * Make sure the page is writable.
903 */
904 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
905 {
906 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
907 if (RT_FAILURE(rc))
908 return rc;
909 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
910 }
911 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
912
913 /*
914 * Get the mapping address.
915 */
916#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
917 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
918#else
919 PPGMPAGEMAPTLBE pTlbe;
920 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
921 if (RT_FAILURE(rc))
922 return rc;
923 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
924#endif
925 return VINF_SUCCESS;
926}
927
928
929/**
930 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
931 * own the PGM lock and therefore does not need to lock the mapped page.
932 *
933 * @returns VBox status code.
934 * @retval VINF_SUCCESS on success.
935 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
936 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
937 *
938 * @param pVM The VM handle.
939 * @param GCPhys The guest physical address of the page that should be mapped.
940 * @param pPage Pointer to the PGMPAGE structure for the page.
941 * @param ppv Where to store the address corresponding to GCPhys.
942 *
943 * @internal
944 */
945int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
946{
947 AssertReturn(pPage, VERR_INTERNAL_ERROR);
948 Assert(PGMIsLocked(pVM));
949 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
950
951 /*
952 * Get the mapping address.
953 */
954#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
955 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
956#else
957 PPGMPAGEMAPTLBE pTlbe;
958 int rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
959 if (RT_FAILURE(rc))
960 return rc;
961 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
962#endif
963 return VINF_SUCCESS;
964}
965
966
967/**
968 * Requests the mapping of a guest page into the current context.
969 *
970 * This API should only be used for very short periods, as it will consume
971 * scarce resources (R0 and GC) in the mapping cache. When you're done
972 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
973 *
974 * This API will assume your intention is to write to the page, and will
975 * therefore replace shared and zero pages. If you do not intend to modify
976 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
977 *
978 * @returns VBox status code.
979 * @retval VINF_SUCCESS on success.
980 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
981 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
982 *
983 * @param pVM The VM handle.
984 * @param GCPhys The guest physical address of the page that should be mapped.
985 * @param ppv Where to store the address corresponding to GCPhys.
986 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
987 *
988 * @remarks The caller is responsible for dealing with access handlers.
989 * @todo Add an informational return code for pages with access handlers?
990 *
991 * @remark Avoid calling this API from within critical sections (other than the
992 * PGM one) because of the deadlock risk. External threads may need to
993 * delegate jobs to the EMTs.
994 * @thread Any thread.
995 */
996VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
997{
998#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
999
1000 /*
1001 * Find the page and make sure it's writable.
1002 */
1003 PPGMPAGE pPage;
1004 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1005 if (RT_SUCCESS(rc))
1006 {
1007 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1008 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1009 if (RT_SUCCESS(rc))
1010 {
1011 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1012# if 0
1013 pLock->pvMap = 0;
1014 pLock->pvPage = pPage;
1015# else
1016 pLock->u32Dummy = UINT32_MAX;
1017# endif
1018 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1019 rc = VINF_SUCCESS;
1020 }
1021 }
1022
1023#else /* IN_RING3 || IN_RING0 */
1024 int rc = pgmLock(pVM);
1025 AssertRCReturn(rc, rc);
1026
1027 /*
1028 * Query the Physical TLB entry for the page (may fail).
1029 */
1030 PPGMPAGEMAPTLBE pTlbe;
1031 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1032 if (RT_SUCCESS(rc))
1033 {
1034 /*
1035 * If the page is shared, the zero page, or being write monitored,
1036 * it must be converted to a page that's writable if possible.
1037 */
1038 PPGMPAGE pPage = pTlbe->pPage;
1039 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1040 {
1041 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1042 if (RT_SUCCESS(rc))
1043 {
1044 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1045 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1046 }
1047 }
1048 if (RT_SUCCESS(rc))
1049 {
1050 /*
1051 * Now, just perform the locking and calculate the return address.
1052 */
1053 PPGMPAGEMAP pMap = pTlbe->pMap;
1054 if (pMap)
1055 pMap->cRefs++;
1056
1057 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1058 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1059 {
1060 if (cLocks == 0)
1061 pVM->pgm.s.cWriteLockedPages++;
1062 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1063 }
1064 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
1065 {
1066 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1067 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
1068 if (pMap)
1069 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1070 }
1071
1072 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1073 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1074 pLock->pvMap = pMap;
1075 }
1076 }
1077
1078 pgmUnlock(pVM);
1079#endif /* IN_RING3 || IN_RING0 */
1080 return rc;
1081}
1082
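/**
 * Usage sketch (illustrative only, not part of the original file): the short
 * map / modify / release pattern described above. The helper name is made up;
 * keep the window before PGMPhysReleasePageMappingLock as small as possible.
 */
static int pgmExamplePokeGuestByte(PVM pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    void            *pv;
    PGMPAGEMAPLOCK   Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;                    /* write through the mapping */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release it ASAP */
    }
    return rc;
}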
1083
1084/**
1085 * Requests the mapping of a guest page into the current context.
1086 *
1087 * This API should only be used for very short periods, as it will consume
1088 * scarce resources (R0 and GC) in the mapping cache. When you're done
1089 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1090 *
1091 * @returns VBox status code.
1092 * @retval VINF_SUCCESS on success.
1093 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1094 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1095 *
1096 * @param pVM The VM handle.
1097 * @param GCPhys The guest physical address of the page that should be mapped.
1098 * @param ppv Where to store the address corresponding to GCPhys.
1099 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1100 *
1101 * @remarks The caller is responsible for dealing with access handlers.
1102 * @todo Add an informational return code for pages with access handlers?
1103 *
1104 * @remark Avoid calling this API from within critical sections (other than
1105 * the PGM one) because of the deadlock risk.
1106 * @thread Any thread.
1107 */
1108VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1109{
1110#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1111
1112 /*
1113 * Find the page and make sure it's readable.
1114 */
1115 PPGMPAGE pPage;
1116 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1117 if (RT_SUCCESS(rc))
1118 {
1119 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1120 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1121 else
1122 {
1123 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1124# if 0
1125 pLock->pvMap = 0;
1126 pLock->pvPage = pPage;
1127# else
1128 pLock->u32Dummy = UINT32_MAX;
1129# endif
1130 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1131 rc = VINF_SUCCESS;
1132 }
1133 }
1134
1135#else /* IN_RING3 || IN_RING0 */
1136 int rc = pgmLock(pVM);
1137 AssertRCReturn(rc, rc);
1138
1139 /*
1140 * Query the Physical TLB entry for the page (may fail).
1141 */
1142 PPGMPAGEMAPTLBE pTlbe;
1143 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1144 if (RT_SUCCESS(rc))
1145 {
1146 /* MMIO pages don't have any readable backing. */
1147 PPGMPAGE pPage = pTlbe->pPage;
1148 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1149 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1150 else
1151 {
1152 /*
1153 * Now, just perform the locking and calculate the return address.
1154 */
1155 PPGMPAGEMAP pMap = pTlbe->pMap;
1156 if (pMap)
1157 pMap->cRefs++;
1158
1159 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1160 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1161 {
1162 if (cLocks == 0)
1163 pVM->pgm.s.cReadLockedPages++;
1164 PGM_PAGE_INC_READ_LOCKS(pPage);
1165 }
1166 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
1167 {
1168 PGM_PAGE_INC_READ_LOCKS(pPage);
1169 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
1170 if (pMap)
1171 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1172 }
1173
1174 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1175 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1176 pLock->pvMap = pMap;
1177 }
1178 }
1179
1180 pgmUnlock(pVM);
1181#endif /* IN_RING3 || IN_RING0 */
1182 return rc;
1183}
1184
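/**
 * Usage sketch (illustrative only, not part of the original file): the
 * read-only variant of the same pattern; the lock must still be released with
 * PGMPhysReleasePageMappingLock even though nothing is modified.
 */
static int pgmExamplePeekGuestByte(PVM pVM, RTGCPHYS GCPhys, uint8_t *pbValue)
{
    void const      *pv;
    PGMPAGEMAPLOCK   Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pbValue = *(uint8_t const *)pv;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}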
1185
1186/**
1187 * Requests the mapping of a guest page given by virtual address into the current context.
1188 *
1189 * This API should only be used for very short periods, as it will consume
1190 * scarce resources (R0 and GC) in the mapping cache. When you're done
1191 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1192 *
1193 * This API will assume your intention is to write to the page, and will
1194 * therefore replace shared and zero pages. If you do not intend to modify
1195 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1196 *
1197 * @returns VBox status code.
1198 * @retval VINF_SUCCESS on success.
1199 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1200 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1201 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1202 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1203 *
1204 * @param pVCpu VMCPU handle.
1205 * @param GCPtr The guest virtual address of the page that should be mapped.
1206 * @param ppv Where to store the address corresponding to GCPhys.
1207 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1208 *
1209 * @remark Avoid calling this API from within critical sections (other than
1210 * the PGM one) because of the deadlock risk.
1211 * @thread EMT
1212 */
1213VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1214{
1215 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1216 RTGCPHYS GCPhys;
1217 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1218 if (RT_SUCCESS(rc))
1219 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1220 return rc;
1221}
1222
1223
1224/**
1225 * Requests the mapping of a guest page given by virtual address into the current context.
1226 *
1227 * This API should only be used for very short periods, as it will consume
1228 * scarce resources (R0 and GC) in the mapping cache. When you're done
1229 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1230 *
1231 * @returns VBox status code.
1232 * @retval VINF_SUCCESS on success.
1233 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1234 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1235 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1236 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1237 *
1238 * @param pVCpu VMCPU handle.
1239 * @param GCPtr The guest virtual address of the page that should be mapped.
1240 * @param ppv Where to store the address corresponding to GCPhys.
1241 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1242 *
1243 * @remark Avoid calling this API from within critical sections (other than
1244 * the PGM one) because of the deadlock risk.
1245 * @thread EMT
1246 */
1247VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1248{
1249 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1250 RTGCPHYS GCPhys;
1251 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1252 if (RT_SUCCESS(rc))
1253 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1254 return rc;
1255}
1256
1257
1258/**
1259 * Release the mapping of a guest page.
1260 *
1261 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1262 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1263 *
1264 * @param pVM The VM handle.
1265 * @param pLock The lock structure initialized by the mapping function.
1266 */
1267VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1268{
1269#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1270 /* currently nothing to do here. */
1271 Assert(pLock->u32Dummy == UINT32_MAX);
1272 pLock->u32Dummy = 0;
1273
1274#else /* IN_RING3 */
1275 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1276 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1277 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1278
1279 pLock->uPageAndType = 0;
1280 pLock->pvMap = NULL;
1281
1282 pgmLock(pVM);
1283 if (fWriteLock)
1284 {
1285 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1286 Assert(cLocks > 0);
1287 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1288 {
1289 if (cLocks == 1)
1290 {
1291 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1292 pVM->pgm.s.cWriteLockedPages--;
1293 }
1294 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1295 }
1296
1297 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1298 {
1299 PGM_PAGE_SET_WRITTEN_TO(pPage);
1300 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1301 Assert(pVM->pgm.s.cMonitoredPages > 0);
1302 pVM->pgm.s.cMonitoredPages--;
1303 pVM->pgm.s.cWrittenToPages++;
1304 }
1305 }
1306 else
1307 {
1308 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1309 Assert(cLocks > 0);
1310 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1311 {
1312 if (cLocks == 1)
1313 {
1314 Assert(pVM->pgm.s.cReadLockedPages > 0);
1315 pVM->pgm.s.cReadLockedPages--;
1316 }
1317 PGM_PAGE_DEC_READ_LOCKS(pPage);
1318 }
1319 }
1320
1321 if (pMap)
1322 {
1323 Assert(pMap->cRefs >= 1);
1324 pMap->cRefs--;
1325 pMap->iAge = 0;
1326 }
1327 pgmUnlock(pVM);
1328#endif /* IN_RING3 */
1329}
1330
1331
1332/**
1333 * Converts a GC physical address to a HC ring-3 pointer.
1334 *
1335 * @returns VINF_SUCCESS on success.
1336 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1337 * page but has no physical backing.
1338 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1339 * GC physical address.
1340 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1341 * a dynamic ram chunk boundary
1342 *
1343 * @param pVM The VM handle.
1344 * @param GCPhys The GC physical address to convert.
1345 * @param cbRange Physical range
1346 * @param pR3Ptr Where to store the R3 pointer on success.
1347 *
1348 * @deprecated Avoid when possible!
1349 */
1350VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
1351{
1352/** @todo this is kind of hacky and needs some more work. */
1353#ifndef DEBUG_sandervl
1354 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1355#endif
1356
1357 Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): dont use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
1358#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1359 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1360#else
1361 pgmLock(pVM);
1362
1363 PPGMRAMRANGE pRam;
1364 PPGMPAGE pPage;
1365 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
1366 if (RT_SUCCESS(rc))
1367 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
1368
1369 pgmUnlock(pVM);
1370 Assert(rc <= VINF_SUCCESS);
1371 return rc;
1372#endif
1373}
1374
1375
1376#ifdef VBOX_STRICT
1377/**
1378 * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
1379 *
1380 * @returns The R3Ptr, NIL_RTR3PTR on failure.
1381 * @param pVM The VM handle.
1382 * @param GCPhys The GC physical address.
1383 * @param cbRange Physical range.
1384 *
1385 * @deprecated Avoid when possible.
1386 */
1387VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
1388{
1389 RTR3PTR R3Ptr;
1390 int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
1391 if (RT_SUCCESS(rc))
1392 return R3Ptr;
1393 return NIL_RTR3PTR;
1394}
1395#endif /* VBOX_STRICT */
1396
1397
1398/**
1399 * Converts a guest pointer to a GC physical address.
1400 *
1401 * This uses the current CR3/CR0/CR4 of the guest.
1402 *
1403 * @returns VBox status code.
1404 * @param pVCpu The VMCPU Handle
1405 * @param GCPtr The guest pointer to convert.
1406 * @param pGCPhys Where to store the GC physical address.
1407 */
1408VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1409{
1410 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1411 if (pGCPhys && RT_SUCCESS(rc))
1412 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1413 return rc;
1414}
1415
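/**
 * Usage sketch (illustrative only, not part of the original file): translating
 * a guest virtual address with PGMPhysGCPtr2GCPhys; the page offset is already
 * merged into the result as shown above. The helper name is made up.
 */
static int pgmExampleTranslateGuestPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
    if (RT_SUCCESS(rc))
        Log(("guest %RGv -> physical %RGp\n", GCPtr, GCPhys));
    else
        Log(("guest %RGv not present (%Rrc)\n", GCPtr, rc)); /* page/directory not present */
    return rc;
}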
1416
1417/**
1418 * Converts a guest pointer to a HC physical address.
1419 *
1420 * This uses the current CR3/CR0/CR4 of the guest.
1421 *
1422 * @returns VBox status code.
1423 * @param pVCpu The VMCPU Handle
1424 * @param GCPtr The guest pointer to convert.
1425 * @param pHCPhys Where to store the HC physical address.
1426 */
1427VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
1428{
1429 PVM pVM = pVCpu->CTX_SUFF(pVM);
1430 RTGCPHYS GCPhys;
1431 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1432 if (RT_SUCCESS(rc))
1433 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
1434 return rc;
1435}
1436
1437
1438/**
1439 * Converts a guest pointer to a R3 pointer.
1440 *
1441 * This uses the current CR3/CR0/CR4 of the guest.
1442 *
1443 * @returns VBox status code.
1444 * @param pVCpu The VMCPU Handle
1445 * @param GCPtr The guest pointer to convert.
1446 * @param pR3Ptr Where to store the R3 virtual address.
1447 *
1448 * @deprecated Don't use this.
1449 */
1450VMMDECL(int) PGMPhysGCPtr2R3Ptr(PVMCPU pVCpu, RTGCPTR GCPtr, PRTR3PTR pR3Ptr)
1451{
1452 PVM pVM = pVCpu->CTX_SUFF(pVM);
1453 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1454 RTGCPHYS GCPhys;
1455 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1456 if (RT_SUCCESS(rc))
1457 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pR3Ptr);
1458 return rc;
1459}
1460
1461
1462
1463#undef LOG_GROUP
1464#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
1465
1466
1467#ifdef IN_RING3
1468/**
1469 * Cache PGMPhys memory access
1470 *
1471 * @param pVM VM Handle.
1472 * @param pCache Cache structure pointer
1473 * @param GCPhys GC physical address
1474 * @param pbR3 R3 pointer corresponding to the physical page
1475 *
1476 * @thread EMT.
1477 */
1478static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
1479{
1480 uint32_t iCacheIndex;
1481
1482 Assert(VM_IS_EMT(pVM));
1483
1484 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
1485 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
1486
1487 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
1488
1489 ASMBitSet(&pCache->aEntries, iCacheIndex);
1490
1491 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
1492 pCache->Entry[iCacheIndex].pbR3 = pbR3;
1493}
1494#endif /* IN_RING3 */
1495
1496
1497/**
1498 * Deals with reading from a page with one or more ALL access handlers.
1499 *
1500 * @returns VBox status code. Can be ignored in ring-3.
1501 * @retval VINF_SUCCESS.
1502 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1503 *
1504 * @param pVM The VM handle.
1505 * @param pPage The page descriptor.
1506 * @param GCPhys The physical address to start reading at.
1507 * @param pvBuf Where to put the bits we read.
1508 * @param cb How much to read - less or equal to a page.
1509 */
1510static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
1511{
1512 /*
1513 * The most frequent access here is MMIO and shadowed ROM.
1514 * The current code ASSUMES all these access handlers cover full pages!
1515 */
1516
1517 /*
1518 * Whatever we do, we need the source page, so map it first.
1519 */
1520 const void *pvSrc = NULL;
1521 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
1522 if (RT_FAILURE(rc))
1523 {
1524 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1525 GCPhys, pPage, rc));
1526 memset(pvBuf, 0xff, cb);
1527 return VINF_SUCCESS;
1528 }
1529 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1530
1531 /*
1532 * Deal with any physical handlers.
1533 */
1534 PPGMPHYSHANDLER pPhys = NULL;
1535 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
1536 {
1537#ifdef IN_RING3
1538 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1539 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1540 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
1541 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
1542 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1543 Assert(pPhys->CTX_SUFF(pfnHandler));
1544
1545 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
1546 void *pvUser = pPhys->CTX_SUFF(pvUser);
1547
1548 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
1549 STAM_PROFILE_START(&pPhys->Stat, h);
1550 Assert(PGMIsLockOwner(pVM));
1551 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1552 pgmUnlock(pVM);
1553 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
1554 pgmLock(pVM);
1555# ifdef VBOX_WITH_STATISTICS
1556 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1557 if (pPhys)
1558 STAM_PROFILE_STOP(&pPhys->Stat, h);
1559# else
1560 pPhys = NULL; /* might not be valid anymore. */
1561# endif
1562 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
1563#else
1564 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1565 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1566 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1567#endif
1568 }
1569
1570 /*
1571 * Deal with any virtual handlers.
1572 */
1573 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
1574 {
1575 unsigned iPage;
1576 PPGMVIRTHANDLER pVirt;
1577
1578 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
1579 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
1580 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
1581 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1582 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
1583
1584#ifdef IN_RING3
1585 if (pVirt->pfnHandlerR3)
1586 {
1587 if (!pPhys)
1588 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1589 else
1590 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
1591 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1592 + (iPage << PAGE_SHIFT)
1593 + (GCPhys & PAGE_OFFSET_MASK);
1594
1595 STAM_PROFILE_START(&pVirt->Stat, h);
1596 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
1597 STAM_PROFILE_STOP(&pVirt->Stat, h);
1598 if (rc2 == VINF_SUCCESS)
1599 rc = VINF_SUCCESS;
1600 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
1601 }
1602 else
1603 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1604#else
1605 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1606 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1607 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1608#endif
1609 }
1610
1611 /*
1612 * Take the default action.
1613 */
1614 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1615 memcpy(pvBuf, pvSrc, cb);
1616 return rc;
1617}
1618
1619
1620/**
1621 * Read physical memory.
1622 *
1623 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
1624 * want to ignore those.
1625 *
1626 * @returns VBox status code. Can be ignored in ring-3.
1627 * @retval VINF_SUCCESS.
1628 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1629 *
1630 * @param pVM VM Handle.
1631 * @param GCPhys Physical address start reading from.
1632 * @param pvBuf Where to put the read bits.
1633 * @param cbRead How many bytes to read.
1634 */
1635VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1636{
1637 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
1638 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
1639
1640 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysRead));
1641 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
1642
1643 pgmLock(pVM);
1644
1645 /*
1646 * Copy loop on ram ranges.
1647 */
1648 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1649 for (;;)
1650 {
1651 /* Find range. */
1652 while (pRam && GCPhys > pRam->GCPhysLast)
1653 pRam = pRam->CTX_SUFF(pNext);
1654 /* Inside range or not? */
1655 if (pRam && GCPhys >= pRam->GCPhys)
1656 {
1657 /*
1658 * Must work our way thru this page by page.
1659 */
1660 RTGCPHYS off = GCPhys - pRam->GCPhys;
1661 while (off < pRam->cb)
1662 {
1663 unsigned iPage = off >> PAGE_SHIFT;
1664 PPGMPAGE pPage = &pRam->aPages[iPage];
1665 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1666 if (cb > cbRead)
1667 cb = cbRead;
1668
1669 /*
1670 * Any ALL access handlers?
1671 */
1672 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
1673 {
1674 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
1675 if (RT_FAILURE(rc))
1676 {
1677 pgmUnlock(pVM);
1678 return rc;
1679 }
1680 }
1681 else
1682 {
1683 /*
1684 * Get the pointer to the page.
1685 */
1686 const void *pvSrc;
1687 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
1688 if (RT_SUCCESS(rc))
1689 memcpy(pvBuf, pvSrc, cb);
1690 else
1691 {
1692 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1693 pRam->GCPhys + off, pPage, rc));
1694 memset(pvBuf, 0xff, cb);
1695 }
1696 }
1697
1698 /* next page */
1699 if (cb >= cbRead)
1700 {
1701 pgmUnlock(pVM);
1702 return VINF_SUCCESS;
1703 }
1704 cbRead -= cb;
1705 off += cb;
1706 pvBuf = (char *)pvBuf + cb;
1707 } /* walk pages in ram range. */
1708
1709 GCPhys = pRam->GCPhysLast + 1;
1710 }
1711 else
1712 {
1713 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
1714
1715 /*
1716 * Unassigned address space.
1717 */
1718 if (!pRam)
1719 break;
1720 size_t cb = pRam->GCPhys - GCPhys;
1721 if (cb >= cbRead)
1722 {
1723 memset(pvBuf, 0xff, cbRead);
1724 break;
1725 }
1726 memset(pvBuf, 0xff, cb);
1727
1728 cbRead -= cb;
1729 pvBuf = (char *)pvBuf + cb;
1730 GCPhys += cb;
1731 }
1732 } /* Ram range walk */
1733
1734 pgmUnlock(pVM);
1735 return VINF_SUCCESS;
1736}
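/*
 * Usage sketch (hypothetical caller, not part of this file): a ring-3 device or
 * debugger helper pulling a small guest physical range into a local buffer.
 * pVM and GCPhys are assumed to come from the caller's context.
 *
 *     uint8_t abBuf[64];
 *     int rc = PGMPhysRead(pVM, GCPhys, abBuf, sizeof(abBuf));
 *     AssertRC(rc);   // in R3 this only returns VINF_SUCCESS; in R0/RC it may
 *                     // return VERR_PGM_PHYS_WR_HIT_HANDLER and the caller
 *                     // reschedules the access to ring-3.
 */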
1737
1738
1739/**
1740 * Deals with writing to a page with one or more WRITE or ALL access handlers.
1741 *
1742 * @returns VBox status code. Can be ignored in ring-3.
1743 * @retval VINF_SUCCESS.
1744 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1745 *
1746 * @param pVM The VM handle.
1747 * @param pPage The page descriptor.
1748 * @param GCPhys The physical address to start writing at.
1749 * @param pvBuf What to write.
1750 * @param cbWrite How much to write - less or equal to a page.
1751 */
1752static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
1753{
1754 void *pvDst = NULL;
1755 int rc;
1756
1757 /*
1758 * Give priority to physical handlers (like #PF does).
1759 *
1760 * Hope for a lonely physical handler first that covers the whole
1761 * write area. This should be a pretty frequent case with MMIO and
1762 * the heavy usage of full page handlers in the page pool.
1763 */
1764 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
1765 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
1766 {
1767 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1768 if (pCur)
1769 {
1770 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1771 Assert(pCur->CTX_SUFF(pfnHandler));
1772
1773 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
1774 if (cbRange > cbWrite)
1775 cbRange = cbWrite;
1776
1777#ifndef IN_RING3
1778 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1779 NOREF(cbRange);
1780 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1781 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1782
1783#else /* IN_RING3 */
1784 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1785 if (!PGM_PAGE_IS_MMIO(pPage))
1786 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1787 else
1788 rc = VINF_SUCCESS;
1789 if (RT_SUCCESS(rc))
1790 {
1791 PFNPGMR3PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
1792 void *pvUser = pCur->CTX_SUFF(pvUser);
1793
1794 STAM_PROFILE_START(&pCur->Stat, h);
1795 Assert(PGMIsLockOwner(pVM));
1796 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1797 pgmUnlock(pVM);
1798 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
1799 pgmLock(pVM);
1800# ifdef VBOX_WITH_STATISTICS
1801 pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1802 if (pCur)
1803 STAM_PROFILE_STOP(&pCur->Stat, h);
1804# else
1805 pCur = NULL; /* might not be valid anymore. */
1806# endif
1807 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1808 memcpy(pvDst, pvBuf, cbRange);
1809 else
1810 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
1811 }
1812 else
1813 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1814 GCPhys, pPage, rc), rc);
1815 if (RT_LIKELY(cbRange == cbWrite))
1816 return VINF_SUCCESS;
1817
1818 /* more fun to be had below */
1819 cbWrite -= cbRange;
1820 GCPhys += cbRange;
1821 pvBuf = (uint8_t *)pvBuf + cbRange;
1822 pvDst = (uint8_t *)pvDst + cbRange;
1823#endif /* IN_RING3 */
1824 }
1825 /* else: the handler is somewhere else in the page, deal with it below. */
1826 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
1827 }
1828 /*
1829 * A virtual handler without any interfering physical handlers.
1830 * Hopefully it'll cover the whole write.
1831 */
1832 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
1833 {
1834 unsigned iPage;
1835 PPGMVIRTHANDLER pCur;
1836 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
1837 if (RT_SUCCESS(rc))
1838 {
1839 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
1840 if (cbRange > cbWrite)
1841 cbRange = cbWrite;
1842
1843#ifndef IN_RING3
1844 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1845 NOREF(cbRange);
1846 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1847 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1848
1849#else /* IN_RING3 */
1850
1851 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1852 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1853 if (RT_SUCCESS(rc))
1854 {
1855 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1856 if (pCur->pfnHandlerR3)
1857 {
1858 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
1859 + (iPage << PAGE_SHIFT)
1860 + (GCPhys & PAGE_OFFSET_MASK);
1861
1862 STAM_PROFILE_START(&pCur->Stat, h);
1863 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
1864 STAM_PROFILE_STOP(&pCur->Stat, h);
1865 }
1866 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1867 memcpy(pvDst, pvBuf, cbRange);
1868 else
1869 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
1870 }
1871 else
1872 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1873 GCPhys, pPage, rc), rc);
1874 if (RT_LIKELY(cbRange == cbWrite))
1875 return VINF_SUCCESS;
1876
1877 /* more fun to be had below */
1878 cbWrite -= cbRange;
1879 GCPhys += cbRange;
1880 pvBuf = (uint8_t *)pvBuf + cbRange;
1881 pvDst = (uint8_t *)pvDst + cbRange;
1882#endif
1883 }
1884 /* else: the handler is somewhere else in the page, deal with it below. */
1885 }
1886
1887 /*
1888 * Deal with all the odd ends.
1889 */
1890
1891 /* We need a writable destination page. */
1892 if (!pvDst)
1893 {
1894 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1895 AssertLogRelMsgReturn(RT_SUCCESS(rc),
1896 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1897 GCPhys, pPage, rc), rc);
1898 }
1899
1900 /* The loop state (big + ugly). */
1901 unsigned iVirtPage = 0;
1902 PPGMVIRTHANDLER pVirt = NULL;
1903 uint32_t offVirt = PAGE_SIZE;
1904 uint32_t offVirtLast = PAGE_SIZE;
1905 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
1906
1907 PPGMPHYSHANDLER pPhys = NULL;
1908 uint32_t offPhys = PAGE_SIZE;
1909 uint32_t offPhysLast = PAGE_SIZE;
1910 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
1911
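 /* Note on the loop state: offPhys/offVirt are the distances from GCPhys to the
  * start of the next physical/virtual handler window (0 = GCPhys is already
  * inside one, PAGE_SIZE = no such handler in this page), while
  * offPhysLast/offVirtLast are the distances to the last byte those windows
  * cover. Each iteration clips cbRange so the chunk is either entirely
  * handler-free or entirely covered by the handler(s) in question, then all
  * four offsets are advanced by cbRange. */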
1912 /* The loop. */
1913 for (;;)
1914 {
1915 /*
1916 * Find the closest handler at or above GCPhys.
1917 */
1918 if (fMoreVirt && !pVirt)
1919 {
1920 int rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
1921 if (RT_SUCCESS(rc))
1922 {
1923 offVirt = 0;
1924 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1925 }
1926 else
1927 {
1928 PPGMPHYS2VIRTHANDLER pVirtPhys;
1929 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
1930 GCPhys, true /* fAbove */);
1931 if ( pVirtPhys
1932 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
1933 {
1934 /* ASSUME that pVirtPhys only covers one page. */
1935 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
1936 Assert(pVirtPhys->Core.Key > GCPhys);
1937
1938 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
1939 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
1940 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1941 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1942 }
1943 else
1944 {
1945 pVirt = NULL;
1946 fMoreVirt = false;
1947 offVirt = offVirtLast = PAGE_SIZE;
1948 }
1949 }
1950 }
1951
1952 if (fMorePhys && !pPhys)
1953 {
1954 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1955 if (pPhys)
1956 {
1957 offPhys = 0;
1958 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
1959 }
1960 else
1961 {
1962 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
1963 GCPhys, true /* fAbove */);
1964 if ( pPhys
1965 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
1966 {
1967 offPhys = pPhys->Core.Key - GCPhys;
1968 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
1969 }
1970 else
1971 {
1972 pPhys = NULL;
1973 fMorePhys = false;
1974 offPhys = offPhysLast = PAGE_SIZE;
1975 }
1976 }
1977 }
1978
1979 /*
1980 * Handle access to space without handlers (that's easy).
1981 */
1982 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1983 uint32_t cbRange = (uint32_t)cbWrite;
1984 if (offPhys && offVirt)
1985 {
1986 if (cbRange > offPhys)
1987 cbRange = offPhys;
1988 if (cbRange > offVirt)
1989 cbRange = offVirt;
1990 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
1991 }
1992 /*
1993 * Physical handler.
1994 */
1995 else if (!offPhys && offVirt)
1996 {
1997 if (cbRange > offPhysLast + 1)
1998 cbRange = offPhysLast + 1;
1999 if (cbRange > offVirt)
2000 cbRange = offVirt;
2001#ifdef IN_RING3
2002 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2003 void *pvUser = pPhys->CTX_SUFF(pvUser);
2004
2005 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2006 STAM_PROFILE_START(&pPhys->Stat, h);
2007 Assert(PGMIsLockOwner(pVM));
2008 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2009 pgmUnlock(pVM);
2010 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2011 pgmLock(pVM);
2012# ifdef VBOX_WITH_STATISTICS
2013 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2014 if (pPhys)
2015 STAM_PROFILE_STOP(&pPhys->Stat, h);
2016# else
2017 pPhys = NULL; /* might not be valid anymore. */
2018# endif
2019 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2020#else
2021 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2022 NOREF(cbRange);
2023 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2024 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2025#endif
2026 }
2027 /*
2028 * Virtual handler.
2029 */
2030 else if (offPhys && !offVirt)
2031 {
2032 if (cbRange > offVirtLast + 1)
2033 cbRange = offVirtLast + 1;
2034 if (cbRange > offPhys)
2035 cbRange = offPhys;
2036#ifdef IN_RING3
2037 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2038 if (pVirt->pfnHandlerR3)
2039 {
2040 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2041 + (iVirtPage << PAGE_SHIFT)
2042 + (GCPhys & PAGE_OFFSET_MASK);
2043 STAM_PROFILE_START(&pVirt->Stat, h);
2044 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2045 STAM_PROFILE_STOP(&pVirt->Stat, h);
2046 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2047 }
2048 pVirt = NULL;
2049#else
2050 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2051 NOREF(cbRange);
2052 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2053 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2054#endif
2055 }
2056 /*
2057 * Both... give the physical one priority.
2058 */
2059 else
2060 {
2061 Assert(!offPhys && !offVirt);
2062 if (cbRange > offVirtLast + 1)
2063 cbRange = offVirtLast + 1;
2064 if (cbRange > offPhysLast + 1)
2065 cbRange = offPhysLast + 1;
2066
2067#ifdef IN_RING3
2068 if (pVirt->pfnHandlerR3)
2069 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2070 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2071
2072 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2073 void *pvUser = pPhys->CTX_SUFF(pvUser);
2074
2075 STAM_PROFILE_START(&pPhys->Stat, h);
2076 Assert(PGMIsLockOwner(pVM));
2077 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2078 pgmUnlock(pVM);
2079 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2080 pgmLock(pVM);
2081# ifdef VBOX_WITH_STATISTICS
2082 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2083 if (pPhys)
2084 STAM_PROFILE_STOP(&pPhys->Stat, h);
2085# else
2086 pPhys = NULL; /* might not be valid anymore. */
2087# endif
2088 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2089 if (pVirt->pfnHandlerR3)
2090 {
2091
2092 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2093 + (iVirtPage << PAGE_SHIFT)
2094 + (GCPhys & PAGE_OFFSET_MASK);
2095 STAM_PROFILE_START(&pVirt->Stat, h);
2096 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2097 STAM_PROFILE_STOP(&pVirt->Stat, h);
2098 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2099 rc = VINF_SUCCESS;
2100 else
2101 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2102 }
2103 pPhys = NULL;
2104 pVirt = NULL;
2105#else
2106 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2107 NOREF(cbRange);
2108 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2109 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2110#endif
2111 }
2112 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2113 memcpy(pvDst, pvBuf, cbRange);
2114
2115 /*
2116 * Advance if we've got more stuff to do.
2117 */
2118 if (cbRange >= cbWrite)
2119 return VINF_SUCCESS;
2120
2121 cbWrite -= cbRange;
2122 GCPhys += cbRange;
2123 pvBuf = (uint8_t *)pvBuf + cbRange;
2124 pvDst = (uint8_t *)pvDst + cbRange;
2125
2126 offPhys -= cbRange;
2127 offPhysLast -= cbRange;
2128 offVirt -= cbRange;
2129 offVirtLast -= cbRange;
2130 }
2131}
2132
2133
2134/**
2135 * Write to physical memory.
2136 *
2137 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2138 * want to ignore those.
2139 *
2140 * @returns VBox status code. Can be ignored in ring-3.
2141 * @retval VINF_SUCCESS.
2142 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2143 *
2144 * @param pVM VM Handle.
2145 * @param GCPhys Physical address to write to.
2146 * @param pvBuf What to write.
2147 * @param cbWrite How many bytes to write.
2148 */
2149VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2150{
2151 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2152 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2153 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2154
2155 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWrite));
2156 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2157
2158 pgmLock(pVM);
2159
2160 /*
2161 * Copy loop on ram ranges.
2162 */
2163 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2164 for (;;)
2165 {
2166 /* Find range. */
2167 while (pRam && GCPhys > pRam->GCPhysLast)
2168 pRam = pRam->CTX_SUFF(pNext);
2169 /* Inside range or not? */
2170 if (pRam && GCPhys >= pRam->GCPhys)
2171 {
2172 /*
2173 * Must work our way thru this page by page.
2174 */
2175 RTGCPTR off = GCPhys - pRam->GCPhys;
2176 while (off < pRam->cb)
2177 {
2178 RTGCPTR iPage = off >> PAGE_SHIFT;
2179 PPGMPAGE pPage = &pRam->aPages[iPage];
2180 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2181 if (cb > cbWrite)
2182 cb = cbWrite;
2183
2184 /*
2185 * Any active WRITE or ALL access handlers?
2186 */
2187 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2188 {
2189 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2190 if (RT_FAILURE(rc))
2191 {
2192 pgmUnlock(pVM);
2193 return rc;
2194 }
2195 }
2196 else
2197 {
2198 /*
2199 * Get the pointer to the page.
2200 */
2201 void *pvDst;
2202 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
2203 if (RT_SUCCESS(rc))
2204 memcpy(pvDst, pvBuf, cb);
2205 else
2206 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2207 pRam->GCPhys + off, pPage, rc));
2208 }
2209
2210 /* next page */
2211 if (cb >= cbWrite)
2212 {
2213 pgmUnlock(pVM);
2214 return VINF_SUCCESS;
2215 }
2216
2217 cbWrite -= cb;
2218 off += cb;
2219 pvBuf = (const char *)pvBuf + cb;
2220 } /* walk pages in ram range */
2221
2222 GCPhys = pRam->GCPhysLast + 1;
2223 }
2224 else
2225 {
2226 /*
2227 * Unassigned address space, skip it.
2228 */
2229 if (!pRam)
2230 break;
2231 size_t cb = pRam->GCPhys - GCPhys;
2232 if (cb >= cbWrite)
2233 break;
2234 cbWrite -= cb;
2235 pvBuf = (const char *)pvBuf + cb;
2236 GCPhys += cb;
2237 }
2238 } /* Ram range walk */
2239
2240 pgmUnlock(pVM);
2241 return VINF_SUCCESS;
2242}
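/*
 * Usage sketch (hypothetical caller): a ring-3 DMA-style transfer into guest
 * RAM that must still trigger write handlers (e.g. page pool monitoring).
 * pVM, GCPhysDst and the source buffer are assumed from the caller.
 *
 *     int rc = PGMPhysWrite(pVM, GCPhysDst, abSrc, cbSrc);
 *     if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)   // R0/RC only, never in R3
 *     {
 *         // defer the access to ring-3; EM typically retries it there
 *     }
 */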
2243
2244
2245/**
2246 * Read from guest physical memory by GC physical address, bypassing
2247 * MMIO and access handlers.
2248 *
2249 * @returns VBox status.
2250 * @param pVM VM handle.
2251 * @param pvDst The destination address.
2252 * @param GCPhysSrc The source address (GC physical address).
2253 * @param cb The number of bytes to read.
2254 */
2255VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2256{
2257 /*
2258 * Treat the first page as a special case.
2259 */
2260 if (!cb)
2261 return VINF_SUCCESS;
2262
2263 /* map the 1st page */
2264 void const *pvSrc;
2265 PGMPAGEMAPLOCK Lock;
2266 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2267 if (RT_FAILURE(rc))
2268 return rc;
2269
2270 /* optimize for the case where access is completely within the first page. */
2271 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2272 if (RT_LIKELY(cb <= cbPage))
2273 {
2274 memcpy(pvDst, pvSrc, cb);
2275 PGMPhysReleasePageMappingLock(pVM, &Lock);
2276 return VINF_SUCCESS;
2277 }
2278
2279 /* copy to the end of the page. */
2280 memcpy(pvDst, pvSrc, cbPage);
2281 PGMPhysReleasePageMappingLock(pVM, &Lock);
2282 GCPhysSrc += cbPage;
2283 pvDst = (uint8_t *)pvDst + cbPage;
2284 cb -= cbPage;
2285
2286 /*
2287 * Page by page.
2288 */
2289 for (;;)
2290 {
2291 /* map the page */
2292 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2293 if (RT_FAILURE(rc))
2294 return rc;
2295
2296 /* last page? */
2297 if (cb <= PAGE_SIZE)
2298 {
2299 memcpy(pvDst, pvSrc, cb);
2300 PGMPhysReleasePageMappingLock(pVM, &Lock);
2301 return VINF_SUCCESS;
2302 }
2303
2304 /* copy the entire page and advance */
2305 memcpy(pvDst, pvSrc, PAGE_SIZE);
2306 PGMPhysReleasePageMappingLock(pVM, &Lock);
2307 GCPhysSrc += PAGE_SIZE;
2308 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2309 cb -= PAGE_SIZE;
2310 }
2311 /* won't ever get here. */
2312}
2313
2314
2315/**
2316 * Write to guest physical memory by GC physical address.
2318 *
2319 * This will bypass MMIO and access handlers.
2320 *
2321 * @returns VBox status.
2322 * @param pVM VM handle.
2323 * @param GCPhysDst The GC physical address of the destination.
2324 * @param pvSrc The source buffer.
2325 * @param cb The number of bytes to write.
2326 */
2327VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2328{
2329 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2330
2331 /*
2332 * Treat the first page as a special case.
2333 */
2334 if (!cb)
2335 return VINF_SUCCESS;
2336
2337 /* map the 1st page */
2338 void *pvDst;
2339 PGMPAGEMAPLOCK Lock;
2340 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2341 if (RT_FAILURE(rc))
2342 return rc;
2343
2344 /* optimize for the case where access is completely within the first page. */
2345 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2346 if (RT_LIKELY(cb <= cbPage))
2347 {
2348 memcpy(pvDst, pvSrc, cb);
2349 PGMPhysReleasePageMappingLock(pVM, &Lock);
2350 return VINF_SUCCESS;
2351 }
2352
2353 /* copy to the end of the page. */
2354 memcpy(pvDst, pvSrc, cbPage);
2355 PGMPhysReleasePageMappingLock(pVM, &Lock);
2356 GCPhysDst += cbPage;
2357 pvSrc = (const uint8_t *)pvSrc + cbPage;
2358 cb -= cbPage;
2359
2360 /*
2361 * Page by page.
2362 */
2363 for (;;)
2364 {
2365 /* map the page */
2366 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2367 if (RT_FAILURE(rc))
2368 return rc;
2369
2370 /* last page? */
2371 if (cb <= PAGE_SIZE)
2372 {
2373 memcpy(pvDst, pvSrc, cb);
2374 PGMPhysReleasePageMappingLock(pVM, &Lock);
2375 return VINF_SUCCESS;
2376 }
2377
2378 /* copy the entire page and advance */
2379 memcpy(pvDst, pvSrc, PAGE_SIZE);
2380 PGMPhysReleasePageMappingLock(pVM, &Lock);
2381 GCPhysDst += PAGE_SIZE;
2382 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2383 cb -= PAGE_SIZE;
2384 }
2385 /* won't ever get here. */
2386}
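/*
 * Usage sketch (hypothetical): saved-state or debugger style access that must
 * NOT trigger MMIO or access handlers, using the simple GCPhys pair. GCPhys
 * and the local variable are assumed from the caller.
 *
 *     uint64_t u64;
 *     int rc = PGMPhysSimpleReadGCPhys(pVM, &u64, GCPhys, sizeof(u64));
 *     if (RT_SUCCESS(rc))
 *         rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhys, &u64, sizeof(u64));
 */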
2387
2388
2389/**
2390 * Read from guest physical memory referenced by GC pointer.
2391 *
2392 * This function uses the current CR3/CR0/CR4 of the guest and will
2393 * bypass access handlers and not set any accessed bits.
2394 *
2395 * @returns VBox status.
2396 * @param pVCpu The VMCPU handle.
2397 * @param pvDst The destination address.
2398 * @param GCPtrSrc The source address (GC pointer).
2399 * @param cb The number of bytes to read.
2400 */
2401VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2402{
2403 PVM pVM = pVCpu->CTX_SUFF(pVM);
2404
2405 /*
2406 * Treat the first page as a special case.
2407 */
2408 if (!cb)
2409 return VINF_SUCCESS;
2410
2411 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleRead));
2412 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
2413
2414 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
2415 * when many VCPUs are fighting for the lock.
2416 */
2417 pgmLock(pVM);
2418
2419 /* map the 1st page */
2420 void const *pvSrc;
2421 PGMPAGEMAPLOCK Lock;
2422 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2423 if (RT_FAILURE(rc))
2424 {
2425 pgmUnlock(pVM);
2426 return rc;
2427 }
2428
2429 /* optimize for the case where access is completely within the first page. */
2430 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2431 if (RT_LIKELY(cb <= cbPage))
2432 {
2433 memcpy(pvDst, pvSrc, cb);
2434 PGMPhysReleasePageMappingLock(pVM, &Lock);
2435 pgmUnlock(pVM);
2436 return VINF_SUCCESS;
2437 }
2438
2439 /* copy to the end of the page. */
2440 memcpy(pvDst, pvSrc, cbPage);
2441 PGMPhysReleasePageMappingLock(pVM, &Lock);
2442 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
2443 pvDst = (uint8_t *)pvDst + cbPage;
2444 cb -= cbPage;
2445
2446 /*
2447 * Page by page.
2448 */
2449 for (;;)
2450 {
2451 /* map the page */
2452 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2453 if (RT_FAILURE(rc))
2454 {
2455 pgmUnlock(pVM);
2456 return rc;
2457 }
2458
2459 /* last page? */
2460 if (cb <= PAGE_SIZE)
2461 {
2462 memcpy(pvDst, pvSrc, cb);
2463 PGMPhysReleasePageMappingLock(pVM, &Lock);
2464 pgmUnlock(pVM);
2465 return VINF_SUCCESS;
2466 }
2467
2468 /* copy the entire page and advance */
2469 memcpy(pvDst, pvSrc, PAGE_SIZE);
2470 PGMPhysReleasePageMappingLock(pVM, &Lock);
2471 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
2472 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2473 cb -= PAGE_SIZE;
2474 }
2475 /* won't ever get here. */
2476}
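/*
 * Usage sketch (hypothetical): peeking at guest data through its virtual
 * address on the EMT, e.g. an 8-byte descriptor-table entry, without touching
 * accessed bits or handlers. pVCpu and GCPtrEntry are assumed.
 *
 *     uint64_t u64Entry;
 *     int rc = PGMPhysSimpleReadGCPtr(pVCpu, &u64Entry, GCPtrEntry, sizeof(u64Entry));
 */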
2477
2478
2479/**
2480 * Write to guest physical memory referenced by GC pointer.
2481 *
2482 * This function uses the current CR3/CR0/CR4 of the guest and will
2483 * bypass access handlers and not set dirty or accessed bits.
2484 *
2485 * @returns VBox status.
2486 * @param pVCpu The VMCPU handle.
2487 * @param GCPtrDst The destination address (GC pointer).
2488 * @param pvSrc The source address.
2489 * @param cb The number of bytes to write.
2490 */
2491VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2492{
2493 PVM pVM = pVCpu->CTX_SUFF(pVM);
2494
2495 /*
2496 * Treat the first page as a special case.
2497 */
2498 if (!cb)
2499 return VINF_SUCCESS;
2500
2501 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWrite));
2502 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
2503
2504 /* map the 1st page */
2505 void *pvDst;
2506 PGMPAGEMAPLOCK Lock;
2507 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2508 if (RT_FAILURE(rc))
2509 return rc;
2510
2511 /* optimize for the case where access is completely within the first page. */
2512 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2513 if (RT_LIKELY(cb <= cbPage))
2514 {
2515 memcpy(pvDst, pvSrc, cb);
2516 PGMPhysReleasePageMappingLock(pVM, &Lock);
2517 return VINF_SUCCESS;
2518 }
2519
2520 /* copy to the end of the page. */
2521 memcpy(pvDst, pvSrc, cbPage);
2522 PGMPhysReleasePageMappingLock(pVM, &Lock);
2523 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2524 pvSrc = (const uint8_t *)pvSrc + cbPage;
2525 cb -= cbPage;
2526
2527 /*
2528 * Page by page.
2529 */
2530 for (;;)
2531 {
2532 /* map the page */
2533 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2534 if (RT_FAILURE(rc))
2535 return rc;
2536
2537 /* last page? */
2538 if (cb <= PAGE_SIZE)
2539 {
2540 memcpy(pvDst, pvSrc, cb);
2541 PGMPhysReleasePageMappingLock(pVM, &Lock);
2542 return VINF_SUCCESS;
2543 }
2544
2545 /* copy the entire page and advance */
2546 memcpy(pvDst, pvSrc, PAGE_SIZE);
2547 PGMPhysReleasePageMappingLock(pVM, &Lock);
2548 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2549 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2550 cb -= PAGE_SIZE;
2551 }
2552 /* won't ever get here. */
2553}
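/*
 * Usage sketch (hypothetical): patching guest code or data through a virtual
 * address without marking the PTE dirty or accessed, so the guest does not
 * observe the write through its own page tables. GCPtrPatch is assumed.
 *
 *     uint8_t const bInt3 = 0xcc;
 *     int rc = PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrPatch, &bInt3, sizeof(bInt3));
 */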
2554
2555
2556/**
2557 * Write to guest physical memory referenced by GC pointer and update the PTE.
2558 *
2559 * This function uses the current CR3/CR0/CR4 of the guest and will
2560 * bypass access handlers but will set any dirty and accessed bits in the PTE.
2561 *
2562 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
2563 *
2564 * @returns VBox status.
2565 * @param pVCpu The VMCPU handle.
2566 * @param GCPtrDst The destination address (GC pointer).
2567 * @param pvSrc The source address.
2568 * @param cb The number of bytes to write.
2569 */
2570VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2571{
2572 PVM pVM = pVCpu->CTX_SUFF(pVM);
2573
2574 /*
2575 * Treat the first page as a special case.
2576 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
2577 */
2578 if (!cb)
2579 return VINF_SUCCESS;
2580
2581 /* map the 1st page */
2582 void *pvDst;
2583 PGMPAGEMAPLOCK Lock;
2584 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2585 if (RT_FAILURE(rc))
2586 return rc;
2587
2588 /* optimize for the case where access is completely within the first page. */
2589 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2590 if (RT_LIKELY(cb <= cbPage))
2591 {
2592 memcpy(pvDst, pvSrc, cb);
2593 PGMPhysReleasePageMappingLock(pVM, &Lock);
2594 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2595 return VINF_SUCCESS;
2596 }
2597
2598 /* copy to the end of the page. */
2599 memcpy(pvDst, pvSrc, cbPage);
2600 PGMPhysReleasePageMappingLock(pVM, &Lock);
2601 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2602 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2603 pvSrc = (const uint8_t *)pvSrc + cbPage;
2604 cb -= cbPage;
2605
2606 /*
2607 * Page by page.
2608 */
2609 for (;;)
2610 {
2611 /* map the page */
2612 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2613 if (RT_FAILURE(rc))
2614 return rc;
2615
2616 /* last page? */
2617 if (cb <= PAGE_SIZE)
2618 {
2619 memcpy(pvDst, pvSrc, cb);
2620 PGMPhysReleasePageMappingLock(pVM, &Lock);
2621 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2622 return VINF_SUCCESS;
2623 }
2624
2625 /* copy the entire page and advance */
2626 memcpy(pvDst, pvSrc, PAGE_SIZE);
2627 PGMPhysReleasePageMappingLock(pVM, &Lock);
2628 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2629 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2630 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2631 cb -= PAGE_SIZE;
2632 }
2633 /* won't ever get here. */
2634}
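/*
 * Usage sketch (hypothetical): emulating a guest-visible store where the A/D
 * bits must end up set just as if the CPU itself had done the write; contrast
 * with PGMPhysSimpleWriteGCPtr above, which leaves them untouched. GCPtrDst
 * and uValue are assumed from the emulation context.
 *
 *     int rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, &uValue, sizeof(uValue));
 */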
2635
2636
2637/**
2638 * Read from guest physical memory referenced by GC pointer.
2639 *
2640 * This function uses the current CR3/CR0/CR4 of the guest and will
2641 * respect access handlers and set accessed bits.
2642 *
2643 * @returns VBox status.
2644 * @param pVCpu The VMCPU handle.
2645 * @param pvDst The destination address.
2646 * @param GCPtrSrc The source address (GC pointer).
2647 * @param cb The number of bytes to read.
2648 * @thread The vCPU EMT.
2649 */
2650VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2651{
2652 RTGCPHYS GCPhys;
2653 uint64_t fFlags;
2654 int rc;
2655 PVM pVM = pVCpu->CTX_SUFF(pVM);
2656
2657 /*
2658 * Anything to do?
2659 */
2660 if (!cb)
2661 return VINF_SUCCESS;
2662
2663 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
2664
2665 /*
2666 * Optimize reads within a single page.
2667 */
2668 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2669 {
2670 /* Convert virtual to physical address + flags */
2671 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2672 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2673 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2674
2675 /* mark the guest page as accessed. */
2676 if (!(fFlags & X86_PTE_A))
2677 {
2678 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2679 AssertRC(rc);
2680 }
2681
2682 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
2683 }
2684
2685 /*
2686 * Page by page.
2687 */
2688 for (;;)
2689 {
2690 /* Convert virtual to physical address + flags */
2691 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2692 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2693 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2694
2695 /* mark the guest page as accessed. */
2696 if (!(fFlags & X86_PTE_A))
2697 {
2698 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2699 AssertRC(rc);
2700 }
2701
2702 /* copy */
2703 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2704 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
2705 if (cbRead >= cb || RT_FAILURE(rc))
2706 return rc;
2707
2708 /* next */
2709 cb -= cbRead;
2710 pvDst = (uint8_t *)pvDst + cbRead;
2711 GCPtrSrc += cbRead;
2712 }
2713}
2714
2715
2716/**
2717 * Write to guest physical memory referenced by GC pointer.
2718 *
2719 * This function uses the current CR3/CR0/CR4 of the guest and will
2720 * respect access handlers and set dirty and accessed bits.
2721 *
2722 * @returns VBox status.
2723 * @retval VINF_SUCCESS.
2724 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2725 *
2726 * @param pVCpu The VMCPU handle.
2727 * @param GCPtrDst The destination address (GC pointer).
2728 * @param pvSrc The source address.
2729 * @param cb The number of bytes to write.
2730 */
2731VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2732{
2733 RTGCPHYS GCPhys;
2734 uint64_t fFlags;
2735 int rc;
2736 PVM pVM = pVCpu->CTX_SUFF(pVM);
2737
2738 /*
2739 * Anything to do?
2740 */
2741 if (!cb)
2742 return VINF_SUCCESS;
2743
2744 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
2745
2746 /*
2747 * Optimize writes within a single page.
2748 */
2749 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2750 {
2751 /* Convert virtual to physical address + flags */
2752 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2753 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2754 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2755
2756 /* Mention when we ignore X86_PTE_RW... */
2757 if (!(fFlags & X86_PTE_RW))
2758 Log(("PGMPhysGCPtr2GCPhys: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2759
2760 /* Mark the guest page as accessed and dirty if necessary. */
2761 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2762 {
2763 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2764 AssertRC(rc);
2765 }
2766
2767 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
2768 }
2769
2770 /*
2771 * Page by page.
2772 */
2773 for (;;)
2774 {
2775 /* Convert virtual to physical address + flags */
2776 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2777 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2778 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2779
2780 /* Mention when we ignore X86_PTE_RW... */
2781 if (!(fFlags & X86_PTE_RW))
2782 Log(("PGMPhysGCPtr2GCPhys: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2783
2784 /* Mark the guest page as accessed and dirty if necessary. */
2785 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2786 {
2787 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2788 AssertRC(rc);
2789 }
2790
2791 /* copy */
2792 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2793 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
2794 if (cbWrite >= cb || RT_FAILURE(rc))
2795 return rc;
2796
2797 /* next */
2798 cb -= cbWrite;
2799 pvSrc = (uint8_t *)pvSrc + cbWrite;
2800 GCPtrDst += cbWrite;
2801 }
2802}
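/*
 * Usage sketch (hypothetical): instruction-emulation style access through the
 * current guest paging mode that must respect MMIO/handlers and update A/D
 * bits. GCPtrSrc and GCPtrDst are assumed from the emulation context.
 *
 *     uint32_t u32;
 *     int rc = PGMPhysReadGCPtr(pVCpu, &u32, GCPtrSrc, sizeof(u32));
 *     if (RT_SUCCESS(rc))
 *         rc = PGMPhysWriteGCPtr(pVCpu, GCPtrDst, &u32, sizeof(u32));
 *     // R0/RC callers must be prepared for VERR_PGM_PHYS_WR_HIT_HANDLER.
 */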
2803
2804
2805/**
2806 * Performs a read of guest virtual memory for instruction emulation.
2807 *
2808 * This will check permissions, raise exceptions and update the access bits.
2809 *
2810 * The current implementation will bypass all access handlers. It may later be
2811 * changed to at least respect MMIO.
2812 *
2813 *
2814 * @returns VBox status code suitable to scheduling.
2815 * @retval VINF_SUCCESS if the read was performed successfully.
2816 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2817 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2818 *
2819 * @param pVCpu The VMCPU handle.
2820 * @param pCtxCore The context core.
2821 * @param pvDst Where to put the bytes we've read.
2822 * @param GCPtrSrc The source address.
2823 * @param cb The number of bytes to read. Not more than a page.
2824 *
2825 * @remark This function will dynamically map physical pages in GC. This may unmap
2826 * mappings done by the caller. Be careful!
2827 */
2828VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
2829{
2830 PVM pVM = pVCpu->CTX_SUFF(pVM);
2831 Assert(cb <= PAGE_SIZE);
2832
2833/** @todo r=bird: This isn't perfect!
2834 * -# It's not checking for reserved bits being 1.
2835 * -# It's not correctly dealing with the access bit.
2836 * -# It's not respecting MMIO memory or any other access handlers.
2837 */
2838 /*
2839 * 1. Translate virtual to physical. This may fault.
2840 * 2. Map the physical address.
2841 * 3. Do the read operation.
2842 * 4. Set access bits if required.
2843 */
2844 int rc;
2845 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
2846 if (cb <= cb1)
2847 {
2848 /*
2849 * Not crossing pages.
2850 */
2851 RTGCPHYS GCPhys;
2852 uint64_t fFlags;
2853 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
2854 if (RT_SUCCESS(rc))
2855 {
2856 /** @todo we should check reserved bits ... */
2857 void *pvSrc;
2858 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
2859 switch (rc)
2860 {
2861 case VINF_SUCCESS:
2862 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
2863 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
2864 break;
2865 case VERR_PGM_PHYS_PAGE_RESERVED:
2866 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2867 memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
2868 break;
2869 default:
2870 return rc;
2871 }
2872
2873 /** @todo access bit emulation isn't 100% correct. */
2874 if (!(fFlags & X86_PTE_A))
2875 {
2876 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2877 AssertRC(rc);
2878 }
2879 return VINF_SUCCESS;
2880 }
2881 }
2882 else
2883 {
2884 /*
2885 * Crosses pages.
2886 */
2887 size_t cb2 = cb - cb1;
2888 uint64_t fFlags1;
2889 RTGCPHYS GCPhys1;
2890 uint64_t fFlags2;
2891 RTGCPHYS GCPhys2;
2892 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
2893 if (RT_SUCCESS(rc))
2894 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
2895 if (RT_SUCCESS(rc))
2896 {
2897 /** @todo we should check reserved bits ... */
2898 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
2899 void *pvSrc1;
2900 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
2901 switch (rc)
2902 {
2903 case VINF_SUCCESS:
2904 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
2905 break;
2906 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2907 memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
2908 break;
2909 default:
2910 return rc;
2911 }
2912
2913 void *pvSrc2;
2914 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
2915 switch (rc)
2916 {
2917 case VINF_SUCCESS:
2918 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
2919 break;
2920 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2921 memset((uint8_t *)pvDst + cb1, 0, cb2); /** @todo this is wrong, it should be 0xff */
2922 break;
2923 default:
2924 return rc;
2925 }
2926
2927 if (!(fFlags1 & X86_PTE_A))
2928 {
2929 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2930 AssertRC(rc);
2931 }
2932 if (!(fFlags2 & X86_PTE_A))
2933 {
2934 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2935 AssertRC(rc);
2936 }
2937 return VINF_SUCCESS;
2938 }
2939 }
2940
2941 /*
2942 * Raise a #PF.
2943 */
2944 uint32_t uErr;
2945
2946 /* Get the current privilege level. */
2947 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
2948 switch (rc)
2949 {
2950 case VINF_SUCCESS:
2951 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
2952 break;
2953
2954 case VERR_PAGE_NOT_PRESENT:
2955 case VERR_PAGE_TABLE_NOT_PRESENT:
2956 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
2957 break;
2958
2959 default:
2960 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
2961 return rc;
2962 }
2963 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
2964 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
2965}
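/*
 * Usage sketch (hypothetical): an instruction interpreter fetching an operand.
 * On translation failure the function has already raised the #PF via TRPM, so
 * the caller just propagates the status back to the scheduler. pRegFrame and
 * GCPtrPar1 are assumed from the emulation context.
 *
 *     uint16_t u16Operand;
 *     rc = PGMPhysInterpretedRead(pVCpu, pRegFrame, &u16Operand, GCPtrPar1, sizeof(u16Operand));
 *     if (rc != VINF_SUCCESS)
 *         return rc;   // e.g. VINF_EM_RAW_GUEST_TRAP or VINF_TRPM_XCPT_DISPATCHED
 */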
2966
2967
2968/**
2969 * Performs a read of guest virtual memory for instruction emulation.
2970 *
2971 * This will check permissions, raise exceptions and update the access bits.
2972 *
2973 * The current implementation will bypass all access handlers. It may later be
2974 * changed to at least respect MMIO.
2975 *
2976 *
2977 * @returns VBox status code suitable to scheduling.
2978 * @retval VINF_SUCCESS if the read was performed successfully.
2979 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2980 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2981 *
2982 * @param pVCpu The VMCPU handle.
2983 * @param pCtxCore The context core.
2984 * @param pvDst Where to put the bytes we've read.
2985 * @param GCPtrSrc The source address.
2986 * @param cb The number of bytes to read. Not more than a page.
2987 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
2988 * an appropriate error status will be returned instead (no
2989 * informational status at all).
2990 *
2991 *
2992 * @remarks Takes the PGM lock.
2993 * @remarks A page fault on the 2nd page of the access will be raised without
2994 * writing the bits on the first page since we're ASSUMING that the
2995 * caller is emulating an instruction access.
2996 * @remarks This function will dynamically map physical pages in GC. This may
2997 * unmap mappings done by the caller. Be careful!
2998 */
2999VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
3000{
3001 PVM pVM = pVCpu->CTX_SUFF(pVM);
3002 Assert(cb <= PAGE_SIZE);
3003
3004 /*
3005 * 1. Translate virtual to physical. This may fault.
3006 * 2. Map the physical address.
3007 * 3. Do the read operation.
3008 * 4. Set access bits if required.
3009 */
3010 int rc;
3011 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3012 if (cb <= cb1)
3013 {
3014 /*
3015 * Not crossing pages.
3016 */
3017 RTGCPHYS GCPhys;
3018 uint64_t fFlags;
3019 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3020 if (RT_SUCCESS(rc))
3021 {
3022 if (1) /** @todo we should check reserved bits ... */
3023 {
3024 const void *pvSrc;
3025 PGMPAGEMAPLOCK Lock;
3026 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3027 switch (rc)
3028 {
3029 case VINF_SUCCESS:
3030 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3031 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3032 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3033 break;
3034 case VERR_PGM_PHYS_PAGE_RESERVED:
3035 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3036 memset(pvDst, 0xff, cb);
3037 break;
3038 default:
3039 AssertMsgFailed(("%Rrc\n", rc));
3040 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3041 return rc;
3042 }
3043 PGMPhysReleasePageMappingLock(pVM, &Lock);
3044
3045 if (!(fFlags & X86_PTE_A))
3046 {
3047 /** @todo access bit emulation isn't 100% correct. */
3048 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3049 AssertRC(rc);
3050 }
3051 return VINF_SUCCESS;
3052 }
3053 }
3054 }
3055 else
3056 {
3057 /*
3058 * Crosses pages.
3059 */
3060 size_t cb2 = cb - cb1;
3061 uint64_t fFlags1;
3062 RTGCPHYS GCPhys1;
3063 uint64_t fFlags2;
3064 RTGCPHYS GCPhys2;
3065 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3066 if (RT_SUCCESS(rc))
3067 {
3068 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3069 if (RT_SUCCESS(rc))
3070 {
3071 if (1) /** @todo we should check reserved bits ... */
3072 {
3073 const void *pvSrc;
3074 PGMPAGEMAPLOCK Lock;
3075 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3076 switch (rc)
3077 {
3078 case VINF_SUCCESS:
3079 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3080 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3081 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3082 PGMPhysReleasePageMappingLock(pVM, &Lock);
3083 break;
3084 case VERR_PGM_PHYS_PAGE_RESERVED:
3085 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3086 memset(pvDst, 0xff, cb1);
3087 break;
3088 default:
3089 AssertMsgFailed(("%Rrc\n", rc));
3090 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3091 return rc;
3092 }
3093
3094 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3095 switch (rc)
3096 {
3097 case VINF_SUCCESS:
3098 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3099 PGMPhysReleasePageMappingLock(pVM, &Lock);
3100 break;
3101 case VERR_PGM_PHYS_PAGE_RESERVED:
3102 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3103 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3104 break;
3105 default:
3106 AssertMsgFailed(("%Rrc\n", rc));
3107 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3108 return rc;
3109 }
3110
3111 if (!(fFlags1 & X86_PTE_A))
3112 {
3113 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3114 AssertRC(rc);
3115 }
3116 if (!(fFlags2 & X86_PTE_A))
3117 {
3118 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3119 AssertRC(rc);
3120 }
3121 return VINF_SUCCESS;
3122 }
3123 /* sort out which page */
3124 }
3125 else
3126 GCPtrSrc += cb1; /* fault on 2nd page */
3127 }
3128 }
3129
3130 /*
3131 * Raise a #PF if we're allowed to do that.
3132 */
3133 /* Calc the error bits. */
3134 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3135 uint32_t uErr;
3136 switch (rc)
3137 {
3138 case VINF_SUCCESS:
3139 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3140 rc = VERR_ACCESS_DENIED;
3141 break;
3142
3143 case VERR_PAGE_NOT_PRESENT:
3144 case VERR_PAGE_TABLE_NOT_PRESENT:
3145 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3146 break;
3147
3148 default:
3149 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3150 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3151 return rc;
3152 }
3153 if (fRaiseTrap)
3154 {
3155 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3156 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3157 }
3158 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3159 return rc;
3160}
3161
3162
3163/**
3164 * Performs a write to guest virtual memory for instruction emulation.
3165 *
3166 * This will check permissions, raise exceptions and update the dirty and access
3167 * bits.
3168 *
3169 * @returns VBox status code suitable to scheduling.
3170 * @retval VINF_SUCCESS if the read was performed successfully.
3171 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3172 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3173 *
3174 * @param pVCpu The VMCPU handle.
3175 * @param pCtxCore The context core.
3176 * @param GCPtrDst The destination address.
3177 * @param pvSrc What to write.
3178 * @param cb The number of bytes to write. Not more than a page.
3179 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3180 * an appropriate error status will be returned instead (no
3181 * informational status at all).
3182 *
3183 * @remarks Takes the PGM lock.
3184 * @remarks A page fault on the 2nd page of the access will be raised without
3185 * writing the bits on the first page since we're ASSUMING that the
3186 * caller is emulating an instruction access.
3187 * @remarks This function will dynamically map physical pages in GC. This may
3188 * unmap mappings done by the caller. Be careful!
3189 */
3190VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
3191{
3192 Assert(cb <= PAGE_SIZE);
3193 PVM pVM = pVCpu->CTX_SUFF(pVM);
3194
3195 /*
3196 * 1. Translate virtual to physical. This may fault.
3197 * 2. Map the physical address.
3198 * 3. Do the write operation.
3199 * 4. Set access bits if required.
3200 */
3201 int rc;
3202 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3203 if (cb <= cb1)
3204 {
3205 /*
3206 * Not crossing pages.
3207 */
3208 RTGCPHYS GCPhys;
3209 uint64_t fFlags;
3210 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3211 if (RT_SUCCESS(rc))
3212 {
3213 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3214 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3215 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3216 {
3217 void *pvDst;
3218 PGMPAGEMAPLOCK Lock;
3219 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3220 switch (rc)
3221 {
3222 case VINF_SUCCESS:
3223 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3224 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3225 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3226 PGMPhysReleasePageMappingLock(pVM, &Lock);
3227 break;
3228 case VERR_PGM_PHYS_PAGE_RESERVED:
3229 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3230 /* bit bucket */
3231 break;
3232 default:
3233 AssertMsgFailed(("%Rrc\n", rc));
3234 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3235 return rc;
3236 }
3237
3238 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3239 {
3240 /** @todo dirty & access bit emulation isn't 100% correct. */
3241 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3242 AssertRC(rc);
3243 }
3244 return VINF_SUCCESS;
3245 }
3246 rc = VERR_ACCESS_DENIED;
3247 }
3248 }
3249 else
3250 {
3251 /*
3252 * Crosses pages.
3253 */
3254 size_t cb2 = cb - cb1;
3255 uint64_t fFlags1;
3256 RTGCPHYS GCPhys1;
3257 uint64_t fFlags2;
3258 RTGCPHYS GCPhys2;
3259 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3260 if (RT_SUCCESS(rc))
3261 {
3262 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3263 if (RT_SUCCESS(rc))
3264 {
3265 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3266 && (fFlags2 & X86_PTE_RW))
3267 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3268 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) )
3269 {
3270 void *pvDst;
3271 PGMPAGEMAPLOCK Lock;
3272 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3273 switch (rc)
3274 {
3275 case VINF_SUCCESS:
3276 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3277 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3278 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3279 PGMPhysReleasePageMappingLock(pVM, &Lock);
3280 break;
3281 case VERR_PGM_PHYS_PAGE_RESERVED:
3282 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3283 /* bit bucket */
3284 break;
3285 default:
3286 AssertMsgFailed(("%Rrc\n", rc));
3287 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3288 return rc;
3289 }
3290
3291 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3292 switch (rc)
3293 {
3294 case VINF_SUCCESS:
3295 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3296 PGMPhysReleasePageMappingLock(pVM, &Lock);
3297 break;
3298 case VERR_PGM_PHYS_PAGE_RESERVED:
3299 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3300 /* bit bucket */
3301 break;
3302 default:
3303 AssertMsgFailed(("%Rrc\n", rc));
3304 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3305 return rc;
3306 }
3307
3308 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3309 {
3310 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3311 AssertRC(rc);
3312 }
3313 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3314 {
3315 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3316 AssertRC(rc);
3317 }
3318 return VINF_SUCCESS;
3319 }
3320 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3321 GCPtrDst += cb1; /* fault on the 2nd page. */
3322 rc = VERR_ACCESS_DENIED;
3323 }
3324 else
3325 GCPtrDst += cb1; /* fault on the 2nd page. */
3326 }
3327 }
3328
3329 /*
3330 * Raise a #PF if we're allowed to do that.
3331 */
3332 /* Calc the error bits. */
3333 uint32_t uErr;
3334 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3335 switch (rc)
3336 {
3337 case VINF_SUCCESS:
3338 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3339 rc = VERR_ACCESS_DENIED;
3340 break;
3341
3342 case VERR_ACCESS_DENIED:
3343 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3344 break;
3345
3346 case VERR_PAGE_NOT_PRESENT:
3347 case VERR_PAGE_TABLE_NOT_PRESENT:
3348 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3349 break;
3350
3351 default:
3352 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3353 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3354 return rc;
3355 }
3356 if (fRaiseTrap)
3357 {
3358 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3359 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3360 }
3361 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3362 return rc;
3363}
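/*
 * Usage sketch (hypothetical): emulation helpers that prefer to dispatch the
 * exception themselves pass fRaiseTrap = false and act on the plain error
 * status; passing true makes the function raise the #PF via TRPM instead.
 * pRegFrame, GCPtrDst and uValue are assumed from the emulation context.
 *
 *     rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, pRegFrame, GCPtrDst, &uValue,
 *                                            sizeof(uValue), false);
 *     if (rc == VERR_ACCESS_DENIED || rc == VERR_PAGE_NOT_PRESENT)
 *     {
 *         // caller raises or queues the #PF itself
 *     }
 */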
3364
3365