VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp @ 18729

Last change on this file since 18729 was 18716, checked in by vboxsync, 16 years ago

PGMAllPhys.cpp: MM_RAM_FLAGS_RESERVED is no more, cleaned out references to it.

1/* $Id: PGMAllPhys.cpp 18716 2009-04-05 12:09:45Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM_PHYS
26#include <VBox/pgm.h>
27#include <VBox/trpm.h>
28#include <VBox/vmm.h>
29#include <VBox/iom.h>
30#include <VBox/em.h>
31#include <VBox/rem.h>
32#include "PGMInternal.h"
33#include <VBox/vm.h>
34#include <VBox/param.h>
35#include <VBox/err.h>
36#include <iprt/assert.h>
37#include <iprt/string.h>
38#include <iprt/asm.h>
39#include <VBox/log.h>
40#ifdef IN_RING3
41# include <iprt/thread.h>
42#endif
43
44
45
46#ifndef IN_RING3
47
48/**
49 * \#PF Handler callback for Guest ROM range write access.
50 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
51 *
 52 * @returns VBox status code (appropriate for trap handling and GC return).
53 * @param pVM VM Handle.
54 * @param uErrorCode CPU Error code.
55 * @param pRegFrame Trap register frame.
56 * @param pvFault The fault address (cr2).
57 * @param GCPhysFault The GC physical address corresponding to pvFault.
58 * @param pvUser User argument. Pointer to the ROM range structure.
59 */
60VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
61{
62 int rc;
63 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
64 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
65 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
66 switch (pRom->aPages[iPage].enmProt)
67 {
68 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
69 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
70 {
71 /*
72 * If it's a simple instruction which doesn't change the cpu state
73 * we will simply skip it. Otherwise we'll have to defer it to REM.
74 */
75 uint32_t cbOp;
76 DISCPUSTATE Cpu;
77 rc = EMInterpretDisasOne(pVM, pRegFrame, &Cpu, &cbOp);
78 if ( RT_SUCCESS(rc)
79 && Cpu.mode == CPUMODE_32BIT /** @todo why does this matter? */
80 && !(Cpu.prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
81 {
82 switch (Cpu.opcode)
83 {
84 /** @todo Find other instructions we can safely skip, possibly
85 * adding this kind of detection to DIS or EM. */
86 case OP_MOV:
87 pRegFrame->rip += cbOp;
88 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestROMWriteHandled);
89 return VINF_SUCCESS;
90 }
91 }
92 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
93 return rc;
94 break;
95 }
96
97 case PGMROMPROT_READ_RAM_WRITE_RAM:
98 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
99 AssertRC(rc);
100 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
101
102 case PGMROMPROT_READ_ROM_WRITE_RAM:
103 /* Handle it in ring-3 because it's *way* easier there. */
104 break;
105
106 default:
107 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
108 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
109 VERR_INTERNAL_ERROR);
110 }
111
112 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestROMWriteUnhandled);
113 return VINF_EM_RAW_EMULATE_INSTR;
114}
115
116#endif /* IN_RING3 */
117
118/**
119 * Checks if Address Gate 20 is enabled or not.
120 *
121 * @returns true if enabled.
122 * @returns false if disabled.
123 * @param pVM VM handle.
124 */
125VMMDECL(bool) PGMPhysIsA20Enabled(PVM pVM)
126{
127 LogFlow(("PGMPhysIsA20Enabled %d\n", pVM->pgm.s.fA20Enabled));
128 return pVM->pgm.s.fA20Enabled;
129}
130
131
132/**
133 * Validates a GC physical address.
134 *
135 * @returns true if valid.
136 * @returns false if invalid.
137 * @param pVM The VM handle.
138 * @param GCPhys The physical address to validate.
139 */
140VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
141{
142 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
143 return pPage != NULL;
144}
145
146
147/**
148 * Checks if a GC physical address is a normal page,
149 * i.e. not ROM, MMIO or reserved.
150 *
151 * @returns true if normal.
152 * @returns false if invalid, ROM, MMIO or reserved page.
153 * @param pVM The VM handle.
154 * @param GCPhys The physical address to check.
155 */
156VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
157{
158 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
159 return pPage
160 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
161}
162
163
164/**
165 * Converts a GC physical address to a HC physical address.
166 *
167 * @returns VINF_SUCCESS on success.
 168 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
169 * page but has no physical backing.
170 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
171 * GC physical address.
172 *
173 * @param pVM The VM handle.
174 * @param GCPhys The GC physical address to convert.
175 * @param pHCPhys Where to store the HC physical address on success.
176 */
177VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
178{
179 PPGMPAGE pPage;
180 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
181 if (RT_FAILURE(rc))
182 return rc;
183
184 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
185 return VINF_SUCCESS;
186}
187
188
189/**
190 * Invalidates the GC page mapping TLB.
191 *
192 * @param pVM The VM handle.
193 */
194VMMDECL(void) PGMPhysInvalidatePageGCMapTLB(PVM pVM)
195{
196 /* later */
197 NOREF(pVM);
198}
199
200
201/**
202 * Invalidates the ring-0 page mapping TLB.
203 *
204 * @param pVM The VM handle.
205 */
206VMMDECL(void) PGMPhysInvalidatePageR0MapTLB(PVM pVM)
207{
208 PGMPhysInvalidatePageR3MapTLB(pVM);
209}
210
211
212/**
213 * Invalidates the ring-3 page mapping TLB.
214 *
215 * @param pVM The VM handle.
216 */
217VMMDECL(void) PGMPhysInvalidatePageR3MapTLB(PVM pVM)
218{
219 pgmLock(pVM);
220 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
221 {
222 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
223 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
224 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
225 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
226 }
227 pgmUnlock(pVM);
228}
229
230
231/**
232 * Makes sure that there is at least one handy page ready for use.
233 *
234 * This will also take the appropriate actions when reaching water-marks.
235 *
236 * @returns VBox status code.
237 * @retval VINF_SUCCESS on success.
238 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
239 *
240 * @param pVM The VM handle.
241 *
242 * @remarks Must be called from within the PGM critical section. It may
243 * nip back to ring-3/0 in some cases.
244 */
245static int pgmPhysEnsureHandyPage(PVM pVM)
246{
247 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
248
249 /*
250 * Do we need to do anything special?
251 */
252#ifdef IN_RING3
253 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
254#else
255 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
256#endif
257 {
258 /*
 259 * Allocate pages only if we're out of them or, in ring-3, almost out.
260 */
261#ifdef IN_RING3
262 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
263#else
264 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
265#endif
266 {
267 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
268 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
269#ifdef IN_RING3
270 int rc = PGMR3PhysAllocateHandyPages(pVM);
271#elif defined(IN_RING0)
272 int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
273#else
274 int rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
275#endif
276 if (RT_UNLIKELY(rc != VINF_SUCCESS))
277 {
278 if (RT_FAILURE(rc))
279 return rc;
280 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
281 if (!pVM->pgm.s.cHandyPages)
282 {
283 LogRel(("PGM: no more handy pages!\n"));
284 return VERR_EM_NO_MEMORY;
285 }
286 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
287 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
288#ifdef IN_RING3
289 REMR3NotifyFF(pVM);
290#else
291 VM_FF_SET(pVM, VM_FF_TO_R3); /* paranoia */
292#endif
293 }
294 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
295 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
296 ("%u\n", pVM->pgm.s.cHandyPages),
297 VERR_INTERNAL_ERROR);
298 }
299 else
300 {
301 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
302 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
303#ifndef IN_RING3
304 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
305 {
306 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
307 VM_FF_SET(pVM, VM_FF_TO_R3);
308 }
309#endif
310 }
311 }
312
313 return VINF_SUCCESS;
314}
315
316
317/**
 318 * Replace a zero or shared page with a new page that we can write to.
319 *
320 * @returns The following VBox status codes.
321 * @retval VINF_SUCCESS on success, pPage is modified.
322 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
323 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
324 *
325 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
326 *
327 * @param pVM The VM address.
328 * @param pPage The physical page tracking structure. This will
329 * be modified on success.
330 * @param GCPhys The address of the page.
331 *
332 * @remarks Must be called from within the PGM critical section. It may
333 * nip back to ring-3/0 in some cases.
334 *
335 * @remarks This function shouldn't really fail, however if it does
 336 * it probably means we've screwed up the size of the handy
 337 * page set and/or its low-water mark. Or, that some device
 338 * I/O is causing a lot of pages to be allocated while the
 339 * host is in a low-memory condition.
340 */
341int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
342{
343 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
344
345 /*
346 * Prereqs.
347 */
348 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
349 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
350 Assert(!PGM_PAGE_IS_MMIO(pPage));
351
352
353 /*
354 * Flush any shadow page table mappings of the page.
355 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
356 */
357 bool fFlushTLBs = false;
358 int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
359 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
360
361 /*
362 * Ensure that we've got a page handy, take it and use it.
363 */
364 int rc2 = pgmPhysEnsureHandyPage(pVM);
365 if (RT_FAILURE(rc2))
366 {
367 if (fFlushTLBs)
368 PGM_INVL_GUEST_TLBS();
369 Assert(rc2 == VERR_EM_NO_MEMORY);
370 return rc2;
371 }
372 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
373 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
374 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
375 Assert(!PGM_PAGE_IS_MMIO(pPage));
376
377 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
378 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
379 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
380 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
381 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
382 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
383
384 /*
 385 * There are one or two actions to be taken the next time we allocate handy pages:
386 * - Tell the GMM (global memory manager) what the page is being used for.
387 * (Speeds up replacement operations - sharing and defragmenting.)
388 * - If the current backing is shared, it must be freed.
389 */
390 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
391 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
392
393 if (PGM_PAGE_IS_SHARED(pPage))
394 {
395 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
396 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
397 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
398
399 Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
400 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
401 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageReplaceShared));
402 pVM->pgm.s.cSharedPages--;
403 AssertMsgFailed(("TODO: copy shared page content")); /** @todo err.. what about copying the page content? */
404 }
405 else
406 {
407 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
408 STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
409 pVM->pgm.s.cZeroPages--;
410 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
411 }
412
413 /*
414 * Do the PGMPAGE modifications.
415 */
416 pVM->pgm.s.cPrivatePages++;
417 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
418 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
419 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
420
421 if ( fFlushTLBs
422 && rc != VINF_PGM_GCPHYS_ALIASED)
423 PGM_INVL_GUEST_TLBS();
424 return rc;
425}
426
427
428/**
429 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
430 *
431 * @returns VBox status code.
432 * @retval VINF_SUCCESS on success.
433 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
 434 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
435 *
436 * @param pVM The VM address.
437 * @param pPage The physical page tracking structure.
438 * @param GCPhys The address of the page.
439 *
440 * @remarks Called from within the PGM critical section.
441 */
442int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
443{
444 switch (PGM_PAGE_GET_STATE(pPage))
445 {
446 case PGM_PAGE_STATE_WRITE_MONITORED:
447 PGM_PAGE_SET_WRITTEN_TO(pPage);
448 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
449 /* fall thru */
450 default: /* to shut up GCC */
451 case PGM_PAGE_STATE_ALLOCATED:
452 return VINF_SUCCESS;
453
454 /*
455 * Zero pages can be dummy pages for MMIO or reserved memory,
456 * so we need to check the flags before joining cause with
457 * shared page replacement.
458 */
459 case PGM_PAGE_STATE_ZERO:
460 if (PGM_PAGE_IS_MMIO(pPage))
461 return VERR_PGM_PHYS_PAGE_RESERVED;
462 /* fall thru */
463 case PGM_PAGE_STATE_SHARED:
464 return pgmPhysAllocPage(pVM, pPage, GCPhys);
465 }
466}
467
468
469/**
470 * Wrapper for pgmPhysPageMakeWritable which enters the critsect.
471 *
472 * @returns VBox status code.
473 * @retval VINF_SUCCESS on success.
474 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
 475 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
476 *
477 * @param pVM The VM address.
478 * @param pPage The physical page tracking structure.
479 * @param GCPhys The address of the page.
480 */
481int pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
482{
483 int rc = pgmLock(pVM);
484 if (RT_SUCCESS(rc))
485 {
486 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
487 pgmUnlock(pVM);
488 }
489 return rc;
490}
491
492
493/**
494 * Internal usage: Map the page specified by its GMM ID.
495 *
 496 * This is similar to pgmPhysPageMap.
497 *
498 * @returns VBox status code.
499 *
500 * @param pVM The VM handle.
501 * @param idPage The Page ID.
502 * @param HCPhys The physical address (for RC).
503 * @param ppv Where to store the mapping address.
504 *
505 * @remarks Called from within the PGM critical section.
506 */
507int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
508{
509 /*
510 * Validation.
511 */
512 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
513 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
514 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
515 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
516
517#ifdef IN_RC
518 /*
519 * Map it by HCPhys.
520 */
521 return PGMDynMapHCPage(pVM, HCPhys, ppv);
522
523#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
524 /*
525 * Map it by HCPhys.
526 */
527 return pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
528
529#else
530 /*
531 * Find/make Chunk TLB entry for the mapping chunk.
532 */
533 PPGMCHUNKR3MAP pMap;
534 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
535 if (pTlbe->idChunk == idChunk)
536 {
537 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
538 pMap = pTlbe->pChunk;
539 }
540 else
541 {
542 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
543
544 /*
545 * Find the chunk, map it if necessary.
546 */
547 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
548 if (!pMap)
549 {
550# ifdef IN_RING0
551 int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_MAP_CHUNK, idChunk);
552 AssertRCReturn(rc, rc);
553 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
554 Assert(pMap);
555# else
556 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
557 if (RT_FAILURE(rc))
558 return rc;
559# endif
560 }
561
562 /*
563 * Enter it into the Chunk TLB.
564 */
565 pTlbe->idChunk = idChunk;
566 pTlbe->pChunk = pMap;
567 pMap->iAge = 0;
568 }
569
 570 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
571 return VINF_SUCCESS;
572#endif
573}
574
575
576/**
577 * Maps a page into the current virtual address space so it can be accessed.
578 *
579 * @returns VBox status code.
580 * @retval VINF_SUCCESS on success.
 581 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
582 *
583 * @param pVM The VM address.
584 * @param pPage The physical page tracking structure.
585 * @param GCPhys The address of the page.
586 * @param ppMap Where to store the address of the mapping tracking structure.
587 * @param ppv Where to store the mapping address of the page. The page
588 * offset is masked off!
589 *
590 * @remarks Called from within the PGM critical section.
591 */
592int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
593{
594 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
595
596#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
597 /*
598 * Just some sketchy GC/R0-darwin code.
599 */
600 *ppMap = NULL;
601 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
602 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
603# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
604 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
605# else
606 PGMDynMapHCPage(pVM, HCPhys, ppv);
607# endif
608 return VINF_SUCCESS;
609
610#else /* IN_RING3 || IN_RING0 */
611
612
613 /*
614 * Special case: ZERO and MMIO2 pages.
615 */
616 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
617 if (idChunk == NIL_GMM_CHUNKID)
618 {
619 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
620 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
621 {
622 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
623 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
 624 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
625 *ppv = (void *)((uintptr_t)pRam->pvR3 + (GCPhys - pRam->GCPhys));
626 }
627 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
628 {
629 /** @todo deal with aliased MMIO2 pages somehow...
630 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
631 * them, that would also avoid this mess. It would actually be kind of
632 * elegant... */
633 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
634 }
635 else
636 {
637 /** @todo handle MMIO2 */
638 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
639 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
640 ("pPage=%R[pgmpage]\n", pPage),
641 VERR_INTERNAL_ERROR_2);
642 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
643 }
644 *ppMap = NULL;
645 return VINF_SUCCESS;
646 }
647
648 /*
649 * Find/make Chunk TLB entry for the mapping chunk.
650 */
651 PPGMCHUNKR3MAP pMap;
652 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
653 if (pTlbe->idChunk == idChunk)
654 {
655 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
656 pMap = pTlbe->pChunk;
657 }
658 else
659 {
660 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
661
662 /*
663 * Find the chunk, map it if necessary.
664 */
665 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
666 if (!pMap)
667 {
668#ifdef IN_RING0
669 int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_MAP_CHUNK, idChunk);
670 AssertRCReturn(rc, rc);
671 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
672 Assert(pMap);
673#else
674 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
675 if (RT_FAILURE(rc))
676 return rc;
677#endif
678 }
679
680 /*
681 * Enter it into the Chunk TLB.
682 */
683 pTlbe->idChunk = idChunk;
684 pTlbe->pChunk = pMap;
685 pMap->iAge = 0;
686 }
687
688 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
689 *ppMap = pMap;
690 return VINF_SUCCESS;
691#endif /* IN_RING3 */
692}
693
694
695#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
696/**
697 * Load a guest page into the ring-3 physical TLB.
698 *
699 * @returns VBox status code.
700 * @retval VINF_SUCCESS on success
701 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
702 * @param pPGM The PGM instance pointer.
703 * @param GCPhys The guest physical address in question.
704 */
705int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
706{
707 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
708
709 /*
710 * Find the ram range.
711 * 99.8% of requests are expected to be in the first range.
712 */
713 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
714 RTGCPHYS off = GCPhys - pRam->GCPhys;
715 if (RT_UNLIKELY(off >= pRam->cb))
716 {
717 do
718 {
719 pRam = pRam->CTX_SUFF(pNext);
720 if (!pRam)
721 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
722 off = GCPhys - pRam->GCPhys;
723 } while (off >= pRam->cb);
724 }
725
726 /*
727 * Map the page.
728 * Make a special case for the zero page as it is kind of special.
729 */
730 PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
731 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
732 if (!PGM_PAGE_IS_ZERO(pPage))
733 {
734 void *pv;
735 PPGMPAGEMAP pMap;
736 int rc = pgmPhysPageMap(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
737 if (RT_FAILURE(rc))
738 return rc;
739 pTlbe->pMap = pMap;
740 pTlbe->pv = pv;
741 }
742 else
743 {
744 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
745 pTlbe->pMap = NULL;
746 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
747 }
748 pTlbe->pPage = pPage;
749 return VINF_SUCCESS;
750}
751
752
753/**
754 * Load a guest page into the ring-3 physical TLB.
755 *
756 * @returns VBox status code.
757 * @retval VINF_SUCCESS on success
758 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
759 *
760 * @param pPGM The PGM instance pointer.
761 * @param pPage Pointer to the PGMPAGE structure corresponding to
762 * GCPhys.
763 * @param GCPhys The guest physical address in question.
764 */
765int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys)
766{
767 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
768
769 /*
770 * Map the page.
771 * Make a special case for the zero page as it is kind of special.
772 */
773 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
774 if (!PGM_PAGE_IS_ZERO(pPage))
775 {
776 void *pv;
777 PPGMPAGEMAP pMap;
778 int rc = pgmPhysPageMap(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
779 if (RT_FAILURE(rc))
780 return rc;
781 pTlbe->pMap = pMap;
782 pTlbe->pv = pv;
783 }
784 else
785 {
786 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
787 pTlbe->pMap = NULL;
788 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
789 }
790 pTlbe->pPage = pPage;
791 return VINF_SUCCESS;
792}
793#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
794
795
796/**
797 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
798 * own the PGM lock and therefore not need to lock the mapped page.
799 *
800 * @returns VBox status code.
801 * @retval VINF_SUCCESS on success.
 802 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
803 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
804 *
805 * @param pVM The VM handle.
806 * @param GCPhys The guest physical address of the page that should be mapped.
807 * @param pPage Pointer to the PGMPAGE structure for the page.
808 * @param ppv Where to store the address corresponding to GCPhys.
809 *
810 * @internal
811 */
812int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
813{
814 int rc;
815 AssertReturn(pPage, VERR_INTERNAL_ERROR);
816 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect) || VM_IS_EMT(pVM));
817
818 /*
819 * Make sure the page is writable.
820 */
821 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
822 {
823 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
824 if (RT_FAILURE(rc))
825 return rc;
826 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
827 }
828 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
829
830 /*
831 * Get the mapping address.
832 */
833#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
834 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
835#else
836 PPGMPAGEMAPTLBE pTlbe;
837 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
838 if (RT_FAILURE(rc))
839 return rc;
840 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
841#endif
842 return VINF_SUCCESS;
843}
844
845
846/**
847 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
848 * own the PGM lock and therefore not need to lock the mapped page.
849 *
850 * @returns VBox status code.
851 * @retval VINF_SUCCESS on success.
 852 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
853 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
854 *
855 * @param pVM The VM handle.
856 * @param GCPhys The guest physical address of the page that should be mapped.
857 * @param pPage Pointer to the PGMPAGE structure for the page.
858 * @param ppv Where to store the address corresponding to GCPhys.
859 *
860 * @internal
861 */
862int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
863{
864 AssertReturn(pPage, VERR_INTERNAL_ERROR);
865 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect) || VM_IS_EMT(pVM));
866 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
867
868 /*
869 * Get the mapping address.
870 */
871#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
872 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
873#else
874 PPGMPAGEMAPTLBE pTlbe;
875 int rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
876 if (RT_FAILURE(rc))
877 return rc;
878 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
879#endif
880 return VINF_SUCCESS;
881}
882
883
884/**
885 * Requests the mapping of a guest page into the current context.
886 *
 887 * This API should only be used for very short term mappings, as it will consume
 888 * scarce resources (R0 and GC) in the mapping cache. When you're done
889 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
890 *
891 * This API will assume your intention is to write to the page, and will
892 * therefore replace shared and zero pages. If you do not intend to modify
893 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
894 *
895 * @returns VBox status code.
896 * @retval VINF_SUCCESS on success.
 897 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
898 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
899 *
900 * @param pVM The VM handle.
901 * @param GCPhys The guest physical address of the page that should be mapped.
902 * @param ppv Where to store the address corresponding to GCPhys.
903 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
904 *
905 * @remarks The caller is responsible for dealing with access handlers.
906 * @todo Add an informational return code for pages with access handlers?
907 *
908 * @remark Avoid calling this API from within critical sections (other than the
909 * PGM one) because of the deadlock risk. External threads may need to
910 * delegate jobs to the EMTs.
911 * @thread Any thread.
912 */
913VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
914{
915#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
916
917 /*
918 * Find the page and make sure it's writable.
919 */
920 PPGMPAGE pPage;
921 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
922 if (RT_SUCCESS(rc))
923 {
924 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
925 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
926 if (RT_SUCCESS(rc))
927 {
928 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
929# if 0
930 pLock->pvMap = 0;
931 pLock->pvPage = pPage;
932# else
933 pLock->u32Dummy = UINT32_MAX;
934# endif
935 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
936 rc = VINF_SUCCESS;
937 }
938 }
939
940#else /* IN_RING3 || IN_RING0 */
941 int rc = pgmLock(pVM);
942 AssertRCReturn(rc, rc);
943
944 /*
945 * Query the Physical TLB entry for the page (may fail).
946 */
947 PPGMPAGEMAPTLBE pTlbe;
948 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
949 if (RT_SUCCESS(rc))
950 {
951 /*
952 * If the page is shared, the zero page, or being write monitored
 953 * it must be converted to a page that's writable if possible.
954 */
955 PPGMPAGE pPage = pTlbe->pPage;
956 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
957 {
958 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
959 if (RT_SUCCESS(rc))
960 {
961 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
962 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
963 }
964 }
965 if (RT_SUCCESS(rc))
966 {
967 /*
968 * Now, just perform the locking and calculate the return address.
969 */
970 PPGMPAGEMAP pMap = pTlbe->pMap;
971 if (pMap)
972 pMap->cRefs++;
973# if 0 /** @todo implement locking properly */
974 if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
975 if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
976 {
977 AssertMsgFailed(("%RGp is entering permanent locked state!\n", GCPhys));
978 if (pMap)
979 pMap->cRefs++; /* Extra ref to prevent it from going away. */
980 }
981# endif
982 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
983 pLock->pvPage = pPage;
984 pLock->pvMap = pMap;
985 }
986 }
987
988 pgmUnlock(pVM);
989#endif /* IN_RING3 || IN_RING0 */
990 return rc;
991}
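
/* Editorial example (not part of the original source): the short-term
 * map / modify / release pattern this API is meant for, as described in the
 * comment above; the page is made private and writable before the store and
 * the mapping lock is released as soon as possible. pVM, GCPhys and the
 * 4-byte write are assumptions made purely for illustration.
 *
 *     void          *pv;
 *     PGMPAGEMAPLOCK Lock;
 *     int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         *(uint32_t *)pv = UINT32_C(0);
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);
 *     }
 */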
992
993
994/**
995 * Requests the mapping of a guest page into the current context.
996 *
 997 * This API should only be used for very short term mappings, as it will consume
 998 * scarce resources (R0 and GC) in the mapping cache. When you're done
999 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1000 *
1001 * @returns VBox status code.
1002 * @retval VINF_SUCCESS on success.
 1003 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1004 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1005 *
1006 * @param pVM The VM handle.
1007 * @param GCPhys The guest physical address of the page that should be mapped.
1008 * @param ppv Where to store the address corresponding to GCPhys.
1009 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1010 *
1011 * @remarks The caller is responsible for dealing with access handlers.
1012 * @todo Add an informational return code for pages with access handlers?
1013 *
1014 * @remark Avoid calling this API from within critical sections (other than
1015 * the PGM one) because of the deadlock risk.
1016 * @thread Any thread.
1017 */
1018VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1019{
1020#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1021
1022 /*
1023 * Find the page and make sure it's readable.
1024 */
1025 PPGMPAGE pPage;
1026 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1027 if (RT_SUCCESS(rc))
1028 {
1029 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1030 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1031 else
1032 {
1033 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1034# if 0
1035 pLock->pvMap = 0;
1036 pLock->pvPage = pPage;
1037# else
1038 pLock->u32Dummy = UINT32_MAX;
1039# endif
1040 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1041 rc = VINF_SUCCESS;
1042 }
1043 }
1044
1045#else /* IN_RING3 || IN_RING0 */
1046 int rc = pgmLock(pVM);
1047 AssertRCReturn(rc, rc);
1048
1049 /*
1050 * Query the Physical TLB entry for the page (may fail).
1051 */
1052 PPGMPAGEMAPTLBE pTlbe;
1053 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1054 if (RT_SUCCESS(rc))
1055 {
 1056 /* MMIO pages don't have any readable backing. */
1057 PPGMPAGE pPage = pTlbe->pPage;
1058 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1059 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1060 else
1061 {
1062 /*
1063 * Now, just perform the locking and calculate the return address.
1064 */
1065 PPGMPAGEMAP pMap = pTlbe->pMap;
1066 if (pMap)
1067 pMap->cRefs++;
1068# if 0 /** @todo implement locking properly */
1069 if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
1070 if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
1071 {
1072 AssertMsgFailed(("%RGp is entering permanent locked state!\n", GCPhys));
1073 if (pMap)
1074 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1075 }
1076# endif
1077 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1078 pLock->pvPage = pPage;
1079 pLock->pvMap = pMap;
1080 }
1081 }
1082
1083 pgmUnlock(pVM);
1084#endif /* IN_RING3 || IN_RING0 */
1085 return rc;
1086}
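
/* Editorial example (not part of the original source): a read-only peek at a
 * guest page that avoids forcing the page private; pVM and GCPhys are assumed
 * for illustration only.
 *
 *     void const    *pv;
 *     PGMPAGEMAPLOCK Lock;
 *     if (RT_SUCCESS(PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock)))
 *     {
 *         uint8_t const bFirst = *(uint8_t const *)pv;
 *         NOREF(bFirst);
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);
 *     }
 */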
1087
1088
1089/**
1090 * Requests the mapping of a guest page given by virtual address into the current context.
1091 *
 1092 * This API should only be used for very short term mappings, as it will consume
 1093 * scarce resources (R0 and GC) in the mapping cache. When you're done
1094 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1095 *
1096 * This API will assume your intention is to write to the page, and will
1097 * therefore replace shared and zero pages. If you do not intend to modify
1098 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1099 *
1100 * @returns VBox status code.
1101 * @retval VINF_SUCCESS on success.
1102 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1103 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
 1104 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1105 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1106 *
1107 * @param pVM The VM handle.
 1108 * @param GCPtr The guest virtual address of the page that should be mapped.
1109 * @param ppv Where to store the address corresponding to GCPhys.
1110 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1111 *
1112 * @remark Avoid calling this API from within critical sections (other than
1113 * the PGM one) because of the deadlock risk.
1114 * @thread EMT
1115 */
1116VMMDECL(int) PGMPhysGCPtr2CCPtr(PVM pVM, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1117{
1118 VM_ASSERT_EMT(pVM);
1119 RTGCPHYS GCPhys;
1120 int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
1121 if (RT_SUCCESS(rc))
1122 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, pLock);
1123 return rc;
1124}
1125
1126
1127/**
1128 * Requests the mapping of a guest page given by virtual address into the current context.
1129 *
 1130 * This API should only be used for very short term mappings, as it will consume
 1131 * scarce resources (R0 and GC) in the mapping cache. When you're done
1132 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1133 *
1134 * @returns VBox status code.
1135 * @retval VINF_SUCCESS on success.
1136 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1137 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
 1138 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1139 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1140 *
1141 * @param pVM The VM handle.
 1142 * @param GCPtr The guest virtual address of the page that should be mapped.
1143 * @param ppv Where to store the address corresponding to GCPhys.
1144 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1145 *
1146 * @remark Avoid calling this API from within critical sections (other than
1147 * the PGM one) because of the deadlock risk.
1148 * @thread EMT
1149 */
1150VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVM pVM, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1151{
1152 VM_ASSERT_EMT(pVM);
1153 RTGCPHYS GCPhys;
1154 int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
1155 if (RT_SUCCESS(rc))
1156 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, pLock);
1157 return rc;
1158}
1159
1160
1161/**
1162 * Release the mapping of a guest page.
1163 *
 1164 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
 1165 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1166 *
1167 * @param pVM The VM handle.
1168 * @param pLock The lock structure initialized by the mapping function.
1169 */
1170VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1171{
1172#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1173 /* currently nothing to do here. */
1174 Assert(pLock->u32Dummy == UINT32_MAX);
1175 pLock->u32Dummy = 0;
1176
1177#else /* IN_RING3 */
1178 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1179 if (!pMap)
1180 {
 1181 /* The ZERO page and MMIO2 end up here. */
1182 Assert(pLock->pvPage);
1183 pLock->pvPage = NULL;
1184 }
1185 else
1186 {
1187 pgmLock(pVM);
1188
1189# if 0 /** @todo implement page locking */
1190 PPGMPAGE pPage = (PPGMPAGE)pLock->pvPage;
1191 Assert(pPage->cLocks >= 1);
1192 if (pPage->cLocks != PGM_PAGE_MAX_LOCKS)
1193 pPage->cLocks--;
1194# endif
1195
1196 Assert(pMap->cRefs >= 1);
1197 pMap->cRefs--;
1198 pMap->iAge = 0;
1199
1200 pgmUnlock(pVM);
1201 }
1202#endif /* IN_RING3 */
1203}
1204
1205
1206/**
1207 * Converts a GC physical address to a HC ring-3 pointer.
1208 *
1209 * @returns VINF_SUCCESS on success.
 1210 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1211 * page but has no physical backing.
1212 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1213 * GC physical address.
1214 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1215 * a dynamic ram chunk boundary
1216 *
1217 * @param pVM The VM handle.
1218 * @param GCPhys The GC physical address to convert.
1219 * @param cbRange Physical range
1220 * @param pR3Ptr Where to store the R3 pointer on success.
1221 *
1222 * @deprecated Avoid when possible!
1223 */
1224VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
1225{
1226/** @todo this is kind of hacky and needs some more work. */
1227 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1228
 1229 Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): don't use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
1230#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1231 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1232#else
1233 pgmLock(pVM);
1234
1235 PPGMRAMRANGE pRam;
1236 PPGMPAGE pPage;
1237 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
1238 if (RT_SUCCESS(rc))
1239 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
1240
1241 pgmUnlock(pVM);
1242 Assert(rc <= VINF_SUCCESS);
1243 return rc;
1244#endif
1245}
1246
1247
1248#ifdef VBOX_STRICT
1249/**
1250 * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
1251 *
1252 * @returns The R3Ptr, NIL_RTR3PTR on failure.
1253 * @param pVM The VM handle.
 1254 * @param GCPhys The GC physical address.
1255 * @param cbRange Physical range.
1256 *
1257 * @deprecated Avoid when possible.
1258 */
1259VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
1260{
1261 RTR3PTR R3Ptr;
1262 int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
1263 if (RT_SUCCESS(rc))
1264 return R3Ptr;
1265 return NIL_RTR3PTR;
1266}
1267#endif /* VBOX_STRICT */
1268
1269
1270/**
1271 * Converts a guest pointer to a GC physical address.
1272 *
1273 * This uses the current CR3/CR0/CR4 of the guest.
1274 *
1275 * @returns VBox status code.
1276 * @param pVM The VM Handle
1277 * @param GCPtr The guest pointer to convert.
1278 * @param pGCPhys Where to store the GC physical address.
1279 */
1280VMMDECL(int) PGMPhysGCPtr2GCPhys(PVM pVM, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1281{
1282 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1283 if (pGCPhys && RT_SUCCESS(rc))
1284 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1285 return rc;
1286}
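
/* Editorial example (not part of the original source): translating a guest
 * virtual address and checking that it is backed by normal RAM; GCPtr is an
 * assumed guest pointer and the Log line is illustrative only.
 *
 *     RTGCPHYS GCPhys;
 *     int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
 *     if (RT_SUCCESS(rc) && PGMPhysIsGCPhysNormal(pVM, GCPhys))
 *         Log(("GCPtr %RGv -> normal RAM at %RGp\n", GCPtr, GCPhys));
 */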
1287
1288
1289/**
1290 * Converts a guest pointer to a HC physical address.
1291 *
1292 * This uses the current CR3/CR0/CR4 of the guest.
1293 *
1294 * @returns VBox status code.
1295 * @param pVM The VM Handle
1296 * @param GCPtr The guest pointer to convert.
1297 * @param pHCPhys Where to store the HC physical address.
1298 */
1299VMMDECL(int) PGMPhysGCPtr2HCPhys(PVM pVM, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
1300{
1301 RTGCPHYS GCPhys;
1302 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1303 if (RT_SUCCESS(rc))
1304 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
1305 return rc;
1306}
1307
1308
1309/**
1310 * Converts a guest pointer to a R3 pointer.
1311 *
1312 * This uses the current CR3/CR0/CR4 of the guest.
1313 *
1314 * @returns VBox status code.
1315 * @param pVM The VM Handle
1316 * @param GCPtr The guest pointer to convert.
1317 * @param pR3Ptr Where to store the R3 virtual address.
1318 *
1319 * @deprecated Don't use this.
1320 */
1321VMMDECL(int) PGMPhysGCPtr2R3Ptr(PVM pVM, RTGCPTR GCPtr, PRTR3PTR pR3Ptr)
1322{
1323 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1324 RTGCPHYS GCPhys;
1325 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1326 if (RT_SUCCESS(rc))
1327 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pR3Ptr);
1328 return rc;
1329}
1330
1331
1332
1333#undef LOG_GROUP
1334#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
1335
1336
1337#ifdef IN_RING3
1338/**
1339 * Cache PGMPhys memory access
1340 *
1341 * @param pVM VM Handle.
1342 * @param pCache Cache structure pointer
1343 * @param GCPhys GC physical address
 1344 * @param pbR3 R3 pointer corresponding to the physical page
1345 *
1346 * @thread EMT.
1347 */
1348static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
1349{
1350 uint32_t iCacheIndex;
1351
1352 Assert(VM_IS_EMT(pVM));
1353
1354 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
1355 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
1356
1357 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
1358
1359 ASMBitSet(&pCache->aEntries, iCacheIndex);
1360
1361 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
1362 pCache->Entry[iCacheIndex].pbR3 = pbR3;
1363}
1364#endif /* IN_RING3 */
1365
1366
1367/**
1368 * Deals with reading from a page with one or more ALL access handlers.
1369 *
1370 * @returns VBox status code. Can be ignored in ring-3.
1371 * @retval VINF_SUCCESS.
1372 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1373 *
1374 * @param pVM The VM handle.
1375 * @param pPage The page descriptor.
1376 * @param GCPhys The physical address to start reading at.
1377 * @param pvBuf Where to put the bits we read.
 1378 * @param cb How much to read - less than or equal to a page.
1379 */
1380static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
1381{
1382 /*
1383 * The most frequent access here is MMIO and shadowed ROM.
 1384 * The current code ASSUMES all these access handlers cover full pages!
1385 */
1386
1387 /*
1388 * Whatever we do we need the source page, map it first.
1389 */
1390 const void *pvSrc = NULL;
1391 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
1392 if (RT_FAILURE(rc))
1393 {
1394 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1395 GCPhys, pPage, rc));
1396 memset(pvBuf, 0xff, cb);
1397 return VINF_SUCCESS;
1398 }
1399 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1400
1401 /*
1402 * Deal with any physical handlers.
1403 */
1404 PPGMPHYSHANDLER pPhys = NULL;
1405 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
1406 {
1407#ifdef IN_RING3
1408 PPGMPHYSHANDLER pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1409 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1410 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
1411 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
1412 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1413 Assert(pPhys->CTX_SUFF(pfnHandler));
1414
1415 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
1416 STAM_PROFILE_START(&pPhys->Stat, h);
1417 rc = pPhys->CTX_SUFF(pfnHandler)(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pPhys->CTX_SUFF(pvUser));
1418 STAM_PROFILE_STOP(&pPhys->Stat, h);
1419 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
1420#else
1421 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1422 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1423 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1424#endif
1425 }
1426
1427 /*
1428 * Deal with any virtual handlers.
1429 */
1430 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
1431 {
1432 unsigned iPage;
1433 PPGMVIRTHANDLER pVirt;
1434
1435 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
1436 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
1437 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
1438 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1439 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
1440
1441#ifdef IN_RING3
1442 if (pVirt->pfnHandlerR3)
1443 {
1444 if (!pPhys)
 1445 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1446 else
 1447 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
1448 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1449 + (iPage << PAGE_SHIFT)
1450 + (GCPhys & PAGE_OFFSET_MASK);
1451
1452 STAM_PROFILE_START(&pVirt->Stat, h);
1453 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
1454 STAM_PROFILE_STOP(&pVirt->Stat, h);
1455 if (rc2 == VINF_SUCCESS)
1456 rc = VINF_SUCCESS;
1457 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
1458 }
1459 else
 1460 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1461#else
1462 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1463 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1464 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1465#endif
1466 }
1467
1468 /*
1469 * Take the default action.
1470 */
1471 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1472 memcpy(pvBuf, pvSrc, cb);
1473 return rc;
1474}
1475
1476
1477/**
1478 * Read physical memory.
1479 *
1480 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
1481 * want to ignore those.
1482 *
1483 * @returns VBox status code. Can be ignored in ring-3.
1484 * @retval VINF_SUCCESS.
1485 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1486 *
1487 * @param pVM VM Handle.
1488 * @param GCPhys Physical address start reading from.
1489 * @param pvBuf Where to put the read bits.
1490 * @param cbRead How many bytes to read.
1491 */
1492VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1493{
1494 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
1495 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
1496
1497 pgmLock(pVM);
1498
1499 /*
1500 * Copy loop on ram ranges.
1501 */
1502 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1503 for (;;)
1504 {
1505 /* Find range. */
1506 while (pRam && GCPhys > pRam->GCPhysLast)
1507 pRam = pRam->CTX_SUFF(pNext);
1508 /* Inside range or not? */
1509 if (pRam && GCPhys >= pRam->GCPhys)
1510 {
1511 /*
1512 * Must work our way thru this page by page.
1513 */
1514 RTGCPHYS off = GCPhys - pRam->GCPhys;
1515 while (off < pRam->cb)
1516 {
1517 unsigned iPage = off >> PAGE_SHIFT;
1518 PPGMPAGE pPage = &pRam->aPages[iPage];
1519 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1520 if (cb > cbRead)
1521 cb = cbRead;
1522
1523 /*
1524 * Any ALL access handlers?
1525 */
1526 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
1527 {
1528 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
1529 if (RT_FAILURE(rc))
1530 return rc;
1531 }
1532 else
1533 {
1534 /*
1535 * Get the pointer to the page.
1536 */
1537 const void *pvSrc;
1538 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
1539 if (RT_SUCCESS(rc))
1540 memcpy(pvBuf, pvSrc, cb);
1541 else
1542 {
1543 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1544 pRam->GCPhys + off, pPage, rc));
1545 memset(pvBuf, 0xff, cb);
1546 }
1547 }
1548
1549 /* next page */
1550 if (cb >= cbRead)
1551 {
1552 pgmUnlock(pVM);
1553 return VINF_SUCCESS;
1554 }
1555 cbRead -= cb;
1556 off += cb;
1557 pvBuf = (char *)pvBuf + cb;
1558 } /* walk pages in ram range. */
1559
1560 GCPhys = pRam->GCPhysLast + 1;
1561 }
1562 else
1563 {
1564 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
1565
1566 /*
1567 * Unassigned address space.
1568 */
1569 if (!pRam)
1570 break;
1571 size_t cb = pRam->GCPhys - GCPhys;
1572 if (cb >= cbRead)
1573 {
1574 memset(pvBuf, 0xff, cbRead);
1575 break;
1576 }
1577 memset(pvBuf, 0xff, cb);
1578
1579 cbRead -= cb;
1580 pvBuf = (char *)pvBuf + cb;
1581 GCPhys += cb;
1582 }
1583 } /* Ram range walk */
1584
1585 pgmUnlock(pVM);
1586 return VINF_SUCCESS;
1587}
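
/* Editorial example (not part of the original source): reading a small guest
 * structure while respecting access handlers and MMIO, which is what this API
 * does (use PGMPhysSimpleReadGCPhys to bypass them). pVM and GCPhysDesc are
 * assumptions for illustration; ring-3 callers may ignore the status code as
 * noted above.
 *
 *     uint8_t abDesc[16];
 *     int rc = PGMPhysRead(pVM, GCPhysDesc, abDesc, sizeof(abDesc));
 *     AssertRC(rc);
 */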
1588
1589
1590/**
1591 * Deals with writing to a page with one or more WRITE or ALL access handlers.
1592 *
1593 * @returns VBox status code. Can be ignored in ring-3.
1594 * @retval VINF_SUCCESS.
1595 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1596 *
1597 * @param pVM The VM handle.
1598 * @param pPage The page descriptor.
1599 * @param GCPhys The physical address to start writing at.
1600 * @param pvBuf What to write.
 1601 * @param cbWrite How much to write - less than or equal to a page.
1602 */
1603static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
1604{
1605 void *pvDst = NULL;
1606 int rc;
1607
1608 /*
1609 * Give priority to physical handlers (like #PF does).
1610 *
1611 * Hope for a lonely physical handler first that covers the whole
1612 * write area. This should be a pretty frequent case with MMIO and
1613 * the heavy usage of full page handlers in the page pool.
1614 */
1615 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
1616 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
1617 {
1618 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1619 if (pCur)
1620 {
1621 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1622 Assert(pCur->CTX_SUFF(pfnHandler));
1623
1624 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
1625 if (cbRange > cbWrite)
1626 cbRange = cbWrite;
1627
1628#ifndef IN_RING3
1629 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1630 NOREF(cbRange);
1631 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1632 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1633
1634#else /* IN_RING3 */
1635 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1636 if (!PGM_PAGE_IS_MMIO(pPage))
1637 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1638 else
1639 rc = VINF_SUCCESS;
1640 if (RT_SUCCESS(rc))
1641 {
1642 STAM_PROFILE_START(&pCur->Stat, h);
1643 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pCur->CTX_SUFF(pvUser));
1644 STAM_PROFILE_STOP(&pCur->Stat, h);
1645 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1646 memcpy(pvDst, pvBuf, cbRange);
1647 else
1648 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
1649 }
1650 else
1651 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1652 GCPhys, pPage, rc), rc);
1653 if (RT_LIKELY(cbRange == cbWrite))
1654 return VINF_SUCCESS;
1655
1656 /* more fun to be had below */
1657 cbWrite -= cbRange;
1658 GCPhys += cbRange;
1659 pvBuf = (uint8_t *)pvBuf + cbRange;
1660 pvDst = (uint8_t *)pvDst + cbRange;
1661#endif /* IN_RING3 */
1662 }
1663 /* else: the handler is somewhere else in the page, deal with it below. */
1664 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
1665 }
1666 /*
1667 * A virtual handler without any interfering physical handlers.
 1668 * Hopefully it'll cover the whole write.
1669 */
1670 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
1671 {
1672 unsigned iPage;
1673 PPGMVIRTHANDLER pCur;
1674 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
1675 if (RT_SUCCESS(rc))
1676 {
1677 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
1678 if (cbRange > cbWrite)
1679 cbRange = cbWrite;
1680
1681#ifndef IN_RING3
1682 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1683 NOREF(cbRange);
1684 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1685 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1686
1687#else /* IN_RING3 */
1688
1689 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1690 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1691 if (RT_SUCCESS(rc))
1692 {
1693 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1694 if (pCur->pfnHandlerR3)
1695 {
1696 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
1697 + (iPage << PAGE_SHIFT)
1698 + (GCPhys & PAGE_OFFSET_MASK);
1699
1700 STAM_PROFILE_START(&pCur->Stat, h);
1701 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
1702 STAM_PROFILE_STOP(&pCur->Stat, h);
1703 }
1704 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1705 memcpy(pvDst, pvBuf, cbRange);
1706 else
1707 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
1708 }
1709 else
1710 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1711 GCPhys, pPage, rc), rc);
1712 if (RT_LIKELY(cbRange == cbWrite))
1713 return VINF_SUCCESS;
1714
1715 /* more fun to be had below */
1716 cbWrite -= cbRange;
1717 GCPhys += cbRange;
1718 pvBuf = (uint8_t *)pvBuf + cbRange;
1719 pvDst = (uint8_t *)pvDst + cbRange;
1720#endif
1721 }
1722 /* else: the handler is somewhere else in the page, deal with it below. */
1723 }
1724
1725 /*
1726 * Deal with all the odds and ends.
1727 */
1728
1729 /* We need a writable destination page. */
1730 if (!pvDst)
1731 {
1732 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1733 AssertLogRelMsgReturn(RT_SUCCESS(rc),
1734 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1735 GCPhys, pPage, rc), rc);
1736 }
1737
1738 /* The loop state (big + ugly). */
1739 unsigned iVirtPage = 0;
1740 PPGMVIRTHANDLER pVirt = NULL;
1741 uint32_t offVirt = PAGE_SIZE;
1742 uint32_t offVirtLast = PAGE_SIZE;
1743 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
1744
1745 PPGMPHYSHANDLER pPhys = NULL;
1746 uint32_t offPhys = PAGE_SIZE;
1747 uint32_t offPhysLast = PAGE_SIZE;
1748 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
1749
1750 /* The loop. */
1751 for (;;)
1752 {
1753 /*
1754 * Find the closest handler at or above GCPhys.
1755 */
1756 if (fMoreVirt && !pVirt)
1757 {
1758 int rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
1759 if (RT_SUCCESS(rc))
1760 {
1761 offVirt = 0;
1762 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1763 }
1764 else
1765 {
1766 PPGMPHYS2VIRTHANDLER pVirtPhys;
1767 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
1768 GCPhys, true /* fAbove */);
1769 if ( pVirtPhys
1770 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
1771 {
1772 /* ASSUME that pVirtPhys only covers one page. */
1773 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
1774 Assert(pVirtPhys->Core.Key > GCPhys);
1775
1776 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
1777 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
1778 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1779 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1780 }
1781 else
1782 {
1783 pVirt = NULL;
1784 fMoreVirt = false;
1785 offVirt = offVirtLast = PAGE_SIZE;
1786 }
1787 }
1788 }
1789
1790 if (fMorePhys && !pPhys)
1791 {
1792 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1793 if (pPhys)
1794 {
1795 offPhys = 0;
1796 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
1797 }
1798 else
1799 {
1800 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
1801 GCPhys, true /* fAbove */);
1802 if ( pPhys
1803 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
1804 {
1805 offPhys = pPhys->Core.Key - GCPhys;
1806 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
1807 }
1808 else
1809 {
1810 pPhys = NULL;
1811 fMorePhys = false;
1812 offPhys = offPhysLast = PAGE_SIZE;
1813 }
1814 }
1815 }
1816
1817 /*
1818 * Handle access to space without handlers (that's easy).
1819 */
1820 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1821 uint32_t cbRange = (uint32_t)cbWrite;
1822 if (offPhys && offVirt)
1823 {
1824 if (cbRange > offPhys)
1825 cbRange = offPhys;
1826 if (cbRange > offVirt)
1827 cbRange = offVirt;
1828 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
1829 }
1830 /*
1831 * Physical handler.
1832 */
1833 else if (!offPhys && offVirt)
1834 {
1835 if (cbRange > offPhysLast + 1)
1836 cbRange = offPhysLast + 1;
1837 if (cbRange > offVirt)
1838 cbRange = offVirt;
1839#ifdef IN_RING3
1840 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
1841 STAM_PROFILE_START(&pPhys->Stat, h);
1842 rc = pPhys->CTX_SUFF(pfnHandler)(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pPhys->CTX_SUFF(pvUser));
1843 STAM_PROFILE_STOP(&pPhys->Stat, h);
1844 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pPhys->pszDesc));
1845 pPhys = NULL;
1846#else
1847 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1848 NOREF(cbRange);
1849 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1850 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1851#endif
1852 }
1853 /*
1854 * Virtual handler.
1855 */
1856 else if (offPhys && !offVirt)
1857 {
1858 if (cbRange > offVirtLast + 1)
1859 cbRange = offVirtLast + 1;
1860 if (cbRange > offPhys)
1861 cbRange = offPhys;
1862#ifdef IN_RING3
1863            Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
1864 if (pVirt->pfnHandlerR3)
1865 {
1866 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1867 + (iVirtPage << PAGE_SHIFT)
1868 + (GCPhys & PAGE_OFFSET_MASK);
1869 STAM_PROFILE_START(&pVirt->Stat, h);
1870 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
1871 STAM_PROFILE_STOP(&pVirt->Stat, h);
1872 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
1873 }
1874 pVirt = NULL;
1875#else
1876 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1877 NOREF(cbRange);
1878 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1879 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1880#endif
1881 }
1882 /*
1883 * Both... give the physical one priority.
1884 */
1885 else
1886 {
1887 Assert(!offPhys && !offVirt);
1888 if (cbRange > offVirtLast + 1)
1889 cbRange = offVirtLast + 1;
1890 if (cbRange > offPhysLast + 1)
1891 cbRange = offPhysLast + 1;
1892
1893#ifdef IN_RING3
1894 if (pVirt->pfnHandlerR3)
1895 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
1896 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
1897
1898 STAM_PROFILE_START(&pPhys->Stat, h);
1899 rc = pPhys->CTX_SUFF(pfnHandler)(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pPhys->CTX_SUFF(pvUser));
1900 STAM_PROFILE_STOP(&pPhys->Stat, h);
1901 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pPhys->pszDesc));
1902 if (pVirt->pfnHandlerR3)
1903 {
1904
1905 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1906 + (iVirtPage << PAGE_SHIFT)
1907 + (GCPhys & PAGE_OFFSET_MASK);
1908 STAM_PROFILE_START(&pVirt->Stat, h);
1909 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
1910 STAM_PROFILE_STOP(&pVirt->Stat, h);
1911                AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc2=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
1912 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
1913 rc = VINF_SUCCESS;
1914 }
1915 pPhys = NULL;
1916 pVirt = NULL;
1917#else
1918 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1919 NOREF(cbRange);
1920 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1921 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1922#endif
1923 }
1924 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1925 memcpy(pvDst, pvBuf, cbRange);
1926
1927 /*
1928 * Advance if we've got more stuff to do.
1929 */
1930 if (cbRange >= cbWrite)
1931 return VINF_SUCCESS;
1932
1933 cbWrite -= cbRange;
1934 GCPhys += cbRange;
1935 pvBuf = (uint8_t *)pvBuf + cbRange;
1936 pvDst = (uint8_t *)pvDst + cbRange;
1937
1938 offPhys -= cbRange;
1939 offPhysLast -= cbRange;
1940 offVirt -= cbRange;
1941 offVirtLast -= cbRange;
1942 }
1943}
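
/*
 * Hypothetical sketch (illustration only): the loop above chops a write into
 * chunks based on the distance to the nearest physical/virtual handler. The
 * helper below restates that clamping logic on its own, using the same
 * offPhys/offVirt/offPhysLast/offVirtLast meaning as above; the function name
 * and its standalone form are made up for illustration.
 */
static uint32_t pgmPhysExampleChunkSize(uint32_t cbWrite, uint32_t offPhys, uint32_t offPhysLast,
                                        uint32_t offVirt, uint32_t offVirtLast)
{
    uint32_t cbRange = cbWrite;
    if (offPhys && offVirt)             /* plain memory before any handler */
    {
        if (cbRange > offPhys)          cbRange = offPhys;
        if (cbRange > offVirt)          cbRange = offVirt;
    }
    else if (!offPhys && offVirt)       /* inside a physical handler only */
    {
        if (cbRange > offPhysLast + 1)  cbRange = offPhysLast + 1;
        if (cbRange > offVirt)          cbRange = offVirt;
    }
    else if (offPhys && !offVirt)       /* inside a virtual handler only */
    {
        if (cbRange > offVirtLast + 1)  cbRange = offVirtLast + 1;
        if (cbRange > offPhys)          cbRange = offPhys;
    }
    else                                /* inside both; the physical one gets priority */
    {
        if (cbRange > offVirtLast + 1)  cbRange = offVirtLast + 1;
        if (cbRange > offPhysLast + 1)  cbRange = offPhysLast + 1;
    }
    return cbRange;
}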
1944
1945
1946/**
1947 * Write to physical memory.
1948 *
1949 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
1950 * want to ignore those.
1951 *
1952 * @returns VBox status code. Can be ignored in ring-3.
1953 * @retval VINF_SUCCESS.
1954 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1955 *
1956 * @param pVM VM Handle.
1957 * @param GCPhys Physical address to write to.
1958 * @param pvBuf What to write.
1959 * @param cbWrite How many bytes to write.
1960 */
1961VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
1962{
1963 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
1964 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
1965    LogFlow(("PGMPhysWrite: %RGp %zu\n", GCPhys, cbWrite));
1966
1967 pgmLock(pVM);
1968
1969 /*
1970 * Copy loop on ram ranges.
1971 */
1972 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1973 for (;;)
1974 {
1975 /* Find range. */
1976 while (pRam && GCPhys > pRam->GCPhysLast)
1977 pRam = pRam->CTX_SUFF(pNext);
1978 /* Inside range or not? */
1979 if (pRam && GCPhys >= pRam->GCPhys)
1980 {
1981 /*
1982 * Must work our way thru this page by page.
1983 */
1984 RTGCPTR off = GCPhys - pRam->GCPhys;
1985 while (off < pRam->cb)
1986 {
1987 RTGCPTR iPage = off >> PAGE_SHIFT;
1988 PPGMPAGE pPage = &pRam->aPages[iPage];
1989 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1990 if (cb > cbWrite)
1991 cb = cbWrite;
1992
1993 /*
1994 * Any active WRITE or ALL access handlers?
1995 */
1996 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1997 {
1998                    int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
1999                    if (RT_FAILURE(rc))
2000                    {   pgmUnlock(pVM);   return rc;   }
2001 }
2002 else
2003 {
2004 /*
2005 * Get the pointer to the page.
2006 */
2007 void *pvDst;
2008 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
2009 if (RT_SUCCESS(rc))
2010 memcpy(pvDst, pvBuf, cb);
2011 else
2012 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2013 pRam->GCPhys + off, pPage, rc));
2014 }
2015
2016 /* next page */
2017 if (cb >= cbWrite)
2018 {
2019 pgmUnlock(pVM);
2020 return VINF_SUCCESS;
2021 }
2022
2023 cbWrite -= cb;
2024 off += cb;
2025 pvBuf = (const char *)pvBuf + cb;
2026 } /* walk pages in ram range */
2027
2028 GCPhys = pRam->GCPhysLast + 1;
2029 }
2030 else
2031 {
2032 /*
2033 * Unassigned address space, skip it.
2034 */
2035 if (!pRam)
2036 break;
2037 size_t cb = pRam->GCPhys - GCPhys;
2038 if (cb >= cbWrite)
2039 break;
2040 cbWrite -= cb;
2041 pvBuf = (const char *)pvBuf + cb;
2042 GCPhys += cb;
2043 }
2044 } /* Ram range walk */
2045
2046 pgmUnlock(pVM);
2047 return VINF_SUCCESS;
2048}
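
/*
 * Hypothetical usage sketch (illustration only): writing a small payload into
 * guest memory while respecting access handlers. The helper name, address and
 * payload are made up; in R0 and RC the call may return
 * VERR_PGM_PHYS_WR_HIT_HANDLER, in which case the access must be redone in
 * ring-3.
 */
static int pgmPhysExampleWritePayload(PVM pVM, RTGCPHYS GCPhysDst)
{
    uint8_t abPayload[16];
    memset(abPayload, 0xa5, sizeof(abPayload));
    int rc = PGMPhysWrite(pVM, GCPhysDst, abPayload, sizeof(abPayload));
    if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)     /* R0/RC only: the handler must run in ring-3 */
        return rc;
    AssertRC(rc);
    return rc;
}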
2049
2050
2051/**
2052 * Read from guest physical memory by GC physical address, bypassing
2053 * MMIO and access handlers.
2054 *
2055 * @returns VBox status.
2056 * @param pVM VM handle.
2057 * @param pvDst The destination address.
2058 * @param GCPhysSrc The source address (GC physical address).
2059 * @param cb The number of bytes to read.
2060 */
2061VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2062{
2063 /*
2064 * Treat the first page as a special case.
2065 */
2066 if (!cb)
2067 return VINF_SUCCESS;
2068
2069 /* map the 1st page */
2070 void const *pvSrc;
2071 PGMPAGEMAPLOCK Lock;
2072 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2073 if (RT_FAILURE(rc))
2074 return rc;
2075
2076 /* optimize for the case where access is completely within the first page. */
2077 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2078 if (RT_LIKELY(cb <= cbPage))
2079 {
2080 memcpy(pvDst, pvSrc, cb);
2081 PGMPhysReleasePageMappingLock(pVM, &Lock);
2082 return VINF_SUCCESS;
2083 }
2084
2085 /* copy to the end of the page. */
2086 memcpy(pvDst, pvSrc, cbPage);
2087 PGMPhysReleasePageMappingLock(pVM, &Lock);
2088 GCPhysSrc += cbPage;
2089 pvDst = (uint8_t *)pvDst + cbPage;
2090 cb -= cbPage;
2091
2092 /*
2093 * Page by page.
2094 */
2095 for (;;)
2096 {
2097 /* map the page */
2098 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2099 if (RT_FAILURE(rc))
2100 return rc;
2101
2102 /* last page? */
2103 if (cb <= PAGE_SIZE)
2104 {
2105 memcpy(pvDst, pvSrc, cb);
2106 PGMPhysReleasePageMappingLock(pVM, &Lock);
2107 return VINF_SUCCESS;
2108 }
2109
2110 /* copy the entire page and advance */
2111 memcpy(pvDst, pvSrc, PAGE_SIZE);
2112 PGMPhysReleasePageMappingLock(pVM, &Lock);
2113 GCPhysSrc += PAGE_SIZE;
2114 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2115 cb -= PAGE_SIZE;
2116 }
2117 /* won't ever get here. */
2118}
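
/*
 * Hypothetical usage sketch (illustration only): a read that crosses a page
 * boundary. Since the function above splits the copy page by page itself, the
 * caller never has to care about page boundaries, only about the fact that
 * MMIO and access handlers are bypassed. The address is made up.
 */
static int pgmPhysExampleSimpleRead(PVM pVM)
{
    uint8_t  abBuf[32];
    RTGCPHYS GCPhysSrc = UINT32_C(0x1000) - 16;     /* starts 16 bytes before a page boundary */
    return PGMPhysSimpleReadGCPhys(pVM, abBuf, GCPhysSrc, sizeof(abBuf)); /* spans two pages */
}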
2119
2120#ifndef IN_RC /* Ring 0 & 3 only. (Just not needed in GC.) */
2121
2122/**
2123 * Write to guest physical memory referenced by GC physical address.
2125 *
2126 * This will bypass MMIO and access handlers.
2127 *
2128 * @returns VBox status.
2129 * @param pVM VM handle.
2130 * @param GCPhysDst The GC physical address of the destination.
2131 * @param pvSrc The source buffer.
2132 * @param cb The number of bytes to write.
2133 */
2134VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2135{
2136 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2137
2138 /*
2139 * Treat the first page as a special case.
2140 */
2141 if (!cb)
2142 return VINF_SUCCESS;
2143
2144 /* map the 1st page */
2145 void *pvDst;
2146 PGMPAGEMAPLOCK Lock;
2147 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2148 if (RT_FAILURE(rc))
2149 return rc;
2150
2151 /* optimize for the case where access is completely within the first page. */
2152 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2153 if (RT_LIKELY(cb <= cbPage))
2154 {
2155 memcpy(pvDst, pvSrc, cb);
2156 PGMPhysReleasePageMappingLock(pVM, &Lock);
2157 return VINF_SUCCESS;
2158 }
2159
2160 /* copy to the end of the page. */
2161 memcpy(pvDst, pvSrc, cbPage);
2162 PGMPhysReleasePageMappingLock(pVM, &Lock);
2163 GCPhysDst += cbPage;
2164 pvSrc = (const uint8_t *)pvSrc + cbPage;
2165 cb -= cbPage;
2166
2167 /*
2168 * Page by page.
2169 */
2170 for (;;)
2171 {
2172 /* map the page */
2173 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2174 if (RT_FAILURE(rc))
2175 return rc;
2176
2177 /* last page? */
2178 if (cb <= PAGE_SIZE)
2179 {
2180 memcpy(pvDst, pvSrc, cb);
2181 PGMPhysReleasePageMappingLock(pVM, &Lock);
2182 return VINF_SUCCESS;
2183 }
2184
2185 /* copy the entire page and advance */
2186 memcpy(pvDst, pvSrc, PAGE_SIZE);
2187 PGMPhysReleasePageMappingLock(pVM, &Lock);
2188 GCPhysDst += PAGE_SIZE;
2189 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2190 cb -= PAGE_SIZE;
2191 }
2192 /* won't ever get here. */
2193}
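
/*
 * Hypothetical usage sketch (illustration only): a write-then-read-back round
 * trip with the two simple GCPhys helpers. Both bypass MMIO and access
 * handlers, so this is only meaningful for plain RAM pages; the helper name
 * and magic value are made up.
 */
static int pgmPhysExampleRoundTrip(PVM pVM, RTGCPHYS GCPhys)
{
    uint32_t const uMagic = UINT32_C(0xdeadbeef);
    int rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhys, &uMagic, sizeof(uMagic));
    if (RT_SUCCESS(rc))
    {
        uint32_t uCheck = 0;
        rc = PGMPhysSimpleReadGCPhys(pVM, &uCheck, GCPhys, sizeof(uCheck));
        if (RT_SUCCESS(rc))
            Assert(uCheck == uMagic);
    }
    return rc;
}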
2194
2195
2196/**
2197 * Read from guest physical memory referenced by GC pointer.
2198 *
2199 * This function uses the current CR3/CR0/CR4 of the guest and will
2200 * bypass access handlers and not set any accessed bits.
2201 *
2202 * @returns VBox status.
2203 * @param pVM VM handle.
2204 * @param pvDst The destination address.
2205 * @param GCPtrSrc The source address (GC pointer).
2206 * @param cb The number of bytes to read.
2207 */
2208VMMDECL(int) PGMPhysSimpleReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2209{
2210 /*
2211 * Treat the first page as a special case.
2212 */
2213 if (!cb)
2214 return VINF_SUCCESS;
2215
2216 /* map the 1st page */
2217 void const *pvSrc;
2218 PGMPAGEMAPLOCK Lock;
2219 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVM, GCPtrSrc, &pvSrc, &Lock);
2220 if (RT_FAILURE(rc))
2221 return rc;
2222
2223 /* optimize for the case where access is completely within the first page. */
2224 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2225 if (RT_LIKELY(cb <= cbPage))
2226 {
2227 memcpy(pvDst, pvSrc, cb);
2228 PGMPhysReleasePageMappingLock(pVM, &Lock);
2229 return VINF_SUCCESS;
2230 }
2231
2232 /* copy to the end of the page. */
2233 memcpy(pvDst, pvSrc, cbPage);
2234 PGMPhysReleasePageMappingLock(pVM, &Lock);
2235 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
2236 pvDst = (uint8_t *)pvDst + cbPage;
2237 cb -= cbPage;
2238
2239 /*
2240 * Page by page.
2241 */
2242 for (;;)
2243 {
2244 /* map the page */
2245 rc = PGMPhysGCPtr2CCPtrReadOnly(pVM, GCPtrSrc, &pvSrc, &Lock);
2246 if (RT_FAILURE(rc))
2247 return rc;
2248
2249 /* last page? */
2250 if (cb <= PAGE_SIZE)
2251 {
2252 memcpy(pvDst, pvSrc, cb);
2253 PGMPhysReleasePageMappingLock(pVM, &Lock);
2254 return VINF_SUCCESS;
2255 }
2256
2257 /* copy the entire page and advance */
2258 memcpy(pvDst, pvSrc, PAGE_SIZE);
2259 PGMPhysReleasePageMappingLock(pVM, &Lock);
2260 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
2261 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2262 cb -= PAGE_SIZE;
2263 }
2264 /* won't ever get here. */
2265}
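
/*
 * Hypothetical usage sketch (illustration only): peeking at a guest virtual
 * address, e.g. for debugging, without tripping any access handler and without
 * touching the accessed bit in the guest PTE. The helper name is made up; the
 * pointer is assumed to come from the caller.
 */
static int pgmPhysExamplePeekGCPtr(PVM pVM, RTGCPTR GCPtrSrc)
{
    uint64_t u64 = 0;
    int rc = PGMPhysSimpleReadGCPtr(pVM, &u64, GCPtrSrc, sizeof(u64));
    if (RT_SUCCESS(rc))
        Log(("example: qword at %RGv is %#RX64\n", GCPtrSrc, u64));
    return rc;
}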
2266
2267
2268/**
2269 * Write to guest physical memory referenced by GC pointer.
2270 *
2271 * This function uses the current CR3/CR0/CR4 of the guest and will
2272 * bypass access handlers and not set dirty or accessed bits.
2273 *
2274 * @returns VBox status.
2275 * @param pVM VM handle.
2276 * @param GCPtrDst The destination address (GC pointer).
2277 * @param pvSrc The source address.
2278 * @param cb The number of bytes to write.
2279 */
2280VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2281{
2282 /*
2283 * Treat the first page as a special case.
2284 */
2285 if (!cb)
2286 return VINF_SUCCESS;
2287
2288 /* map the 1st page */
2289 void *pvDst;
2290 PGMPAGEMAPLOCK Lock;
2291 int rc = PGMPhysGCPtr2CCPtr(pVM, GCPtrDst, &pvDst, &Lock);
2292 if (RT_FAILURE(rc))
2293 return rc;
2294
2295 /* optimize for the case where access is completely within the first page. */
2296 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2297 if (RT_LIKELY(cb <= cbPage))
2298 {
2299 memcpy(pvDst, pvSrc, cb);
2300 PGMPhysReleasePageMappingLock(pVM, &Lock);
2301 return VINF_SUCCESS;
2302 }
2303
2304 /* copy to the end of the page. */
2305 memcpy(pvDst, pvSrc, cbPage);
2306 PGMPhysReleasePageMappingLock(pVM, &Lock);
2307 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2308 pvSrc = (const uint8_t *)pvSrc + cbPage;
2309 cb -= cbPage;
2310
2311 /*
2312 * Page by page.
2313 */
2314 for (;;)
2315 {
2316 /* map the page */
2317 rc = PGMPhysGCPtr2CCPtr(pVM, GCPtrDst, &pvDst, &Lock);
2318 if (RT_FAILURE(rc))
2319 return rc;
2320
2321 /* last page? */
2322 if (cb <= PAGE_SIZE)
2323 {
2324 memcpy(pvDst, pvSrc, cb);
2325 PGMPhysReleasePageMappingLock(pVM, &Lock);
2326 return VINF_SUCCESS;
2327 }
2328
2329 /* copy the entire page and advance */
2330 memcpy(pvDst, pvSrc, PAGE_SIZE);
2331 PGMPhysReleasePageMappingLock(pVM, &Lock);
2332 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2333 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2334 cb -= PAGE_SIZE;
2335 }
2336 /* won't ever get here. */
2337}
2338
2339
2340/**
2341 * Write to guest physical memory referenced by GC pointer and update the PTE.
2342 *
2343 * This function uses the current CR3/CR0/CR4 of the guest and will
2344 * bypass access handlers but will set any dirty and accessed bits in the PTE.
2345 *
2346 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
2347 *
2348 * @returns VBox status.
2349 * @param pVM VM handle.
2350 * @param GCPtrDst The destination address (GC pointer).
2351 * @param pvSrc The source address.
2352 * @param cb The number of bytes to write.
2353 */
2354VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2355{
2356 /*
2357 * Treat the first page as a special case.
2358 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage calls.
2359 */
2360 if (!cb)
2361 return VINF_SUCCESS;
2362
2363 /* map the 1st page */
2364 void *pvDst;
2365 PGMPAGEMAPLOCK Lock;
2366 int rc = PGMPhysGCPtr2CCPtr(pVM, GCPtrDst, &pvDst, &Lock);
2367 if (RT_FAILURE(rc))
2368 return rc;
2369
2370 /* optimize for the case where access is completely within the first page. */
2371 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2372 if (RT_LIKELY(cb <= cbPage))
2373 {
2374 memcpy(pvDst, pvSrc, cb);
2375 PGMPhysReleasePageMappingLock(pVM, &Lock);
2376 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2377 return VINF_SUCCESS;
2378 }
2379
2380 /* copy to the end of the page. */
2381 memcpy(pvDst, pvSrc, cbPage);
2382 PGMPhysReleasePageMappingLock(pVM, &Lock);
2383 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2384 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2385 pvSrc = (const uint8_t *)pvSrc + cbPage;
2386 cb -= cbPage;
2387
2388 /*
2389 * Page by page.
2390 */
2391 for (;;)
2392 {
2393 /* map the page */
2394 rc = PGMPhysGCPtr2CCPtr(pVM, GCPtrDst, &pvDst, &Lock);
2395 if (RT_FAILURE(rc))
2396 return rc;
2397
2398 /* last page? */
2399 if (cb <= PAGE_SIZE)
2400 {
2401 memcpy(pvDst, pvSrc, cb);
2402 PGMPhysReleasePageMappingLock(pVM, &Lock);
2403 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2404 return VINF_SUCCESS;
2405 }
2406
2407 /* copy the entire page and advance */
2408 memcpy(pvDst, pvSrc, PAGE_SIZE);
2409 PGMPhysReleasePageMappingLock(pVM, &Lock);
2410 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2411 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2412 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2413 cb -= PAGE_SIZE;
2414 }
2415 /* won't ever get here. */
2416}
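
/*
 * Hypothetical usage sketch (illustration only): choosing between the two
 * simple GC pointer write helpers. Both bypass access handlers; the dirty
 * variant above additionally sets the A and D bits in the guest PTE, which is
 * the right thing when the write should look as if the guest did it itself.
 * The helper name and value are made up.
 */
static int pgmPhysExamplePatchGuest(PVM pVM, RTGCPTR GCPtrDst, uint32_t u32NewValue)
{
    /* Patch guest memory and leave the PTE looking like a guest write: */
    return PGMPhysSimpleDirtyWriteGCPtr(pVM, GCPtrDst, &u32NewValue, sizeof(u32NewValue));
}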
2417
2418
2419/**
2420 * Read from guest physical memory referenced by GC pointer.
2421 *
2422 * This function uses the current CR3/CR0/CR4 of the guest and will
2423 * respect access handlers and set accessed bits.
2424 *
2425 * @returns VBox status.
2426 * @param pVM VM handle.
2427 * @param pvDst The destination address.
2428 * @param GCPtrSrc The source address (GC pointer).
2429 * @param cb The number of bytes to read.
2430 * @thread The vCPU EMT.
2431 */
2432VMMDECL(int) PGMPhysReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2433{
2434 RTGCPHYS GCPhys;
2435 uint64_t fFlags;
2436 int rc;
2437
2438 /*
2439 * Anything to do?
2440 */
2441 if (!cb)
2442 return VINF_SUCCESS;
2443
2444 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
2445
2446 /*
2447 * Optimize reads within a single page.
2448 */
2449 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2450 {
2451 /* Convert virtual to physical address + flags */
2452 rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2453 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2454 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2455
2456 /* mark the guest page as accessed. */
2457 if (!(fFlags & X86_PTE_A))
2458 {
2459 rc = PGMGstModifyPage(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2460 AssertRC(rc);
2461 }
2462
2463 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
2464 }
2465
2466 /*
2467 * Page by page.
2468 */
2469 for (;;)
2470 {
2471 /* Convert virtual to physical address + flags */
2472 rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2473 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2474 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2475
2476 /* mark the guest page as accessed. */
2477 if (!(fFlags & X86_PTE_A))
2478 {
2479 rc = PGMGstModifyPage(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2480 AssertRC(rc);
2481 }
2482
2483 /* copy */
2484 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2485        rc = PGMPhysRead(pVM, GCPhys, pvDst, RT_MIN(cbRead, cb)); /* last page: don't read beyond what was asked for */
2486 if (cbRead >= cb || RT_FAILURE(rc))
2487 return rc;
2488
2489 /* next */
2490 cb -= cbRead;
2491 pvDst = (uint8_t *)pvDst + cbRead;
2492 GCPtrSrc += cbRead;
2493 }
2494}
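
/*
 * Hypothetical usage sketch (illustration only): a handler-respecting read
 * through a guest virtual address. The function above translates via the
 * current guest page tables, marks the page(s) accessed and then goes through
 * PGMPhysRead, so MMIO and access handlers are honoured. The helper name is
 * made up; it is meant to be called on the EMT.
 */
static int pgmPhysExampleEmulatedRead(PVM pVM, RTGCPTR GCPtrSrc, void *pvDst, size_t cb)
{
    int rc = PGMPhysReadGCPtr(pVM, pvDst, GCPtrSrc, cb);
    if (RT_FAILURE(rc))
        Log(("example: reading %zu bytes at %RGv failed: %Rrc\n", cb, GCPtrSrc, rc));
    return rc;
}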
2495
2496
2497/**
2498 * Write to guest physical memory referenced by GC pointer.
2499 *
2500 * This function uses the current CR3/CR0/CR4 of the guest and will
2501 * respect access handlers and set dirty and accessed bits.
2502 *
2503 * @returns VBox status.
2504 * @retval VINF_SUCCESS.
2505 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2506 *
2507 * @param pVM VM handle.
2508 * @param GCPtrDst The destination address (GC pointer).
2509 * @param pvSrc The source address.
2510 * @param cb The number of bytes to write.
2511 */
2512VMMDECL(int) PGMPhysWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2513{
2514 RTGCPHYS GCPhys;
2515 uint64_t fFlags;
2516 int rc;
2517
2518 /*
2519 * Anything to do?
2520 */
2521 if (!cb)
2522 return VINF_SUCCESS;
2523
2524 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
2525
2526 /*
2527 * Optimize writes within a single page.
2528 */
2529 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2530 {
2531 /* Convert virtual to physical address + flags */
2532 rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2533 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2534 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2535
2536 /* Mention when we ignore X86_PTE_RW... */
2537 if (!(fFlags & X86_PTE_RW))
2538            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2539
2540 /* Mark the guest page as accessed and dirty if necessary. */
2541 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2542 {
2543 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2544 AssertRC(rc);
2545 }
2546
2547 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
2548 }
2549
2550 /*
2551 * Page by page.
2552 */
2553 for (;;)
2554 {
2555 /* Convert virtual to physical address + flags */
2556 rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2557 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2558 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2559
2560 /* Mention when we ignore X86_PTE_RW... */
2561 if (!(fFlags & X86_PTE_RW))
2562            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2563
2564 /* Mark the guest page as accessed and dirty if necessary. */
2565 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2566 {
2567 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2568 AssertRC(rc);
2569 }
2570
2571 /* copy */
2572 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2573        rc = PGMPhysWrite(pVM, GCPhys, pvSrc, RT_MIN(cbWrite, cb)); /* last page: don't write beyond what was asked for */
2574 if (cbWrite >= cb || RT_FAILURE(rc))
2575 return rc;
2576
2577 /* next */
2578 cb -= cbWrite;
2579        pvSrc = (const uint8_t *)pvSrc + cbWrite;
2580 GCPtrDst += cbWrite;
2581 }
2582}
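
/*
 * Hypothetical usage sketch (illustration only): a handler-respecting write
 * through a guest virtual address. As with PGMPhysWrite, ring-0 and raw-mode
 * callers must be prepared for VERR_PGM_PHYS_WR_HIT_HANDLER and fall back to
 * ring-3. The helper name is made up.
 */
static int pgmPhysExampleEmulatedWrite(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
{
    int rc = PGMPhysWriteGCPtr(pVM, GCPtrDst, pvSrc, cb);
    if (RT_FAILURE(rc))     /* includes VERR_PGM_PHYS_WR_HIT_HANDLER in R0/RC */
        Log(("example: writing %zu bytes at %RGv failed: %Rrc\n", cb, GCPtrDst, rc));
    return rc;
}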
2583
2584#endif /* !IN_RC */
2585
2586/**
2587 * Performs a read of guest virtual memory for instruction emulation.
2588 *
2589 * This will check permissions, raise exceptions and update the access bits.
2590 *
2591 * The current implementation will bypass all access handlers. It may later be
2592 * changed to at least respect MMIO.
2593 *
2594 *
2595 * @returns VBox status code suitable to scheduling.
2596 * @retval VINF_SUCCESS if the read was performed successfully.
2597 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2598 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2599 *
2600 * @param pVM The VM handle.
2601 * @param pCtxCore The context core.
2602 * @param pvDst Where to put the bytes we've read.
2603 * @param GCPtrSrc The source address.
2604 * @param cb The number of bytes to read. Not more than a page.
2605 *
2606 * @remark This function will dynamically map physical pages in GC. This may unmap
2607 * mappings done by the caller. Be careful!
2608 */
2609VMMDECL(int) PGMPhysInterpretedRead(PVM pVM, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
2610{
2611 Assert(cb <= PAGE_SIZE);
2612
2613/** @todo r=bird: This isn't perfect!
2614 * -# It's not checking for reserved bits being 1.
2615 * -# It's not correctly dealing with the access bit.
2616 * -# It's not respecting MMIO memory or any other access handlers.
2617 */
2618 /*
2619 * 1. Translate virtual to physical. This may fault.
2620 * 2. Map the physical address.
2621 * 3. Do the read operation.
2622 * 4. Set access bits if required.
2623 */
2624 int rc;
2625 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
2626 if (cb <= cb1)
2627 {
2628 /*
2629 * Not crossing pages.
2630 */
2631 RTGCPHYS GCPhys;
2632 uint64_t fFlags;
2633 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags, &GCPhys);
2634 if (RT_SUCCESS(rc))
2635 {
2636 /** @todo we should check reserved bits ... */
2637 void *pvSrc;
2638 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
2639 switch (rc)
2640 {
2641 case VINF_SUCCESS:
2642 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
2643 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
2644 break;
2645 case VERR_PGM_PHYS_PAGE_RESERVED:
2646 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2647 memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
2648 break;
2649 default:
2650 return rc;
2651 }
2652
2653 /** @todo access bit emulation isn't 100% correct. */
2654 if (!(fFlags & X86_PTE_A))
2655 {
2656 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2657 AssertRC(rc);
2658 }
2659 return VINF_SUCCESS;
2660 }
2661 }
2662 else
2663 {
2664 /*
2665 * Crosses pages.
2666 */
2667 size_t cb2 = cb - cb1;
2668 uint64_t fFlags1;
2669 RTGCPHYS GCPhys1;
2670 uint64_t fFlags2;
2671 RTGCPHYS GCPhys2;
2672 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags1, &GCPhys1);
2673 if (RT_SUCCESS(rc))
2674 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
2675 if (RT_SUCCESS(rc))
2676 {
2677 /** @todo we should check reserved bits ... */
2678 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
2679 void *pvSrc1;
2680 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
2681 switch (rc)
2682 {
2683 case VINF_SUCCESS:
2684 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
2685 break;
2686 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2687 memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
2688 break;
2689 default:
2690 return rc;
2691 }
2692
2693 void *pvSrc2;
2694 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
2695 switch (rc)
2696 {
2697 case VINF_SUCCESS:
2698 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
2699 break;
2700 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2701 memset((uint8_t *)pvDst + cb1, 0, cb2); /** @todo this is wrong, it should be 0xff */
2702 break;
2703 default:
2704 return rc;
2705 }
2706
2707 if (!(fFlags1 & X86_PTE_A))
2708 {
2709 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2710 AssertRC(rc);
2711 }
2712 if (!(fFlags2 & X86_PTE_A))
2713 {
2714 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2715 AssertRC(rc);
2716 }
2717 return VINF_SUCCESS;
2718 }
2719 }
2720
2721 /*
2722 * Raise a #PF.
2723 */
2724 uint32_t uErr;
2725
2726 /* Get the current privilege level. */
2727 uint32_t cpl = CPUMGetGuestCPL(pVM, pCtxCore);
2728 switch (rc)
2729 {
2730 case VINF_SUCCESS:
2731 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
2732 break;
2733
2734 case VERR_PAGE_NOT_PRESENT:
2735 case VERR_PAGE_TABLE_NOT_PRESENT:
2736 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
2737 break;
2738
2739 default:
2740 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
2741 return rc;
2742 }
2743 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
2744 return TRPMRaiseXcptErrCR2(pVM, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
2745}
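
/*
 * Hypothetical usage sketch (illustration only): how an instruction
 * interpreter might fetch a dword operand. On a guest paging problem the
 * function above raises the #PF itself and returns VINF_EM_RAW_GUEST_TRAP or
 * VINF_TRPM_XCPT_DISPATCHED, which the caller simply propagates to the
 * scheduler. The helper name is made up; pCtxCore and the operand address are
 * assumed to describe the instruction being emulated.
 */
static int pgmPhysExampleFetchOperand(PVM pVM, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrOperand, uint32_t *pu32)
{
    return PGMPhysInterpretedRead(pVM, pCtxCore, pu32, GCPtrOperand, sizeof(*pu32));
}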
2746
2747
2748/**
2749 * Performs a read of guest virtual memory for instruction emulation.
2750 *
2751 * This will check permissions, raise exceptions and update the access bits.
2752 *
2753 * The current implementation will bypass all access handlers. It may later be
2754 * changed to at least respect MMIO.
2755 *
2756 *
2757 * @returns VBox status code suitable to scheduling.
2758 * @retval VINF_SUCCESS if the read was performed successfully.
2759 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2760 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2761 *
2762 * @param pVM The VM handle.
2763 * @param pCtxCore The context core.
2764 * @param pvDst Where to put the bytes we've read.
2765 * @param GCPtrSrc The source address.
2766 * @param cb The number of bytes to read. Not more than a page.
2767 * @param fRaiseTrap If set the trap will be raised as per spec; if clear
2768 *                    an appropriate error status will be returned (no
2769 *                    informational status codes at all).
2770 *
2771 *
2772 * @remarks Takes the PGM lock.
2773 * @remarks A page fault on the 2nd page of the access will be raised without
2774 * writing the bits on the first page since we're ASSUMING that the
2775 * caller is emulating an instruction access.
2776 * @remarks This function will dynamically map physical pages in GC. This may
2777 * unmap mappings done by the caller. Be careful!
2778 */
2779VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVM pVM, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
2780{
2781 Assert(cb <= PAGE_SIZE);
2782
2783 /*
2784 * 1. Translate virtual to physical. This may fault.
2785 * 2. Map the physical address.
2786 * 3. Do the read operation.
2787 * 4. Set access bits if required.
2788 */
2789 int rc;
2790 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
2791 if (cb <= cb1)
2792 {
2793 /*
2794 * Not crossing pages.
2795 */
2796 RTGCPHYS GCPhys;
2797 uint64_t fFlags;
2798 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags, &GCPhys);
2799 if (RT_SUCCESS(rc))
2800 {
2801 if (1) /** @todo we should check reserved bits ... */
2802 {
2803 const void *pvSrc;
2804 PGMPAGEMAPLOCK Lock;
2805 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
2806 switch (rc)
2807 {
2808 case VINF_SUCCESS:
2809 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
2810 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
2811 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
2812 break;
2813 case VERR_PGM_PHYS_PAGE_RESERVED:
2814 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2815 memset(pvDst, 0xff, cb);
2816 break;
2817 default:
2818 AssertMsgFailed(("%Rrc\n", rc));
2819 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
2820 return rc;
2821 }
2822 PGMPhysReleasePageMappingLock(pVM, &Lock);
2823
2824 if (!(fFlags & X86_PTE_A))
2825 {
2826 /** @todo access bit emulation isn't 100% correct. */
2827 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2828 AssertRC(rc);
2829 }
2830 return VINF_SUCCESS;
2831 }
2832 }
2833 }
2834 else
2835 {
2836 /*
2837 * Crosses pages.
2838 */
2839 size_t cb2 = cb - cb1;
2840 uint64_t fFlags1;
2841 RTGCPHYS GCPhys1;
2842 uint64_t fFlags2;
2843 RTGCPHYS GCPhys2;
2844 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags1, &GCPhys1);
2845 if (RT_SUCCESS(rc))
2846 {
2847 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
2848 if (RT_SUCCESS(rc))
2849 {
2850 if (1) /** @todo we should check reserved bits ... */
2851 {
2852 const void *pvSrc;
2853 PGMPAGEMAPLOCK Lock;
2854 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
2855 switch (rc)
2856 {
2857 case VINF_SUCCESS:
2858 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
2859 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
2860 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
2861 PGMPhysReleasePageMappingLock(pVM, &Lock);
2862 break;
2863 case VERR_PGM_PHYS_PAGE_RESERVED:
2864 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2865 memset(pvDst, 0xff, cb1);
2866 break;
2867 default:
2868 AssertMsgFailed(("%Rrc\n", rc));
2869 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
2870 return rc;
2871 }
2872
2873 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
2874 switch (rc)
2875 {
2876 case VINF_SUCCESS:
2877 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
2878 PGMPhysReleasePageMappingLock(pVM, &Lock);
2879 break;
2880 case VERR_PGM_PHYS_PAGE_RESERVED:
2881 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2882 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
2883 break;
2884 default:
2885 AssertMsgFailed(("%Rrc\n", rc));
2886 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
2887 return rc;
2888 }
2889
2890 if (!(fFlags1 & X86_PTE_A))
2891 {
2892 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2893 AssertRC(rc);
2894 }
2895 if (!(fFlags2 & X86_PTE_A))
2896 {
2897 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2898 AssertRC(rc);
2899 }
2900 return VINF_SUCCESS;
2901 }
2902 /* sort out which page */
2903 }
2904 else
2905 GCPtrSrc += cb1; /* fault on 2nd page */
2906 }
2907 }
2908
2909 /*
2910 * Raise a #PF if we're allowed to do that.
2911 */
2912 /* Calc the error bits. */
2913 uint32_t cpl = CPUMGetGuestCPL(pVM, pCtxCore);
2914 uint32_t uErr;
2915 switch (rc)
2916 {
2917 case VINF_SUCCESS:
2918 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
2919 rc = VERR_ACCESS_DENIED;
2920 break;
2921
2922 case VERR_PAGE_NOT_PRESENT:
2923 case VERR_PAGE_TABLE_NOT_PRESENT:
2924 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
2925 break;
2926
2927 default:
2928 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
2929 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
2930 return rc;
2931 }
2932 if (fRaiseTrap)
2933 {
2934 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
2935 return TRPMRaiseXcptErrCR2(pVM, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
2936 }
2937 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
2938 return rc;
2939}
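
/*
 * Hypothetical usage sketch (illustration only): reading an operand where the
 * caller wants to handle failures itself rather than have a #PF injected.
 * With fRaiseTrap=false the failure comes back as a plain status
 * (VERR_ACCESS_DENIED, VERR_PAGE_NOT_PRESENT, ...) and, apart from accessed
 * bits on success, the guest state is left alone. The helper name is made up.
 */
static int pgmPhysExampleTryRead(PVM pVM, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrSrc, void *pvDst, size_t cb)
{
    return PGMPhysInterpretedReadNoHandlers(pVM, pCtxCore, pvDst, GCPtrSrc, cb, false /*fRaiseTrap*/);
}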
2940
2941
2942/**
2943 * Performs a write to guest virtual memory for instruction emulation.
2944 *
2945 * This will check permissions, raise exceptions and update the dirty and access
2946 * bits.
2947 *
2948 * @returns VBox status code suitable to scheduling.
2949 * @retval VINF_SUCCESS if the read was performed successfully.
2950 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2951 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2952 *
2953 * @param pVM The VM handle.
2954 * @param pCtxCore The context core.
2955 * @param GCPtrDst The destination address.
2956 * @param pvSrc What to write.
2957 * @param cb The number of bytes to write. Not more than a page.
2958 * @param fRaiseTrap If set the trap will be raised as per spec; if clear
2959 *                    an appropriate error status will be returned (no
2960 *                    informational status codes at all).
2961 *
2962 * @remarks Takes the PGM lock.
2963 * @remarks A page fault on the 2nd page of the access will be raised without
2964 * writing the bits on the first page since we're ASSUMING that the
2965 * caller is emulating an instruction access.
2966 * @remarks This function will dynamically map physical pages in GC. This may
2967 * unmap mappings done by the caller. Be careful!
2968 */
2969VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVM pVM, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
2970{
2971 Assert(cb <= PAGE_SIZE);
2972
2973 /*
2974 * 1. Translate virtual to physical. This may fault.
2975 * 2. Map the physical address.
2976 * 3. Do the write operation.
2977 * 4. Set access bits if required.
2978 */
2979 int rc;
2980 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
2981 if (cb <= cb1)
2982 {
2983 /*
2984 * Not crossing pages.
2985 */
2986 RTGCPHYS GCPhys;
2987 uint64_t fFlags;
2988 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrDst, &fFlags, &GCPhys);
2989 if (RT_SUCCESS(rc))
2990 {
2991 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
2992 || ( !(CPUMGetGuestCR0(pVM) & X86_CR0_WP)
2993 && CPUMGetGuestCPL(pVM, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
2994 {
2995 void *pvDst;
2996 PGMPAGEMAPLOCK Lock;
2997 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
2998 switch (rc)
2999 {
3000 case VINF_SUCCESS:
3001 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3002 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3003 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3004 PGMPhysReleasePageMappingLock(pVM, &Lock);
3005 break;
3006 case VERR_PGM_PHYS_PAGE_RESERVED:
3007 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3008 /* bit bucket */
3009 break;
3010 default:
3011 AssertMsgFailed(("%Rrc\n", rc));
3012 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3013 return rc;
3014 }
3015
3016 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3017 {
3018 /** @todo dirty & access bit emulation isn't 100% correct. */
3019 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3020 AssertRC(rc);
3021 }
3022 return VINF_SUCCESS;
3023 }
3024 rc = VERR_ACCESS_DENIED;
3025 }
3026 }
3027 else
3028 {
3029 /*
3030 * Crosses pages.
3031 */
3032 size_t cb2 = cb - cb1;
3033 uint64_t fFlags1;
3034 RTGCPHYS GCPhys1;
3035 uint64_t fFlags2;
3036 RTGCPHYS GCPhys2;
3037 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrDst, &fFlags1, &GCPhys1);
3038 if (RT_SUCCESS(rc))
3039 {
3040 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3041 if (RT_SUCCESS(rc))
3042 {
3043 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3044 && (fFlags2 & X86_PTE_RW))
3045 || ( !(CPUMGetGuestCR0(pVM) & X86_CR0_WP)
3046 && CPUMGetGuestCPL(pVM, pCtxCore) <= 2) )
3047 {
3048 void *pvDst;
3049 PGMPAGEMAPLOCK Lock;
3050 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3051 switch (rc)
3052 {
3053 case VINF_SUCCESS:
3054 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3055 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3056 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3057 PGMPhysReleasePageMappingLock(pVM, &Lock);
3058 break;
3059 case VERR_PGM_PHYS_PAGE_RESERVED:
3060 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3061 /* bit bucket */
3062 break;
3063 default:
3064 AssertMsgFailed(("%Rrc\n", rc));
3065 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3066 return rc;
3067 }
3068
3069 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3070 switch (rc)
3071 {
3072 case VINF_SUCCESS:
3073 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3074 PGMPhysReleasePageMappingLock(pVM, &Lock);
3075 break;
3076 case VERR_PGM_PHYS_PAGE_RESERVED:
3077 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3078 /* bit bucket */
3079 break;
3080 default:
3081 AssertMsgFailed(("%Rrc\n", rc));
3082 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3083 return rc;
3084 }
3085
3086 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3087 {
3088 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3089 AssertRC(rc);
3090 }
3091 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3092 {
3093 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3094 AssertRC(rc);
3095 }
3096 return VINF_SUCCESS;
3097 }
3098 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3099 GCPtrDst += cb1; /* fault on the 2nd page. */
3100 rc = VERR_ACCESS_DENIED;
3101 }
3102 else
3103 GCPtrDst += cb1; /* fault on the 2nd page. */
3104 }
3105 }
3106
3107 /*
3108 * Raise a #PF if we're allowed to do that.
3109 */
3110 /* Calc the error bits. */
3111 uint32_t uErr;
3112 uint32_t cpl = CPUMGetGuestCPL(pVM, pCtxCore);
3113 switch (rc)
3114 {
3115 case VINF_SUCCESS:
3116 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3117 rc = VERR_ACCESS_DENIED;
3118 break;
3119
3120 case VERR_ACCESS_DENIED:
3121 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3122 break;
3123
3124 case VERR_PAGE_NOT_PRESENT:
3125 case VERR_PAGE_TABLE_NOT_PRESENT:
3126 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3127 break;
3128
3129 default:
3130 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3131 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3132 return rc;
3133 }
3134 if (fRaiseTrap)
3135 {
3136 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3137 return TRPMRaiseXcptErrCR2(pVM, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3138 }
3139 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3140 return rc;
3141}
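
/*
 * Hypothetical usage sketch (illustration only): probing whether a guest
 * virtual address is currently writable without raising a trap. The byte is
 * read and written back unchanged, so only the accessed and dirty bits of the
 * guest PTE may be modified; the helper name and the probing idea are made up.
 */
static bool pgmPhysExampleIsGCPtrWritable(PVM pVM, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtr)
{
    uint8_t bByte = 0;
    int rc = PGMPhysInterpretedReadNoHandlers(pVM, pCtxCore, &bByte, (RTGCUINTPTR)GCPtr, 1, false /*fRaiseTrap*/);
    if (RT_SUCCESS(rc))
        rc = PGMPhysInterpretedWriteNoHandlers(pVM, pCtxCore, GCPtr, &bByte, 1, false /*fRaiseTrap*/);
    return RT_SUCCESS(rc);
}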
3142
3143