VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 18891

Last change on this file since 18891 was 18880, checked in by vboxsync, 16 years ago

PGM/MM: Implemented the /RamPreAlloc CFGM option.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 110.3 KB
1/* $Id: PGMAllPhys.cpp 18880 2009-04-14 09:42:01Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM_PHYS
26#include <VBox/pgm.h>
27#include <VBox/trpm.h>
28#include <VBox/vmm.h>
29#include <VBox/iom.h>
30#include <VBox/em.h>
31#include <VBox/rem.h>
32#include "PGMInternal.h"
33#include <VBox/vm.h>
34#include <VBox/param.h>
35#include <VBox/err.h>
36#include <iprt/assert.h>
37#include <iprt/string.h>
38#include <iprt/asm.h>
39#include <VBox/log.h>
40#ifdef IN_RING3
41# include <iprt/thread.h>
42#endif
43
44
45
46#ifndef IN_RING3
47
48/**
49 * \#PF Handler callback for Guest ROM range write access.
50 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
51 *
52 * @returns VBox status code (appropriate for trap handling and GC return).
53 * @param pVM VM Handle.
54 * @param uErrorCode CPU Error code.
55 * @param pRegFrame Trap register frame.
56 * @param pvFault The fault address (cr2).
57 * @param GCPhysFault The GC physical address corresponding to pvFault.
58 * @param pvUser User argument. Pointer to the ROM range structure.
59 */
60VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
61{
62 int rc;
63 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
64 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
65 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
66 switch (pRom->aPages[iPage].enmProt)
67 {
68 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
69 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
70 {
71 /*
72 * If it's a simple instruction which doesn't change the cpu state
73 * we will simply skip it. Otherwise we'll have to defer it to REM.
74 */
75 uint32_t cbOp;
76 DISCPUSTATE Cpu;
77 rc = EMInterpretDisasOne(pVM, pRegFrame, &Cpu, &cbOp);
78 if ( RT_SUCCESS(rc)
79 && Cpu.mode == CPUMODE_32BIT /** @todo why does this matter? */
80 && !(Cpu.prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
81 {
82 switch (Cpu.opcode)
83 {
84 /** @todo Find other instructions we can safely skip, possibly
85 * adding this kind of detection to DIS or EM. */
86 case OP_MOV:
87 pRegFrame->rip += cbOp;
88 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestROMWriteHandled);
89 return VINF_SUCCESS;
90 }
91 }
92 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
93 return rc;
94 break;
95 }
96
97 case PGMROMPROT_READ_RAM_WRITE_RAM:
98 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
99 AssertRC(rc);
100 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
101
102 case PGMROMPROT_READ_ROM_WRITE_RAM:
103 /* Handle it in ring-3 because it's *way* easier there. */
104 break;
105
106 default:
107 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
108 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
109 VERR_INTERNAL_ERROR);
110 }
111
112 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestROMWriteUnhandled);
113 return VINF_EM_RAW_EMULATE_INSTR;
114}
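/*
 * Summary sketch of how the ROM protection modes are treated by the handler
 * above (derived from the switch cases; not a separate code path):
 *
 *     PGMROMPROT_READ_ROM_WRITE_IGNORE   skip a simple 32-bit MOV, otherwise emulate the instruction
 *     PGMROMPROT_READ_RAM_WRITE_IGNORE   skip a simple 32-bit MOV, otherwise emulate the instruction
 *     PGMROMPROT_READ_RAM_WRITE_RAM      temporarily lift the page handler, then emulate the instruction
 *     PGMROMPROT_READ_ROM_WRITE_RAM      emulate the instruction; the real work is done in ring-3
 */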
115
116#endif /* !IN_RING3 */
117
118/**
119 * Checks if Address Gate 20 is enabled or not.
120 *
121 * @returns true if enabled.
122 * @returns false if disabled.
123 * @param pVM VM handle.
124 */
125VMMDECL(bool) PGMPhysIsA20Enabled(PVM pVM)
126{
127 LogFlow(("PGMPhysIsA20Enabled %d\n", pVM->pgm.s.fA20Enabled));
128 return pVM->pgm.s.fA20Enabled;
129}
130
131
132/**
133 * Validates a GC physical address.
134 *
135 * @returns true if valid.
136 * @returns false if invalid.
137 * @param pVM The VM handle.
138 * @param GCPhys The physical address to validate.
139 */
140VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
141{
142 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
143 return pPage != NULL;
144}
145
146
147/**
148 * Checks if a GC physical address is a normal page,
149 * i.e. not ROM, MMIO or reserved.
150 *
151 * @returns true if normal.
152 * @returns false if invalid, ROM, MMIO or reserved page.
153 * @param pVM The VM handle.
154 * @param GCPhys The physical address to check.
155 */
156VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
157{
158 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
159 return pPage
160 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
161}
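/*
 * Illustrative usage sketch (hypothetical caller; GCPhys is assumed to be
 * supplied by that caller): the two predicates above distinguish unmapped
 * addresses, special pages and plain RAM.
 *
 *     if (!PGMPhysIsGCPhysValid(pVM, GCPhys))
 *         Log(("%RGp is not backed by anything\n", GCPhys));
 *     else if (!PGMPhysIsGCPhysNormal(pVM, GCPhys))
 *         Log(("%RGp exists but is ROM, MMIO or reserved\n", GCPhys));
 *     else
 *         Log(("%RGp is plain RAM\n", GCPhys));
 */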
162
163
164/**
165 * Converts a GC physical address to a HC physical address.
166 *
167 * @returns VINF_SUCCESS on success.
168 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
169 * page but has no physical backing.
170 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
171 * GC physical address.
172 *
173 * @param pVM The VM handle.
174 * @param GCPhys The GC physical address to convert.
175 * @param pHCPhys Where to store the HC physical address on success.
176 */
177VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
178{
179 PPGMPAGE pPage;
180 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
181 if (RT_FAILURE(rc))
182 return rc;
183
184 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
185 return VINF_SUCCESS;
186}
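/*
 * Illustrative usage sketch (hypothetical caller): translate a guest
 * physical address to its host physical backing and check the result.
 *
 *     RTHCPHYS HCPhys;
 *     int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
 *     if (RT_SUCCESS(rc))
 *         Log(("GCPhys %RGp -> HCPhys %RHp\n", GCPhys, HCPhys));
 *     else
 *         Log(("GCPhys %RGp has no physical backing (rc=%Rrc)\n", GCPhys, rc));
 */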
187
188
189/**
190 * Invalidates the GC page mapping TLB.
191 *
192 * @param pVM The VM handle.
193 */
194VMMDECL(void) PGMPhysInvalidatePageGCMapTLB(PVM pVM)
195{
196 /* later */
197 NOREF(pVM);
198}
199
200
201/**
202 * Invalidates the ring-0 page mapping TLB.
203 *
204 * @param pVM The VM handle.
205 */
206VMMDECL(void) PGMPhysInvalidatePageR0MapTLB(PVM pVM)
207{
208 PGMPhysInvalidatePageR3MapTLB(pVM);
209}
210
211
212/**
213 * Invalidates the ring-3 page mapping TLB.
214 *
215 * @param pVM The VM handle.
216 */
217VMMDECL(void) PGMPhysInvalidatePageR3MapTLB(PVM pVM)
218{
219 pgmLock(pVM);
220 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
221 {
222 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
223 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
224 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
225 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
226 }
227 pgmUnlock(pVM);
228}
229
230
231/**
232 * Makes sure that there is at least one handy page ready for use.
233 *
234 * This will also take the appropriate actions when reaching water-marks.
235 *
236 * @returns VBox status code.
237 * @retval VINF_SUCCESS on success.
238 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
239 *
240 * @param pVM The VM handle.
241 *
242 * @remarks Must be called from within the PGM critical section. It may
243 * nip back to ring-3/0 in some cases.
244 */
245static int pgmPhysEnsureHandyPage(PVM pVM)
246{
247 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
248
249 /*
250 * Do we need to do anything special?
251 */
252#ifdef IN_RING3
253 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
254#else
255 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
256#endif
257 {
258 /*
259 * Allocate pages only if we're out of them, or in ring-3, almost out.
260 */
261#ifdef IN_RING3
262 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
263#else
264 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
265#endif
266 {
267 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
268 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
269#ifdef IN_RING3
270 int rc = PGMR3PhysAllocateHandyPages(pVM);
271#elif defined(IN_RING0)
272 int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
273#else
274 int rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
275#endif
276 if (RT_UNLIKELY(rc != VINF_SUCCESS))
277 {
278 if (RT_FAILURE(rc))
279 return rc;
280 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
281 if (!pVM->pgm.s.cHandyPages)
282 {
283 LogRel(("PGM: no more handy pages!\n"));
284 return VERR_EM_NO_MEMORY;
285 }
286 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
287 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
288#ifdef IN_RING3
289 REMR3NotifyFF(pVM);
290#else
291 VM_FF_SET(pVM, VM_FF_TO_R3); /* paranoia */
292#endif
293 }
294 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
295 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
296 ("%u\n", pVM->pgm.s.cHandyPages),
297 VERR_INTERNAL_ERROR);
298 }
299 else
300 {
301 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
302 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
303#ifndef IN_RING3
304 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
305 {
306 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
307 VM_FF_SET(pVM, VM_FF_TO_R3);
308 }
309#endif
310 }
311 }
312
313 return VINF_SUCCESS;
314}
315
316
317/**
318 * Replace a zero or shared page with a new page that we can write to.
319 *
320 * @returns The following VBox status codes.
321 * @retval VINF_SUCCESS on success, pPage is modified.
322 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
323 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
324 *
325 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
326 *
327 * @param pVM The VM address.
328 * @param pPage The physical page tracking structure. This will
329 * be modified on success.
330 * @param GCPhys The address of the page.
331 *
332 * @remarks Must be called from within the PGM critical section. It may
333 * nip back to ring-3/0 in some cases.
334 *
335 * @remarks This function shouldn't really fail; however, if it does
336 * it probably means we've screwed up the size of handy pages and/or
337 * the low-water mark. Or, that some device I/O is causing a lot of
338 * pages to be allocated while the host is in a low-memory
339 * condition. The latter should be handled elsewhere and in a more
340 * controlled manner; it's on the @bugref{3170} todo list...
341 */
342int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
343{
344 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
345
346 /*
347 * Prereqs.
348 */
349 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
350 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
351 Assert(!PGM_PAGE_IS_MMIO(pPage));
352
353
354 /*
355 * Flush any shadow page table mappings of the page.
356 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
357 */
358 bool fFlushTLBs = false;
359 int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
360 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
361
362 /*
363 * Ensure that we've got a page handy, take it and use it.
364 */
365 int rc2 = pgmPhysEnsureHandyPage(pVM);
366 if (RT_FAILURE(rc2))
367 {
368 if (fFlushTLBs)
369 PGM_INVL_GUEST_TLBS();
370 Assert(rc2 == VERR_EM_NO_MEMORY);
371 return rc2;
372 }
373 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
374 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
375 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
376 Assert(!PGM_PAGE_IS_MMIO(pPage));
377
378 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
379 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
380 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
381 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
382 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
383 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
384
385 /*
386 * There are one or two actions to be taken the next time we allocate handy pages:
387 * - Tell the GMM (global memory manager) what the page is being used for.
388 * (Speeds up replacement operations - sharing and defragmenting.)
389 * - If the current backing is shared, it must be freed.
390 */
391 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
392 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
393
394 if (PGM_PAGE_IS_SHARED(pPage))
395 {
396 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
397 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
398 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
399
400 Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
401 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
402 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageReplaceShared));
403 pVM->pgm.s.cSharedPages--;
404 AssertMsgFailed(("TODO: copy shared page content")); /** @todo err.. what about copying the page content? */
405 }
406 else
407 {
408 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
409 STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
410 pVM->pgm.s.cZeroPages--;
411 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
412 }
413
414 /*
415 * Do the PGMPAGE modifications.
416 */
417 pVM->pgm.s.cPrivatePages++;
418 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
419 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
420 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
421
422 if ( fFlushTLBs
423 && rc != VINF_PGM_GCPHYS_ALIASED)
424 PGM_INVL_GUEST_TLBS();
425 return rc;
426}
427
428
429/**
430 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
431 *
432 * @returns VBox status code.
433 * @retval VINF_SUCCESS on success.
434 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
435 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
436 *
437 * @param pVM The VM address.
438 * @param pPage The physical page tracking structure.
439 * @param GCPhys The address of the page.
440 *
441 * @remarks Called from within the PGM critical section.
442 */
443int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
444{
445 switch (PGM_PAGE_GET_STATE(pPage))
446 {
447 case PGM_PAGE_STATE_WRITE_MONITORED:
448 PGM_PAGE_SET_WRITTEN_TO(pPage);
449 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
450 /* fall thru */
451 default: /* to shut up GCC */
452 case PGM_PAGE_STATE_ALLOCATED:
453 return VINF_SUCCESS;
454
455 /*
456 * Zero pages can be dummy pages for MMIO or reserved memory,
457 * so we need to check the flags before joining cause with
458 * shared page replacement.
459 */
460 case PGM_PAGE_STATE_ZERO:
461 if (PGM_PAGE_IS_MMIO(pPage))
462 return VERR_PGM_PHYS_PAGE_RESERVED;
463 /* fall thru */
464 case PGM_PAGE_STATE_SHARED:
465 return pgmPhysAllocPage(pVM, pPage, GCPhys);
466 }
467}
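/*
 * Typical caller pattern (sketch; it mirrors the checks done by
 * pgmPhysGCPhys2CCPtrInternal further down in this file): only pages outside
 * the ALLOCATED state need converting, and VINF_PGM_SYNC_CR3 still counts as
 * success.
 *
 *     if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
 *     {
 *         int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
 *         if (RT_FAILURE(rc))
 *             return rc;
 *         // rc may be VINF_PGM_SYNC_CR3, meaning a page pool flush is pending.
 *     }
 */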
468
469
470/**
471 * Wrapper for pgmPhysPageMakeWritable which enters the critsect.
472 *
473 * @returns VBox status code.
474 * @retval VINF_SUCCESS on success.
475 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
476 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
477 *
478 * @param pVM The VM address.
479 * @param pPage The physical page tracking structure.
480 * @param GCPhys The address of the page.
481 */
482int pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
483{
484 int rc = pgmLock(pVM);
485 if (RT_SUCCESS(rc))
486 {
487 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
488 pgmUnlock(pVM);
489 }
490 return rc;
491}
492
493
494/**
495 * Internal usage: Map the page specified by its GMM ID.
496 *
497 * This is similar to pgmPhysPageMap.
498 *
499 * @returns VBox status code.
500 *
501 * @param pVM The VM handle.
502 * @param idPage The Page ID.
503 * @param HCPhys The physical address (for RC).
504 * @param ppv Where to store the mapping address.
505 *
506 * @remarks Called from within the PGM critical section.
507 */
508int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
509{
510 /*
511 * Validation.
512 */
513 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
514 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
515 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
516 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
517
518#ifdef IN_RC
519 /*
520 * Map it by HCPhys.
521 */
522 return PGMDynMapHCPage(pVM, HCPhys, ppv);
523
524#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
525 /*
526 * Map it by HCPhys.
527 */
528 return pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
529
530#else
531 /*
532 * Find/make Chunk TLB entry for the mapping chunk.
533 */
534 PPGMCHUNKR3MAP pMap;
535 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
536 if (pTlbe->idChunk == idChunk)
537 {
538 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
539 pMap = pTlbe->pChunk;
540 }
541 else
542 {
543 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
544
545 /*
546 * Find the chunk, map it if necessary.
547 */
548 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
549 if (!pMap)
550 {
551# ifdef IN_RING0
552 int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_MAP_CHUNK, idChunk);
553 AssertRCReturn(rc, rc);
554 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
555 Assert(pMap);
556# else
557 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
558 if (RT_FAILURE(rc))
559 return rc;
560# endif
561 }
562
563 /*
564 * Enter it into the Chunk TLB.
565 */
566 pTlbe->idChunk = idChunk;
567 pTlbe->pChunk = pMap;
568 pMap->iAge = 0;
569 }
570
571 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
572 return VINF_SUCCESS;
573#endif
574}
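/*
 * Sketch of the page ID decomposition used above (the shift and mask values
 * come from the GMM headers, not from this file):
 *
 *     uint32_t idChunk      = idPage >> GMM_CHUNKID_SHIFT;     // which mapping chunk
 *     uint32_t iPageInChunk = idPage & GMM_PAGEID_IDX_MASK;    // page within that chunk
 *     // The ring-3 address is then pMap->pv + (iPageInChunk << PAGE_SHIFT).
 */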
575
576
577/**
578 * Maps a page into the current virtual address space so it can be accessed.
579 *
580 * @returns VBox status code.
581 * @retval VINF_SUCCESS on success.
582 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
583 *
584 * @param pVM The VM address.
585 * @param pPage The physical page tracking structure.
586 * @param GCPhys The address of the page.
587 * @param ppMap Where to store the address of the mapping tracking structure.
588 * @param ppv Where to store the mapping address of the page. The page
589 * offset is masked off!
590 *
591 * @remarks Called from within the PGM critical section.
592 */
593int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
594{
595 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
596
597#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
598 /*
599 * Just some sketchy GC/R0-darwin code.
600 */
601 *ppMap = NULL;
602 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
603 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
604# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
605 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
606# else
607 PGMDynMapHCPage(pVM, HCPhys, ppv);
608# endif
609 return VINF_SUCCESS;
610
611#else /* IN_RING3 || IN_RING0 */
612
613
614 /*
615 * Special case: ZERO and MMIO2 pages.
616 */
617 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
618 if (idChunk == NIL_GMM_CHUNKID)
619 {
620 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
621 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
622 {
623 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
624 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
625 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
626 *ppv = (void *)((uintptr_t)pRam->pvR3 + (GCPhys - pRam->GCPhys));
627 }
628 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
629 {
630 /** @todo deal with aliased MMIO2 pages somehow...
631 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
632 * them, that would also avoid this mess. It would actually be kind of
633 * elegant... */
634 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
635 }
636 else
637 {
638 /** @todo handle MMIO2 */
639 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
640 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
641 ("pPage=%R[pgmpage]\n", pPage),
642 VERR_INTERNAL_ERROR_2);
643 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
644 }
645 *ppMap = NULL;
646 return VINF_SUCCESS;
647 }
648
649 /*
650 * Find/make Chunk TLB entry for the mapping chunk.
651 */
652 PPGMCHUNKR3MAP pMap;
653 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
654 if (pTlbe->idChunk == idChunk)
655 {
656 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
657 pMap = pTlbe->pChunk;
658 }
659 else
660 {
661 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
662
663 /*
664 * Find the chunk, map it if necessary.
665 */
666 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
667 if (!pMap)
668 {
669#ifdef IN_RING0
670 int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_MAP_CHUNK, idChunk);
671 AssertRCReturn(rc, rc);
672 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
673 Assert(pMap);
674#else
675 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
676 if (RT_FAILURE(rc))
677 return rc;
678#endif
679 }
680
681 /*
682 * Enter it into the Chunk TLB.
683 */
684 pTlbe->idChunk = idChunk;
685 pTlbe->pChunk = pMap;
686 pMap->iAge = 0;
687 }
688
689 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
690 *ppMap = pMap;
691 return VINF_SUCCESS;
692#endif /* IN_RING3 */
693}
694
695
696#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
697/**
698 * Load a guest page into the ring-3 physical TLB.
699 *
700 * @returns VBox status code.
701 * @retval VINF_SUCCESS on success
702 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
703 * @param pPGM The PGM instance pointer.
704 * @param GCPhys The guest physical address in question.
705 */
706int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
707{
708 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
709
710 /*
711 * Find the ram range.
712 * 99.8% of requests are expected to be in the first range.
713 */
714 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
715 RTGCPHYS off = GCPhys - pRam->GCPhys;
716 if (RT_UNLIKELY(off >= pRam->cb))
717 {
718 do
719 {
720 pRam = pRam->CTX_SUFF(pNext);
721 if (!pRam)
722 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
723 off = GCPhys - pRam->GCPhys;
724 } while (off >= pRam->cb);
725 }
726
727 /*
728 * Map the page.
729 * Make a special case for the zero page as it is kind of special.
730 */
731 PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
732 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
733 if (!PGM_PAGE_IS_ZERO(pPage))
734 {
735 void *pv;
736 PPGMPAGEMAP pMap;
737 int rc = pgmPhysPageMap(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
738 if (RT_FAILURE(rc))
739 return rc;
740 pTlbe->pMap = pMap;
741 pTlbe->pv = pv;
742 }
743 else
744 {
745 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
746 pTlbe->pMap = NULL;
747 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
748 }
749 pTlbe->pPage = pPage;
750 return VINF_SUCCESS;
751}
752
753
754/**
755 * Load a guest page into the ring-3 physical TLB.
756 *
757 * @returns VBox status code.
758 * @retval VINF_SUCCESS on success
759 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
760 *
761 * @param pPGM The PGM instance pointer.
762 * @param pPage Pointer to the PGMPAGE structure corresponding to
763 * GCPhys.
764 * @param GCPhys The guest physical address in question.
765 */
766int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys)
767{
768 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
769
770 /*
771 * Map the page.
772 * Make a special case for the zero page as it is kind of special.
773 */
774 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
775 if (!PGM_PAGE_IS_ZERO(pPage))
776 {
777 void *pv;
778 PPGMPAGEMAP pMap;
779 int rc = pgmPhysPageMap(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
780 if (RT_FAILURE(rc))
781 return rc;
782 pTlbe->pMap = pMap;
783 pTlbe->pv = pv;
784 }
785 else
786 {
787 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
788 pTlbe->pMap = NULL;
789 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
790 }
791 pTlbe->pPage = pPage;
792 return VINF_SUCCESS;
793}
794#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
795
796
797/**
798 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
799 * own the PGM lock and therefore not need to lock the mapped page.
800 *
801 * @returns VBox status code.
802 * @retval VINF_SUCCESS on success.
803 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
804 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
805 *
806 * @param pVM The VM handle.
807 * @param GCPhys The guest physical address of the page that should be mapped.
808 * @param pPage Pointer to the PGMPAGE structure for the page.
809 * @param ppv Where to store the address corresponding to GCPhys.
810 *
811 * @internal
812 */
813int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
814{
815 int rc;
816 AssertReturn(pPage, VERR_INTERNAL_ERROR);
817 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect) || VM_IS_EMT(pVM));
818
819 /*
820 * Make sure the page is writable.
821 */
822 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
823 {
824 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
825 if (RT_FAILURE(rc))
826 return rc;
827 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
828 }
829 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
830
831 /*
832 * Get the mapping address.
833 */
834#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
835 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
836#else
837 PPGMPAGEMAPTLBE pTlbe;
838 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
839 if (RT_FAILURE(rc))
840 return rc;
841 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
842#endif
843 return VINF_SUCCESS;
844}
845
846
847/**
848 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
849 * own the PGM lock and therefore not need to lock the mapped page.
850 *
851 * @returns VBox status code.
852 * @retval VINF_SUCCESS on success.
853 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
854 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
855 *
856 * @param pVM The VM handle.
857 * @param GCPhys The guest physical address of the page that should be mapped.
858 * @param pPage Pointer to the PGMPAGE structure for the page.
859 * @param ppv Where to store the address corresponding to GCPhys.
860 *
861 * @internal
862 */
863int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
864{
865 AssertReturn(pPage, VERR_INTERNAL_ERROR);
866 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect) || VM_IS_EMT(pVM));
867 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
868
869 /*
870 * Get the mapping address.
871 */
872#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
873 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
874#else
875 PPGMPAGEMAPTLBE pTlbe;
876 int rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
877 if (RT_FAILURE(rc))
878 return rc;
879 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
880#endif
881 return VINF_SUCCESS;
882}
883
884
885/**
886 * Requests the mapping of a guest page into the current context.
887 *
888 * This API should only be used for very short-term mappings, as it will consume
889 * scarce resources (R0 and GC) in the mapping cache. When you're done
890 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
891 *
892 * This API will assume your intention is to write to the page, and will
893 * therefore replace shared and zero pages. If you do not intend to modify
894 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
895 *
896 * @returns VBox status code.
897 * @retval VINF_SUCCESS on success.
898 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
899 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
900 *
901 * @param pVM The VM handle.
902 * @param GCPhys The guest physical address of the page that should be mapped.
903 * @param ppv Where to store the address corresponding to GCPhys.
904 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
905 *
906 * @remarks The caller is responsible for dealing with access handlers.
907 * @todo Add an informational return code for pages with access handlers?
908 *
909 * @remark Avoid calling this API from within critical sections (other than the
910 * PGM one) because of the deadlock risk. External threads may need to
911 * delegate jobs to the EMTs.
912 * @thread Any thread.
913 */
914VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
915{
916#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
917
918 /*
919 * Find the page and make sure it's writable.
920 */
921 PPGMPAGE pPage;
922 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
923 if (RT_SUCCESS(rc))
924 {
925 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
926 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
927 if (RT_SUCCESS(rc))
928 {
929 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
930# if 0
931 pLock->pvMap = 0;
932 pLock->pvPage = pPage;
933# else
934 pLock->u32Dummy = UINT32_MAX;
935# endif
936 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
937 rc = VINF_SUCCESS;
938 }
939 }
940
941#else /* IN_RING3 || IN_RING0 */
942 int rc = pgmLock(pVM);
943 AssertRCReturn(rc, rc);
944
945 /*
946 * Query the Physical TLB entry for the page (may fail).
947 */
948 PPGMPAGEMAPTLBE pTlbe;
949 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
950 if (RT_SUCCESS(rc))
951 {
952 /*
953 * If the page is shared, the zero page, or being write monitored,
954 * it must be converted to a page that's writable if possible.
955 */
956 PPGMPAGE pPage = pTlbe->pPage;
957 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
958 {
959 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
960 if (RT_SUCCESS(rc))
961 {
962 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
963 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
964 }
965 }
966 if (RT_SUCCESS(rc))
967 {
968 /*
969 * Now, just perform the locking and calculate the return address.
970 */
971 PPGMPAGEMAP pMap = pTlbe->pMap;
972 if (pMap)
973 pMap->cRefs++;
974# if 0 /** @todo implement locking properly */
975 if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
976 if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
977 {
978 AssertMsgFailed(("%RGp is entering permanent locked state!\n", GCPhys));
979 if (pMap)
980 pMap->cRefs++; /* Extra ref to prevent it from going away. */
981 }
982# endif
983 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
984 pLock->pvPage = pPage;
985 pLock->pvMap = pMap;
986 }
987 }
988
989 pgmUnlock(pVM);
990#endif /* IN_RING3 || IN_RING0 */
991 return rc;
992}
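/*
 * Illustrative usage sketch (hypothetical caller; pvData and cbData are
 * assumed to be caller supplied and to fit within the page): map the page,
 * perform the short write, and release the lock again as soon as possible.
 *
 *     PGMPAGEMAPLOCK Lock;
 *     void *pv;
 *     int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         memcpy(pv, pvData, cbData);
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);
 *     }
 */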
993
994
995/**
996 * Requests the mapping of a guest page into the current context.
997 *
998 * This API should only be used for very short-term mappings, as it will consume
999 * scarce resources (R0 and GC) in the mapping cache. When you're done
1000 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1001 *
1002 * @returns VBox status code.
1003 * @retval VINF_SUCCESS on success.
1004 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1005 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1006 *
1007 * @param pVM The VM handle.
1008 * @param GCPhys The guest physical address of the page that should be mapped.
1009 * @param ppv Where to store the address corresponding to GCPhys.
1010 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1011 *
1012 * @remarks The caller is responsible for dealing with access handlers.
1013 * @todo Add an informational return code for pages with access handlers?
1014 *
1015 * @remark Avoid calling this API from within critical sections (other than
1016 * the PGM one) because of the deadlock risk.
1017 * @thread Any thread.
1018 */
1019VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1020{
1021#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1022
1023 /*
1024 * Find the page and make sure it's readable.
1025 */
1026 PPGMPAGE pPage;
1027 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1028 if (RT_SUCCESS(rc))
1029 {
1030 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1031 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1032 else
1033 {
1034 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1035# if 0
1036 pLock->pvMap = 0;
1037 pLock->pvPage = pPage;
1038# else
1039 pLock->u32Dummy = UINT32_MAX;
1040# endif
1041 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1042 rc = VINF_SUCCESS;
1043 }
1044 }
1045
1046#else /* IN_RING3 || IN_RING0 */
1047 int rc = pgmLock(pVM);
1048 AssertRCReturn(rc, rc);
1049
1050 /*
1051 * Query the Physical TLB entry for the page (may fail).
1052 */
1053 PPGMPAGEMAPTLBE pTlbe;
1054 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1055 if (RT_SUCCESS(rc))
1056 {
1057 /* MMIO pages don't have any readable backing. */
1058 PPGMPAGE pPage = pTlbe->pPage;
1059 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1060 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1061 else
1062 {
1063 /*
1064 * Now, just perform the locking and calculate the return address.
1065 */
1066 PPGMPAGEMAP pMap = pTlbe->pMap;
1067 if (pMap)
1068 pMap->cRefs++;
1069# if 0 /** @todo implement locking properly */
1070 if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
1071 if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
1072 {
1073 AssertMsgFailed(("%RGp is entering permanent locked state!\n", GCPhys));
1074 if (pMap)
1075 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1076 }
1077# endif
1078 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1079 pLock->pvPage = pPage;
1080 pLock->pvMap = pMap;
1081 }
1082 }
1083
1084 pgmUnlock(pVM);
1085#endif /* IN_RING3 || IN_RING0 */
1086 return rc;
1087}
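/*
 * Illustrative usage sketch (hypothetical caller; pvDst and cbToCopy are
 * caller supplied): the read-only variant follows the same lock/release
 * discipline as the writable one, but never replaces shared or zero pages.
 *
 *     PGMPAGEMAPLOCK Lock;
 *     void const *pv;
 *     int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         memcpy(pvDst, pv, cbToCopy);
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);
 *     }
 */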
1088
1089
1090/**
1091 * Requests the mapping of a guest page given by virtual address into the current context.
1092 *
1093 * This API should only be used for very short-term mappings, as it will consume
1094 * scarce resources (R0 and GC) in the mapping cache. When you're done
1095 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1096 *
1097 * This API will assume your intention is to write to the page, and will
1098 * therefore replace shared and zero pages. If you do not intend to modify
1099 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1100 *
1101 * @returns VBox status code.
1102 * @retval VINF_SUCCESS on success.
1103 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1104 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1105 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1106 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1107 *
1108 * @param pVM The VM handle.
1109 * @param GCPtr The guest virtual address of the page that should be mapped.
1110 * @param ppv Where to store the address corresponding to GCPtr.
1111 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1112 *
1113 * @remark Avoid calling this API from within critical sections (other than
1114 * the PGM one) because of the deadlock risk.
1115 * @thread EMT
1116 */
1117VMMDECL(int) PGMPhysGCPtr2CCPtr(PVM pVM, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1118{
1119 VM_ASSERT_EMT(pVM);
1120 RTGCPHYS GCPhys;
1121 int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
1122 if (RT_SUCCESS(rc))
1123 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, pLock);
1124 return rc;
1125}
1126
1127
1128/**
1129 * Requests the mapping of a guest page given by virtual address into the current context.
1130 *
1131 * This API should only be used for very short-term mappings, as it will consume
1132 * scarce resources (R0 and GC) in the mapping cache. When you're done
1133 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1134 *
1135 * @returns VBox status code.
1136 * @retval VINF_SUCCESS on success.
1137 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1138 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1139 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1140 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1141 *
1142 * @param pVM The VM handle.
1143 * @param GCPhys The guest physical address of the page that should be mapped.
1144 * @param ppv Where to store the address corresponding to GCPhys.
1145 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1146 *
1147 * @remark Avoid calling this API from within critical sections (other than
1148 * the PGM one) because of the deadlock risk.
1149 * @thread EMT
1150 */
1151VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVM pVM, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1152{
1153 VM_ASSERT_EMT(pVM);
1154 RTGCPHYS GCPhys;
1155 int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
1156 if (RT_SUCCESS(rc))
1157 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, pLock);
1158 return rc;
1159}
1160
1161
1162/**
1163 * Release the mapping of a guest page.
1164 *
1165 * This is the counter part of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly
1166 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1167 *
1168 * @param pVM The VM handle.
1169 * @param pLock The lock structure initialized by the mapping function.
1170 */
1171VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1172{
1173#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1174 /* currently nothing to do here. */
1175 Assert(pLock->u32Dummy == UINT32_MAX);
1176 pLock->u32Dummy = 0;
1177
1178#else /* IN_RING3 */
1179 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1180 if (!pMap)
1181 {
1182 /* The ZERO page and MMIO2 end up here. */
1183 Assert(pLock->pvPage);
1184 pLock->pvPage = NULL;
1185 }
1186 else
1187 {
1188 pgmLock(pVM);
1189
1190# if 0 /** @todo implement page locking */
1191 PPGMPAGE pPage = (PPGMPAGE)pLock->pvPage;
1192 Assert(pPage->cLocks >= 1);
1193 if (pPage->cLocks != PGM_PAGE_MAX_LOCKS)
1194 pPage->cLocks--;
1195# endif
1196
1197 Assert(pMap->cRefs >= 1);
1198 pMap->cRefs--;
1199 pMap->iAge = 0;
1200
1201 pgmUnlock(pVM);
1202 }
1203#endif /* IN_RING3 */
1204}
1205
1206
1207/**
1208 * Converts a GC physical address to a HC ring-3 pointer.
1209 *
1210 * @returns VINF_SUCCESS on success.
1211 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1212 * page but has no physical backing.
1213 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1214 * GC physical address.
1215 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1216 * a dynamic ram chunk boundary.
1217 *
1218 * @param pVM The VM handle.
1219 * @param GCPhys The GC physical address to convert.
1220 * @param cbRange Physical range
1221 * @param pR3Ptr Where to store the R3 pointer on success.
1222 *
1223 * @deprecated Avoid when possible!
1224 */
1225VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
1226{
1227/** @todo this is kind of hacky and needs some more work. */
1228 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1229
1230 Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): dont use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
1231#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1232 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1233#else
1234 pgmLock(pVM);
1235
1236 PPGMRAMRANGE pRam;
1237 PPGMPAGE pPage;
1238 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
1239 if (RT_SUCCESS(rc))
1240 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
1241
1242 pgmUnlock(pVM);
1243 Assert(rc <= VINF_SUCCESS);
1244 return rc;
1245#endif
1246}
1247
1248
1249#ifdef VBOX_STRICT
1250/**
1251 * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
1252 *
1253 * @returns The R3Ptr, NIL_RTR3PTR on failure.
1254 * @param pVM The VM handle.
1255 * @param GCPhys The GC physical address.
1256 * @param cbRange Physical range.
1257 *
1258 * @deprecated Avoid when possible.
1259 */
1260VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
1261{
1262 RTR3PTR R3Ptr;
1263 int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
1264 if (RT_SUCCESS(rc))
1265 return R3Ptr;
1266 return NIL_RTR3PTR;
1267}
1268#endif /* VBOX_STRICT */
1269
1270
1271/**
1272 * Converts a guest pointer to a GC physical address.
1273 *
1274 * This uses the current CR3/CR0/CR4 of the guest.
1275 *
1276 * @returns VBox status code.
1277 * @param pVM The VM Handle
1278 * @param GCPtr The guest pointer to convert.
1279 * @param pGCPhys Where to store the GC physical address.
1280 */
1281VMMDECL(int) PGMPhysGCPtr2GCPhys(PVM pVM, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1282{
1283 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1284 if (pGCPhys && RT_SUCCESS(rc))
1285 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1286 return rc;
1287}
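/*
 * Illustrative usage sketch (hypothetical caller on the EMT; pvBuf and
 * cbToRead are caller supplied and assumed to stay within one page): resolve
 * a guest virtual address with the current paging mode, then do a
 * handler-aware physical read.
 *
 *     RTGCPHYS GCPhys;
 *     int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
 *     if (RT_SUCCESS(rc))
 *         rc = PGMPhysRead(pVM, GCPhys, pvBuf, cbToRead);
 */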
1288
1289
1290/**
1291 * Converts a guest pointer to a HC physical address.
1292 *
1293 * This uses the current CR3/CR0/CR4 of the guest.
1294 *
1295 * @returns VBox status code.
1296 * @param pVM The VM Handle
1297 * @param GCPtr The guest pointer to convert.
1298 * @param pHCPhys Where to store the HC physical address.
1299 */
1300VMMDECL(int) PGMPhysGCPtr2HCPhys(PVM pVM, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
1301{
1302 RTGCPHYS GCPhys;
1303 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1304 if (RT_SUCCESS(rc))
1305 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
1306 return rc;
1307}
1308
1309
1310/**
1311 * Converts a guest pointer to a R3 pointer.
1312 *
1313 * This uses the current CR3/CR0/CR4 of the guest.
1314 *
1315 * @returns VBox status code.
1316 * @param pVM The VM Handle
1317 * @param GCPtr The guest pointer to convert.
1318 * @param pR3Ptr Where to store the R3 virtual address.
1319 *
1320 * @deprecated Don't use this.
1321 */
1322VMMDECL(int) PGMPhysGCPtr2R3Ptr(PVM pVM, RTGCPTR GCPtr, PRTR3PTR pR3Ptr)
1323{
1324 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1325 RTGCPHYS GCPhys;
1326 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1327 if (RT_SUCCESS(rc))
1328 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pR3Ptr);
1329 return rc;
1330}
1331
1332
1333
1334#undef LOG_GROUP
1335#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
1336
1337
1338#ifdef IN_RING3
1339/**
1340 * Cache PGMPhys memory access
1341 *
1342 * @param pVM VM Handle.
1343 * @param pCache Cache structure pointer
1344 * @param GCPhys GC physical address
1345 * @param pbR3 R3 pointer corresponding to the physical page
1346 *
1347 * @thread EMT.
1348 */
1349static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
1350{
1351 uint32_t iCacheIndex;
1352
1353 Assert(VM_IS_EMT(pVM));
1354
1355 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
1356 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
1357
1358 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
1359
1360 ASMBitSet(&pCache->aEntries, iCacheIndex);
1361
1362 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
1363 pCache->Entry[iCacheIndex].pbR3 = pbR3;
1364}
1365#endif /* IN_RING3 */
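/*
 * Sketch of a lookup consistent with the insertion above (assumes the same
 * PGMPHYSCACHE layout; a hit needs both the bitmap bit and a matching page
 * address):
 *
 *     uint32_t iCacheIndex = (GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK;
 *     if (   ASMBitTest(&pCache->aEntries, iCacheIndex)
 *         && pCache->Entry[iCacheIndex].GCPhys == PHYS_PAGE_ADDRESS(GCPhys))
 *         pbR3 = pCache->Entry[iCacheIndex].pbR3 + (GCPhys & PAGE_OFFSET_MASK);
 */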
1366
1367
1368/**
1369 * Deals with reading from a page with one or more ALL access handlers.
1370 *
1371 * @returns VBox status code. Can be ignored in ring-3.
1372 * @retval VINF_SUCCESS.
1373 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1374 *
1375 * @param pVM The VM handle.
1376 * @param pPage The page descriptor.
1377 * @param GCPhys The physical address to start reading at.
1378 * @param pvBuf Where to put the bits we read.
1379 * @param cb How much to read - less or equal to a page.
1380 */
1381static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
1382{
1383 /*
1384 * The most frequent access here is MMIO and shadowed ROM.
1385 * The current code ASSUMES all these access handlers cover full pages!
1386 */
1387
1388 /*
1389 * Whatever we do, we need the source page, so map it first.
1390 */
1391 const void *pvSrc = NULL;
1392 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
1393 if (RT_FAILURE(rc))
1394 {
1395 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1396 GCPhys, pPage, rc));
1397 memset(pvBuf, 0xff, cb);
1398 return VINF_SUCCESS;
1399 }
1400 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1401
1402 /*
1403 * Deal with any physical handlers.
1404 */
1405 PPGMPHYSHANDLER pPhys = NULL;
1406 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
1407 {
1408#ifdef IN_RING3
1409 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1410 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1411 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
1412 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
1413 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1414 Assert(pPhys->CTX_SUFF(pfnHandler));
1415
1416 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
1417 STAM_PROFILE_START(&pPhys->Stat, h);
1418 rc = pPhys->CTX_SUFF(pfnHandler)(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pPhys->CTX_SUFF(pvUser));
1419 STAM_PROFILE_STOP(&pPhys->Stat, h);
1420 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
1421#else
1422 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1423 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1424 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1425#endif
1426 }
1427
1428 /*
1429 * Deal with any virtual handlers.
1430 */
1431 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
1432 {
1433 unsigned iPage;
1434 PPGMVIRTHANDLER pVirt;
1435
1436 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
1437 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
1438 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
1439 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1440 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
1441
1442#ifdef IN_RING3
1443 if (pVirt->pfnHandlerR3)
1444 {
1445 if (!pPhys)
1446 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1447 else
1448 Log(("pgmPhysWriteHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
1449 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1450 + (iPage << PAGE_SHIFT)
1451 + (GCPhys & PAGE_OFFSET_MASK);
1452
1453 STAM_PROFILE_START(&pVirt->Stat, h);
1454 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
1455 STAM_PROFILE_STOP(&pVirt->Stat, h);
1456 if (rc2 == VINF_SUCCESS)
1457 rc = VINF_SUCCESS;
1458 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
1459 }
1460 else
1461 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1462#else
1463 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1464 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1465 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1466#endif
1467 }
1468
1469 /*
1470 * Take the default action.
1471 */
1472 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1473 memcpy(pvBuf, pvSrc, cb);
1474 return rc;
1475}
1476
1477
1478/**
1479 * Read physical memory.
1480 *
1481 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
1482 * want to ignore those.
1483 *
1484 * @returns VBox status code. Can be ignored in ring-3.
1485 * @retval VINF_SUCCESS.
1486 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1487 *
1488 * @param pVM VM Handle.
1489 * @param GCPhys Physical address start reading from.
1490 * @param pvBuf Where to put the read bits.
1491 * @param cbRead How many bytes to read.
1492 */
1493VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1494{
1495 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
1496 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
1497
1498 pgmLock(pVM);
1499
1500 /*
1501 * Copy loop on ram ranges.
1502 */
1503 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1504 for (;;)
1505 {
1506 /* Find range. */
1507 while (pRam && GCPhys > pRam->GCPhysLast)
1508 pRam = pRam->CTX_SUFF(pNext);
1509 /* Inside range or not? */
1510 if (pRam && GCPhys >= pRam->GCPhys)
1511 {
1512 /*
1513 * Must work our way thru this page by page.
1514 */
1515 RTGCPHYS off = GCPhys - pRam->GCPhys;
1516 while (off < pRam->cb)
1517 {
1518 unsigned iPage = off >> PAGE_SHIFT;
1519 PPGMPAGE pPage = &pRam->aPages[iPage];
1520 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1521 if (cb > cbRead)
1522 cb = cbRead;
1523
1524 /*
1525 * Any ALL access handlers?
1526 */
1527 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
1528 {
1529 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
1530 if (RT_FAILURE(rc))
1531 { pgmUnlock(pVM); return rc; /* don't leak the PGM lock */ }
1532 }
1533 else
1534 {
1535 /*
1536 * Get the pointer to the page.
1537 */
1538 const void *pvSrc;
1539 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
1540 if (RT_SUCCESS(rc))
1541 memcpy(pvBuf, pvSrc, cb);
1542 else
1543 {
1544 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1545 pRam->GCPhys + off, pPage, rc));
1546 memset(pvBuf, 0xff, cb);
1547 }
1548 }
1549
1550 /* next page */
1551 if (cb >= cbRead)
1552 {
1553 pgmUnlock(pVM);
1554 return VINF_SUCCESS;
1555 }
1556 cbRead -= cb;
1557 off += cb;
1558 pvBuf = (char *)pvBuf + cb;
1559 } /* walk pages in ram range. */
1560
1561 GCPhys = pRam->GCPhysLast + 1;
1562 }
1563 else
1564 {
1565 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
1566
1567 /*
1568 * Unassigned address space.
1569 */
1570 if (!pRam)
1571 break;
1572 size_t cb = pRam->GCPhys - GCPhys;
1573 if (cb >= cbRead)
1574 {
1575 memset(pvBuf, 0xff, cbRead);
1576 break;
1577 }
1578 memset(pvBuf, 0xff, cb);
1579
1580 cbRead -= cb;
1581 pvBuf = (char *)pvBuf + cb;
1582 GCPhys += cb;
1583 }
1584 } /* Ram range walk */
1585
1586 pgmUnlock(pVM);
1587 return VINF_SUCCESS;
1588}
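/*
 * Illustrative usage sketch (hypothetical caller): read a small guest
 * structure with full access-handler and MMIO awareness. Unassigned address
 * space reads back as 0xff rather than failing.
 *
 *     uint8_t abBuf[64];
 *     int rc = PGMPhysRead(pVM, GCPhys, abBuf, sizeof(abBuf));
 *     // Ring-3 always gets VINF_SUCCESS; in R0/RC the call may return
 *     // VERR_PGM_PHYS_WR_HIT_HANDLER and the access must be redone in ring-3.
 */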
1589
1590
1591/**
1592 * Deals with writing to a page with one or more WRITE or ALL access handlers.
1593 *
1594 * @returns VBox status code. Can be ignored in ring-3.
1595 * @retval VINF_SUCCESS.
1596 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1597 *
1598 * @param pVM The VM handle.
1599 * @param pPage The page descriptor.
1600 * @param GCPhys The physical address to start writing at.
1601 * @param pvBuf What to write.
1602 * @param cbWrite How much to write - less or equal to a page.
1603 */
1604static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
1605{
1606 void *pvDst = NULL;
1607 int rc;
1608
1609 /*
1610 * Give priority to physical handlers (like #PF does).
1611 *
1612 * Hope for a lonely physical handler first that covers the whole
1613 * write area. This should be a pretty frequent case with MMIO and
1614 * the heavy usage of full page handlers in the page pool.
1615 */
1616 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
1617 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
1618 {
1619 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1620 if (pCur)
1621 {
1622 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1623 Assert(pCur->CTX_SUFF(pfnHandler));
1624
1625 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
1626 if (cbRange > cbWrite)
1627 cbRange = cbWrite;
1628
1629#ifndef IN_RING3
1630 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1631 NOREF(cbRange);
1632 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1633 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1634
1635#else /* IN_RING3 */
1636 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1637 if (!PGM_PAGE_IS_MMIO(pPage))
1638 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1639 else
1640 rc = VINF_SUCCESS;
1641 if (RT_SUCCESS(rc))
1642 {
1643 STAM_PROFILE_START(&pCur->Stat, h);
1644 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pCur->CTX_SUFF(pvUser));
1645 STAM_PROFILE_STOP(&pCur->Stat, h);
1646 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1647 memcpy(pvDst, pvBuf, cbRange);
1648 else
1649 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
1650 }
1651 else
1652 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1653 GCPhys, pPage, rc), rc);
1654 if (RT_LIKELY(cbRange == cbWrite))
1655 return VINF_SUCCESS;
1656
1657 /* more fun to be had below */
1658 cbWrite -= cbRange;
1659 GCPhys += cbRange;
1660 pvBuf = (uint8_t *)pvBuf + cbRange;
1661 pvDst = (uint8_t *)pvDst + cbRange;
1662#endif /* IN_RING3 */
1663 }
1664 /* else: the handler is somewhere else in the page, deal with it below. */
1665 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
1666 }
1667 /*
1668 * A virtual handler without any interfering physical handlers.
1669 * Hopefully it'll cover the whole write.
1670 */
1671 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
1672 {
1673 unsigned iPage;
1674 PPGMVIRTHANDLER pCur;
1675 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
1676 if (RT_SUCCESS(rc))
1677 {
1678 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
1679 if (cbRange > cbWrite)
1680 cbRange = cbWrite;
1681
1682#ifndef IN_RING3
1683 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1684 NOREF(cbRange);
1685 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1686 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1687
1688#else /* IN_RING3 */
1689
1690 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1691 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1692 if (RT_SUCCESS(rc))
1693 {
1694 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1695 if (pCur->pfnHandlerR3)
1696 {
1697 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
1698 + (iPage << PAGE_SHIFT)
1699 + (GCPhys & PAGE_OFFSET_MASK);
1700
1701 STAM_PROFILE_START(&pCur->Stat, h);
1702 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
1703 STAM_PROFILE_STOP(&pCur->Stat, h);
1704 }
1705 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1706 memcpy(pvDst, pvBuf, cbRange);
1707 else
1708 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
1709 }
1710 else
1711 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1712 GCPhys, pPage, rc), rc);
1713 if (RT_LIKELY(cbRange == cbWrite))
1714 return VINF_SUCCESS;
1715
1716 /* more fun to be had below */
1717 cbWrite -= cbRange;
1718 GCPhys += cbRange;
1719 pvBuf = (uint8_t *)pvBuf + cbRange;
1720 pvDst = (uint8_t *)pvDst + cbRange;
1721#endif
1722 }
1723 /* else: the handler is somewhere else in the page, deal with it below. */
1724 }
1725
1726 /*
1727 * Deal with all the odd ends.
1728 */
1729
1730 /* We need a writable destination page. */
1731 if (!pvDst)
1732 {
1733 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1734 AssertLogRelMsgReturn(RT_SUCCESS(rc),
1735 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1736 GCPhys, pPage, rc), rc);
1737 }
1738
1739 /* The loop state (big + ugly). */
1740 unsigned iVirtPage = 0;
1741 PPGMVIRTHANDLER pVirt = NULL;
1742 uint32_t offVirt = PAGE_SIZE;
1743 uint32_t offVirtLast = PAGE_SIZE;
1744 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
1745
1746 PPGMPHYSHANDLER pPhys = NULL;
1747 uint32_t offPhys = PAGE_SIZE;
1748 uint32_t offPhysLast = PAGE_SIZE;
1749 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
1750
1751 /* The loop. */
1752 for (;;)
1753 {
1754 /*
1755 * Find the closest handler at or above GCPhys.
1756 */
1757 if (fMoreVirt && !pVirt)
1758 {
1759 int rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
1760 if (RT_SUCCESS(rc))
1761 {
1762 offVirt = 0;
1763 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1764 }
1765 else
1766 {
1767 PPGMPHYS2VIRTHANDLER pVirtPhys;
1768 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
1769 GCPhys, true /* fAbove */);
1770 if ( pVirtPhys
1771 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
1772 {
1773 /* ASSUME that pVirtPhys only covers one page. */
1774 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
1775 Assert(pVirtPhys->Core.Key > GCPhys);
1776
1777 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
1778 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
1779 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1780 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1781 }
1782 else
1783 {
1784 pVirt = NULL;
1785 fMoreVirt = false;
1786 offVirt = offVirtLast = PAGE_SIZE;
1787 }
1788 }
1789 }
1790
1791 if (fMorePhys && !pPhys)
1792 {
1793 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1794 if (pPhys)
1795 {
1796 offPhys = 0;
1797 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
1798 }
1799 else
1800 {
1801 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
1802 GCPhys, true /* fAbove */);
1803 if ( pPhys
1804 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
1805 {
1806 offPhys = pPhys->Core.Key - GCPhys;
1807 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
1808 }
1809 else
1810 {
1811 pPhys = NULL;
1812 fMorePhys = false;
1813 offPhys = offPhysLast = PAGE_SIZE;
1814 }
1815 }
1816 }
1817
1818 /*
1819 * Handle access to space without handlers (that's easy).
1820 */
1821 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1822 uint32_t cbRange = (uint32_t)cbWrite;
1823 if (offPhys && offVirt)
1824 {
1825 if (cbRange > offPhys)
1826 cbRange = offPhys;
1827 if (cbRange > offVirt)
1828 cbRange = offVirt;
1829 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
1830 }
1831 /*
1832 * Physical handler.
1833 */
1834 else if (!offPhys && offVirt)
1835 {
1836 if (cbRange > offPhysLast + 1)
1837 cbRange = offPhysLast + 1;
1838 if (cbRange > offVirt)
1839 cbRange = offVirt;
1840#ifdef IN_RING3
1841 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
1842 STAM_PROFILE_START(&pPhys->Stat, h);
1843 rc = pPhys->CTX_SUFF(pfnHandler)(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pPhys->CTX_SUFF(pvUser));
1844 STAM_PROFILE_STOP(&pPhys->Stat, h);
1845 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pPhys->pszDesc));
1846 pPhys = NULL;
1847#else
1848 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1849 NOREF(cbRange);
1850 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1851 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1852#endif
1853 }
1854 /*
1855 * Virtual handler.
1856 */
1857 else if (offPhys && !offVirt)
1858 {
1859 if (cbRange > offVirtLast + 1)
1860 cbRange = offVirtLast + 1;
1861 if (cbRange > offPhys)
1862 cbRange = offPhys;
1863#ifdef IN_RING3
 1864 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
1865 if (pVirt->pfnHandlerR3)
1866 {
1867 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1868 + (iVirtPage << PAGE_SHIFT)
1869 + (GCPhys & PAGE_OFFSET_MASK);
1870 STAM_PROFILE_START(&pVirt->Stat, h);
 1871 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
1872 STAM_PROFILE_STOP(&pVirt->Stat, h);
1873 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
1874 }
1875 pVirt = NULL;
1876#else
1877 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1878 NOREF(cbRange);
1879 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1880 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1881#endif
1882 }
1883 /*
1884 * Both... give the physical one priority.
1885 */
1886 else
1887 {
1888 Assert(!offPhys && !offVirt);
1889 if (cbRange > offVirtLast + 1)
1890 cbRange = offVirtLast + 1;
1891 if (cbRange > offPhysLast + 1)
1892 cbRange = offPhysLast + 1;
1893
1894#ifdef IN_RING3
1895 if (pVirt->pfnHandlerR3)
1896 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
1897 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
1898
1899 STAM_PROFILE_START(&pPhys->Stat, h);
1900 rc = pPhys->CTX_SUFF(pfnHandler)(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pPhys->CTX_SUFF(pvUser));
1901 STAM_PROFILE_STOP(&pPhys->Stat, h);
1902 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pPhys->pszDesc));
1903 if (pVirt->pfnHandlerR3)
1904 {
1905
1906 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1907 + (iVirtPage << PAGE_SHIFT)
1908 + (GCPhys & PAGE_OFFSET_MASK);
1909 STAM_PROFILE_START(&pVirt->Stat, h);
 1910 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
1911 STAM_PROFILE_STOP(&pVirt->Stat, h);
 1912 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc2=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
1913 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
1914 rc = VINF_SUCCESS;
1915 }
1916 pPhys = NULL;
1917 pVirt = NULL;
1918#else
1919 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1920 NOREF(cbRange);
1921 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1922 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1923#endif
1924 }
1925 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1926 memcpy(pvDst, pvBuf, cbRange);
1927
1928 /*
1929 * Advance if we've got more stuff to do.
1930 */
1931 if (cbRange >= cbWrite)
1932 return VINF_SUCCESS;
1933
1934 cbWrite -= cbRange;
1935 GCPhys += cbRange;
1936 pvBuf = (uint8_t *)pvBuf + cbRange;
1937 pvDst = (uint8_t *)pvDst + cbRange;
1938
1939 offPhys -= cbRange;
1940 offPhysLast -= cbRange;
1941 offVirt -= cbRange;
1942 offVirtLast -= cbRange;
1943 }
1944}
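/*
 * Illustrative sketch of the chunking rule used by the loop above (the names
 * refer to the loop state; this is not literal code from the function): each
 * iteration handles at most the distance to the nearest handler boundary, so
 * a single page worth of writes may be split across several handler calls.
 *
 *    uint32_t cbChunk = (uint32_t)cbWrite;
 *    cbChunk = RT_MIN(cbChunk, offPhys ? offPhys : offPhysLast + 1);  // physical handler boundary
 *    cbChunk = RT_MIN(cbChunk, offVirt ? offVirt : offVirtLast + 1);  // virtual handler boundary
 */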
1945
1946
1947/**
1948 * Write to physical memory.
1949 *
 1950 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
1951 * want to ignore those.
1952 *
1953 * @returns VBox status code. Can be ignored in ring-3.
1954 * @retval VINF_SUCCESS.
1955 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1956 *
1957 * @param pVM VM Handle.
1958 * @param GCPhys Physical address to write to.
1959 * @param pvBuf What to write.
1960 * @param cbWrite How many bytes to write.
1961 */
1962VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
1963{
1964 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
1965 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
1966 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
1967
1968 pgmLock(pVM);
1969
1970 /*
1971 * Copy loop on ram ranges.
1972 */
1973 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1974 for (;;)
1975 {
1976 /* Find range. */
1977 while (pRam && GCPhys > pRam->GCPhysLast)
1978 pRam = pRam->CTX_SUFF(pNext);
1979 /* Inside range or not? */
1980 if (pRam && GCPhys >= pRam->GCPhys)
1981 {
1982 /*
1983 * Must work our way thru this page by page.
1984 */
1985 RTGCPTR off = GCPhys - pRam->GCPhys;
1986 while (off < pRam->cb)
1987 {
1988 RTGCPTR iPage = off >> PAGE_SHIFT;
1989 PPGMPAGE pPage = &pRam->aPages[iPage];
1990 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1991 if (cb > cbWrite)
1992 cb = cbWrite;
1993
1994 /*
1995 * Any active WRITE or ALL access handlers?
1996 */
1997 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1998 {
1999 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
 2000 if (RT_FAILURE(rc))
 2001 { pgmUnlock(pVM); return rc; }
2002 }
2003 else
2004 {
2005 /*
2006 * Get the pointer to the page.
2007 */
2008 void *pvDst;
2009 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
2010 if (RT_SUCCESS(rc))
2011 memcpy(pvDst, pvBuf, cb);
2012 else
2013 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2014 pRam->GCPhys + off, pPage, rc));
2015 }
2016
2017 /* next page */
2018 if (cb >= cbWrite)
2019 {
2020 pgmUnlock(pVM);
2021 return VINF_SUCCESS;
2022 }
2023
2024 cbWrite -= cb;
2025 off += cb;
2026 pvBuf = (const char *)pvBuf + cb;
2027 } /* walk pages in ram range */
2028
2029 GCPhys = pRam->GCPhysLast + 1;
2030 }
2031 else
2032 {
2033 /*
2034 * Unassigned address space, skip it.
2035 */
2036 if (!pRam)
2037 break;
2038 size_t cb = pRam->GCPhys - GCPhys;
2039 if (cb >= cbWrite)
2040 break;
2041 cbWrite -= cb;
2042 pvBuf = (const char *)pvBuf + cb;
2043 GCPhys += cb;
2044 }
2045 } /* Ram range walk */
2046
2047 pgmUnlock(pVM);
2048 return VINF_SUCCESS;
2049}
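/*
 * Minimal usage sketch (hypothetical ring-3 caller; GCPhysDst and the buffer
 * contents are placeholders): a handler-respecting write of a small buffer.
 *
 *    uint8_t abData[64];
 *    memset(abData, 0, sizeof(abData));
 *    int rc = PGMPhysWrite(pVM, GCPhysDst, abData, sizeof(abData));
 *    if (RT_FAILURE(rc))    // VERR_PGM_PHYS_WR_HIT_HANDLER is only possible in R0/RC
 *        Log(("PGMPhysWrite -> %Rrc\n", rc));
 */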
2050
2051
2052/**
2053 * Read from guest physical memory by GC physical address, bypassing
2054 * MMIO and access handlers.
2055 *
2056 * @returns VBox status.
2057 * @param pVM VM handle.
2058 * @param pvDst The destination address.
2059 * @param GCPhysSrc The source address (GC physical address).
2060 * @param cb The number of bytes to read.
2061 */
2062VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2063{
2064 /*
2065 * Treat the first page as a special case.
2066 */
2067 if (!cb)
2068 return VINF_SUCCESS;
2069
2070 /* map the 1st page */
2071 void const *pvSrc;
2072 PGMPAGEMAPLOCK Lock;
2073 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2074 if (RT_FAILURE(rc))
2075 return rc;
2076
2077 /* optimize for the case where access is completely within the first page. */
2078 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2079 if (RT_LIKELY(cb <= cbPage))
2080 {
2081 memcpy(pvDst, pvSrc, cb);
2082 PGMPhysReleasePageMappingLock(pVM, &Lock);
2083 return VINF_SUCCESS;
2084 }
2085
2086 /* copy to the end of the page. */
2087 memcpy(pvDst, pvSrc, cbPage);
2088 PGMPhysReleasePageMappingLock(pVM, &Lock);
2089 GCPhysSrc += cbPage;
2090 pvDst = (uint8_t *)pvDst + cbPage;
2091 cb -= cbPage;
2092
2093 /*
2094 * Page by page.
2095 */
2096 for (;;)
2097 {
2098 /* map the page */
2099 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2100 if (RT_FAILURE(rc))
2101 return rc;
2102
2103 /* last page? */
2104 if (cb <= PAGE_SIZE)
2105 {
2106 memcpy(pvDst, pvSrc, cb);
2107 PGMPhysReleasePageMappingLock(pVM, &Lock);
2108 return VINF_SUCCESS;
2109 }
2110
2111 /* copy the entire page and advance */
2112 memcpy(pvDst, pvSrc, PAGE_SIZE);
2113 PGMPhysReleasePageMappingLock(pVM, &Lock);
2114 GCPhysSrc += PAGE_SIZE;
2115 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2116 cb -= PAGE_SIZE;
2117 }
2118 /* won't ever get here. */
2119}
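/*
 * Minimal usage sketch (hypothetical; GCPhysTable is a placeholder address):
 * reading a small guest structure while ignoring MMIO and access handlers.
 *
 *    struct { uint32_t u32Magic; uint32_t cEntries; } Hdr;
 *    int rc = PGMPhysSimpleReadGCPhys(pVM, &Hdr, GCPhysTable, sizeof(Hdr));
 *    if (RT_SUCCESS(rc))
 *        Log(("magic=%#x entries=%u\n", Hdr.u32Magic, Hdr.cEntries));
 */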
2120
2121#ifndef IN_RC /* Ring 0 & 3 only. (Just not needed in GC.) */
2122
2123/**
 2124 * Write to guest physical memory by GC physical address, i.e. the
 2125 * counterpart to PGMPhysSimpleReadGCPhys().
2126 *
2127 * This will bypass MMIO and access handlers.
2128 *
2129 * @returns VBox status.
2130 * @param pVM VM handle.
2131 * @param GCPhysDst The GC physical address of the destination.
2132 * @param pvSrc The source buffer.
2133 * @param cb The number of bytes to write.
2134 */
2135VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2136{
2137 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2138
2139 /*
2140 * Treat the first page as a special case.
2141 */
2142 if (!cb)
2143 return VINF_SUCCESS;
2144
2145 /* map the 1st page */
2146 void *pvDst;
2147 PGMPAGEMAPLOCK Lock;
2148 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2149 if (RT_FAILURE(rc))
2150 return rc;
2151
2152 /* optimize for the case where access is completely within the first page. */
2153 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2154 if (RT_LIKELY(cb <= cbPage))
2155 {
2156 memcpy(pvDst, pvSrc, cb);
2157 PGMPhysReleasePageMappingLock(pVM, &Lock);
2158 return VINF_SUCCESS;
2159 }
2160
2161 /* copy to the end of the page. */
2162 memcpy(pvDst, pvSrc, cbPage);
2163 PGMPhysReleasePageMappingLock(pVM, &Lock);
2164 GCPhysDst += cbPage;
2165 pvSrc = (const uint8_t *)pvSrc + cbPage;
2166 cb -= cbPage;
2167
2168 /*
2169 * Page by page.
2170 */
2171 for (;;)
2172 {
2173 /* map the page */
2174 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2175 if (RT_FAILURE(rc))
2176 return rc;
2177
2178 /* last page? */
2179 if (cb <= PAGE_SIZE)
2180 {
2181 memcpy(pvDst, pvSrc, cb);
2182 PGMPhysReleasePageMappingLock(pVM, &Lock);
2183 return VINF_SUCCESS;
2184 }
2185
2186 /* copy the entire page and advance */
2187 memcpy(pvDst, pvSrc, PAGE_SIZE);
2188 PGMPhysReleasePageMappingLock(pVM, &Lock);
2189 GCPhysDst += PAGE_SIZE;
2190 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2191 cb -= PAGE_SIZE;
2192 }
2193 /* won't ever get here. */
2194}
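/*
 * Minimal usage sketch (hypothetical; pvBlob, cbBlob and GCPhysBlob are
 * placeholders): writing a possibly multi-page blob into guest physical
 * memory; the mapping and copying is done page by page internally.
 *
 *    int rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysBlob, pvBlob, cbBlob);
 *    AssertRCReturn(rc, rc);
 */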
2195
2196
2197/**
2198 * Read from guest physical memory referenced by GC pointer.
2199 *
2200 * This function uses the current CR3/CR0/CR4 of the guest and will
2201 * bypass access handlers and not set any accessed bits.
2202 *
2203 * @returns VBox status.
2204 * @param pVM VM handle.
2205 * @param pvDst The destination address.
2206 * @param GCPtrSrc The source address (GC pointer).
2207 * @param cb The number of bytes to read.
2208 */
2209VMMDECL(int) PGMPhysSimpleReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2210{
2211 /*
2212 * Treat the first page as a special case.
2213 */
2214 if (!cb)
2215 return VINF_SUCCESS;
2216
2217 /* map the 1st page */
2218 void const *pvSrc;
2219 PGMPAGEMAPLOCK Lock;
2220 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVM, GCPtrSrc, &pvSrc, &Lock);
2221 if (RT_FAILURE(rc))
2222 return rc;
2223
2224 /* optimize for the case where access is completely within the first page. */
2225 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2226 if (RT_LIKELY(cb <= cbPage))
2227 {
2228 memcpy(pvDst, pvSrc, cb);
2229 PGMPhysReleasePageMappingLock(pVM, &Lock);
2230 return VINF_SUCCESS;
2231 }
2232
2233 /* copy to the end of the page. */
2234 memcpy(pvDst, pvSrc, cbPage);
2235 PGMPhysReleasePageMappingLock(pVM, &Lock);
2236 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
2237 pvDst = (uint8_t *)pvDst + cbPage;
2238 cb -= cbPage;
2239
2240 /*
2241 * Page by page.
2242 */
2243 for (;;)
2244 {
2245 /* map the page */
2246 rc = PGMPhysGCPtr2CCPtrReadOnly(pVM, GCPtrSrc, &pvSrc, &Lock);
2247 if (RT_FAILURE(rc))
2248 return rc;
2249
2250 /* last page? */
2251 if (cb <= PAGE_SIZE)
2252 {
2253 memcpy(pvDst, pvSrc, cb);
2254 PGMPhysReleasePageMappingLock(pVM, &Lock);
2255 return VINF_SUCCESS;
2256 }
2257
2258 /* copy the entire page and advance */
2259 memcpy(pvDst, pvSrc, PAGE_SIZE);
2260 PGMPhysReleasePageMappingLock(pVM, &Lock);
2261 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
2262 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2263 cb -= PAGE_SIZE;
2264 }
2265 /* won't ever get here. */
2266}
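/*
 * Minimal usage sketch (hypothetical; GCPtrInstr is a placeholder guest
 * pointer): fetching a few opcode bytes through the current guest page tables
 * without touching access handlers or the accessed bits.
 *
 *    uint8_t abOpcode[16];
 *    int rc = PGMPhysSimpleReadGCPtr(pVM, abOpcode, GCPtrInstr, sizeof(abOpcode));
 *    if (RT_FAILURE(rc))    // e.g. the page isn't present
 *        return rc;
 */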
2267
2268
2269/**
2270 * Write to guest physical memory referenced by GC pointer.
2271 *
2272 * This function uses the current CR3/CR0/CR4 of the guest and will
2273 * bypass access handlers and not set dirty or accessed bits.
2274 *
2275 * @returns VBox status.
2276 * @param pVM VM handle.
2277 * @param GCPtrDst The destination address (GC pointer).
2278 * @param pvSrc The source address.
2279 * @param cb The number of bytes to write.
2280 */
2281VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2282{
2283 /*
2284 * Treat the first page as a special case.
2285 */
2286 if (!cb)
2287 return VINF_SUCCESS;
2288
2289 /* map the 1st page */
2290 void *pvDst;
2291 PGMPAGEMAPLOCK Lock;
2292 int rc = PGMPhysGCPtr2CCPtr(pVM, GCPtrDst, &pvDst, &Lock);
2293 if (RT_FAILURE(rc))
2294 return rc;
2295
2296 /* optimize for the case where access is completely within the first page. */
2297 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2298 if (RT_LIKELY(cb <= cbPage))
2299 {
2300 memcpy(pvDst, pvSrc, cb);
2301 PGMPhysReleasePageMappingLock(pVM, &Lock);
2302 return VINF_SUCCESS;
2303 }
2304
2305 /* copy to the end of the page. */
2306 memcpy(pvDst, pvSrc, cbPage);
2307 PGMPhysReleasePageMappingLock(pVM, &Lock);
2308 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2309 pvSrc = (const uint8_t *)pvSrc + cbPage;
2310 cb -= cbPage;
2311
2312 /*
2313 * Page by page.
2314 */
2315 for (;;)
2316 {
2317 /* map the page */
2318 rc = PGMPhysGCPtr2CCPtr(pVM, GCPtrDst, &pvDst, &Lock);
2319 if (RT_FAILURE(rc))
2320 return rc;
2321
2322 /* last page? */
2323 if (cb <= PAGE_SIZE)
2324 {
2325 memcpy(pvDst, pvSrc, cb);
2326 PGMPhysReleasePageMappingLock(pVM, &Lock);
2327 return VINF_SUCCESS;
2328 }
2329
2330 /* copy the entire page and advance */
2331 memcpy(pvDst, pvSrc, PAGE_SIZE);
2332 PGMPhysReleasePageMappingLock(pVM, &Lock);
2333 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2334 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2335 cb -= PAGE_SIZE;
2336 }
2337 /* won't ever get here. */
2338}
2339
2340
2341/**
2342 * Write to guest physical memory referenced by GC pointer and update the PTE.
2343 *
2344 * This function uses the current CR3/CR0/CR4 of the guest and will
2345 * bypass access handlers but will set any dirty and accessed bits in the PTE.
2346 *
2347 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
2348 *
2349 * @returns VBox status.
2350 * @param pVM VM handle.
2351 * @param GCPtrDst The destination address (GC pointer).
2352 * @param pvSrc The source address.
2353 * @param cb The number of bytes to write.
2354 */
2355VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2356{
2357 /*
2358 * Treat the first page as a special case.
 2359 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
2360 */
2361 if (!cb)
2362 return VINF_SUCCESS;
2363
2364 /* map the 1st page */
2365 void *pvDst;
2366 PGMPAGEMAPLOCK Lock;
2367 int rc = PGMPhysGCPtr2CCPtr(pVM, GCPtrDst, &pvDst, &Lock);
2368 if (RT_FAILURE(rc))
2369 return rc;
2370
2371 /* optimize for the case where access is completely within the first page. */
2372 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2373 if (RT_LIKELY(cb <= cbPage))
2374 {
2375 memcpy(pvDst, pvSrc, cb);
2376 PGMPhysReleasePageMappingLock(pVM, &Lock);
2377 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2378 return VINF_SUCCESS;
2379 }
2380
2381 /* copy to the end of the page. */
2382 memcpy(pvDst, pvSrc, cbPage);
2383 PGMPhysReleasePageMappingLock(pVM, &Lock);
2384 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2385 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2386 pvSrc = (const uint8_t *)pvSrc + cbPage;
2387 cb -= cbPage;
2388
2389 /*
2390 * Page by page.
2391 */
2392 for (;;)
2393 {
2394 /* map the page */
2395 rc = PGMPhysGCPtr2CCPtr(pVM, GCPtrDst, &pvDst, &Lock);
2396 if (RT_FAILURE(rc))
2397 return rc;
2398
2399 /* last page? */
2400 if (cb <= PAGE_SIZE)
2401 {
2402 memcpy(pvDst, pvSrc, cb);
2403 PGMPhysReleasePageMappingLock(pVM, &Lock);
2404 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2405 return VINF_SUCCESS;
2406 }
2407
2408 /* copy the entire page and advance */
2409 memcpy(pvDst, pvSrc, PAGE_SIZE);
2410 PGMPhysReleasePageMappingLock(pVM, &Lock);
2411 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2412 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2413 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2414 cb -= PAGE_SIZE;
2415 }
2416 /* won't ever get here. */
2417}
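/*
 * Minimal usage sketch (hypothetical; GCPtrPatch and the payload are
 * placeholders): a write where the guest PTE must end up accessed and dirty,
 * just like after a real CPU store.
 *
 *    uint32_t const u32Patch = 0x90909090;
 *    int rc = PGMPhysSimpleDirtyWriteGCPtr(pVM, GCPtrPatch, &u32Patch, sizeof(u32Patch));
 *    AssertRC(rc);
 */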
2418
2419
2420/**
2421 * Read from guest physical memory referenced by GC pointer.
2422 *
2423 * This function uses the current CR3/CR0/CR4 of the guest and will
2424 * respect access handlers and set accessed bits.
2425 *
2426 * @returns VBox status.
2427 * @param pVM VM handle.
2428 * @param pvDst The destination address.
2429 * @param GCPtrSrc The source address (GC pointer).
2430 * @param cb The number of bytes to read.
2431 * @thread The vCPU EMT.
2432 */
2433VMMDECL(int) PGMPhysReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2434{
2435 RTGCPHYS GCPhys;
2436 uint64_t fFlags;
2437 int rc;
2438
2439 /*
2440 * Anything to do?
2441 */
2442 if (!cb)
2443 return VINF_SUCCESS;
2444
2445 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
2446
2447 /*
2448 * Optimize reads within a single page.
2449 */
2450 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2451 {
2452 /* Convert virtual to physical address + flags */
2453 rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2454 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2455 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2456
2457 /* mark the guest page as accessed. */
2458 if (!(fFlags & X86_PTE_A))
2459 {
2460 rc = PGMGstModifyPage(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2461 AssertRC(rc);
2462 }
2463
2464 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
2465 }
2466
2467 /*
2468 * Page by page.
2469 */
2470 for (;;)
2471 {
2472 /* Convert virtual to physical address + flags */
2473 rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2474 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2475 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2476
2477 /* mark the guest page as accessed. */
2478 if (!(fFlags & X86_PTE_A))
2479 {
2480 rc = PGMGstModifyPage(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2481 AssertRC(rc);
2482 }
2483
2484 /* copy */
2485 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2486 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
2487 if (cbRead >= cb || RT_FAILURE(rc))
2488 return rc;
2489
2490 /* next */
2491 cb -= cbRead;
2492 pvDst = (uint8_t *)pvDst + cbRead;
2493 GCPtrSrc += cbRead;
2494 }
2495}
2496
2497
2498/**
2499 * Write to guest physical memory referenced by GC pointer.
2500 *
2501 * This function uses the current CR3/CR0/CR4 of the guest and will
2502 * respect access handlers and set dirty and accessed bits.
2503 *
2504 * @returns VBox status.
2505 * @retval VINF_SUCCESS.
2506 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2507 *
2508 * @param pVM VM handle.
2509 * @param GCPtrDst The destination address (GC pointer).
2510 * @param pvSrc The source address.
2511 * @param cb The number of bytes to write.
2512 */
2513VMMDECL(int) PGMPhysWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2514{
2515 RTGCPHYS GCPhys;
2516 uint64_t fFlags;
2517 int rc;
2518
2519 /*
2520 * Anything to do?
2521 */
2522 if (!cb)
2523 return VINF_SUCCESS;
2524
2525 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
2526
2527 /*
2528 * Optimize writes within a single page.
2529 */
2530 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2531 {
2532 /* Convert virtual to physical address + flags */
2533 rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2534 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2535 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2536
2537 /* Mention when we ignore X86_PTE_RW... */
2538 if (!(fFlags & X86_PTE_RW))
2539 Log(("PGMPhysGCPtr2GCPhys: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2540
2541 /* Mark the guest page as accessed and dirty if necessary. */
2542 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2543 {
2544 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2545 AssertRC(rc);
2546 }
2547
2548 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
2549 }
2550
2551 /*
2552 * Page by page.
2553 */
2554 for (;;)
2555 {
2556 /* Convert virtual to physical address + flags */
2557 rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2558 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2559 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2560
2561 /* Mention when we ignore X86_PTE_RW... */
2562 if (!(fFlags & X86_PTE_RW))
2563 Log(("PGMPhysGCPtr2GCPhys: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2564
2565 /* Mark the guest page as accessed and dirty if necessary. */
2566 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2567 {
2568 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2569 AssertRC(rc);
2570 }
2571
2572 /* copy */
2573 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
 2574 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
2575 if (cbWrite >= cb || RT_FAILURE(rc))
2576 return rc;
2577
2578 /* next */
2579 cb -= cbWrite;
2580 pvSrc = (uint8_t *)pvSrc + cbWrite;
2581 GCPtrDst += cbWrite;
2582 }
2583}
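/*
 * Minimal usage sketch (hypothetical; GCPtrFault and u16Value are
 * placeholders): a handler-respecting store through a guest virtual address,
 * as used when emulating an instruction that may hit MMIO or monitored pages.
 *
 *    uint16_t const u16Value = 0x1234;
 *    int rc = PGMPhysWriteGCPtr(pVM, GCPtrFault, &u16Value, sizeof(u16Value));
 *    if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)    // R0/RC only, retry in ring-3
 *        return rc;
 */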
2584
2585#endif /* !IN_RC */
2586
2587/**
2588 * Performs a read of guest virtual memory for instruction emulation.
2589 *
2590 * This will check permissions, raise exceptions and update the access bits.
2591 *
2592 * The current implementation will bypass all access handlers. It may later be
2593 * changed to at least respect MMIO.
2594 *
2595 *
2596 * @returns VBox status code suitable to scheduling.
2597 * @retval VINF_SUCCESS if the read was performed successfully.
2598 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2599 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2600 *
2601 * @param pVM The VM handle.
2602 * @param pCtxCore The context core.
2603 * @param pvDst Where to put the bytes we've read.
2604 * @param GCPtrSrc The source address.
2605 * @param cb The number of bytes to read. Not more than a page.
2606 *
2607 * @remark This function will dynamically map physical pages in GC. This may unmap
2608 * mappings done by the caller. Be careful!
2609 */
2610VMMDECL(int) PGMPhysInterpretedRead(PVM pVM, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
2611{
2612 Assert(cb <= PAGE_SIZE);
2613
2614/** @todo r=bird: This isn't perfect!
2615 * -# It's not checking for reserved bits being 1.
2616 * -# It's not correctly dealing with the access bit.
2617 * -# It's not respecting MMIO memory or any other access handlers.
2618 */
2619 /*
2620 * 1. Translate virtual to physical. This may fault.
2621 * 2. Map the physical address.
2622 * 3. Do the read operation.
2623 * 4. Set access bits if required.
2624 */
2625 int rc;
2626 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
2627 if (cb <= cb1)
2628 {
2629 /*
2630 * Not crossing pages.
2631 */
2632 RTGCPHYS GCPhys;
2633 uint64_t fFlags;
2634 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags, &GCPhys);
2635 if (RT_SUCCESS(rc))
2636 {
2637 /** @todo we should check reserved bits ... */
2638 void *pvSrc;
2639 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
2640 switch (rc)
2641 {
2642 case VINF_SUCCESS:
2643 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
2644 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
2645 break;
2646 case VERR_PGM_PHYS_PAGE_RESERVED:
2647 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2648 memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
2649 break;
2650 default:
2651 return rc;
2652 }
2653
2654 /** @todo access bit emulation isn't 100% correct. */
2655 if (!(fFlags & X86_PTE_A))
2656 {
2657 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2658 AssertRC(rc);
2659 }
2660 return VINF_SUCCESS;
2661 }
2662 }
2663 else
2664 {
2665 /*
2666 * Crosses pages.
2667 */
2668 size_t cb2 = cb - cb1;
2669 uint64_t fFlags1;
2670 RTGCPHYS GCPhys1;
2671 uint64_t fFlags2;
2672 RTGCPHYS GCPhys2;
2673 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags1, &GCPhys1);
2674 if (RT_SUCCESS(rc))
2675 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
2676 if (RT_SUCCESS(rc))
2677 {
2678 /** @todo we should check reserved bits ... */
2679 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
2680 void *pvSrc1;
2681 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
2682 switch (rc)
2683 {
2684 case VINF_SUCCESS:
2685 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
2686 break;
2687 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2688 memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
2689 break;
2690 default:
2691 return rc;
2692 }
2693
2694 void *pvSrc2;
2695 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
2696 switch (rc)
2697 {
2698 case VINF_SUCCESS:
2699 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
2700 break;
2701 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2702 memset((uint8_t *)pvDst + cb1, 0, cb2); /** @todo this is wrong, it should be 0xff */
2703 break;
2704 default:
2705 return rc;
2706 }
2707
2708 if (!(fFlags1 & X86_PTE_A))
2709 {
2710 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2711 AssertRC(rc);
2712 }
2713 if (!(fFlags2 & X86_PTE_A))
2714 {
2715 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2716 AssertRC(rc);
2717 }
2718 return VINF_SUCCESS;
2719 }
2720 }
2721
2722 /*
2723 * Raise a #PF.
2724 */
2725 uint32_t uErr;
2726
2727 /* Get the current privilege level. */
2728 uint32_t cpl = CPUMGetGuestCPL(pVM, pCtxCore);
2729 switch (rc)
2730 {
2731 case VINF_SUCCESS:
2732 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
2733 break;
2734
2735 case VERR_PAGE_NOT_PRESENT:
2736 case VERR_PAGE_TABLE_NOT_PRESENT:
2737 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
2738 break;
2739
2740 default:
2741 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
2742 return rc;
2743 }
2744 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
2745 return TRPMRaiseXcptErrCR2(pVM, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
2746}
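/*
 * The page-crossing split above in numbers (illustrative example): for a
 * 6 byte read at GCPtrSrc = 0x00203ffd,
 *
 *    cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK)    // 0x1000 - 0xffd = 3 bytes on the 1st page
 *    cb2 = cb - cb1                                     // 6 - 3 = 3 bytes on the 2nd page
 *
 * so each of the two translations and copies stays within a single page.
 */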
2747
2748
2749/**
2750 * Performs a read of guest virtual memory for instruction emulation.
2751 *
2752 * This will check permissions, raise exceptions and update the access bits.
2753 *
2754 * The current implementation will bypass all access handlers. It may later be
2755 * changed to at least respect MMIO.
2756 *
2757 *
2758 * @returns VBox status code suitable to scheduling.
2759 * @retval VINF_SUCCESS if the read was performed successfully.
2760 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2761 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2762 *
2763 * @param pVM The VM handle.
2764 * @param pCtxCore The context core.
2765 * @param pvDst Where to put the bytes we've read.
2766 * @param GCPtrSrc The source address.
2767 * @param cb The number of bytes to read. Not more than a page.
 2768 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
 2769 * an appropriate error status will be returned (no
 2770 * informational status at all).
2771 *
2772 *
2773 * @remarks Takes the PGM lock.
2774 * @remarks A page fault on the 2nd page of the access will be raised without
2775 * writing the bits on the first page since we're ASSUMING that the
2776 * caller is emulating an instruction access.
2777 * @remarks This function will dynamically map physical pages in GC. This may
2778 * unmap mappings done by the caller. Be careful!
2779 */
2780VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVM pVM, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
2781{
2782 Assert(cb <= PAGE_SIZE);
2783
2784 /*
2785 * 1. Translate virtual to physical. This may fault.
2786 * 2. Map the physical address.
2787 * 3. Do the read operation.
2788 * 4. Set access bits if required.
2789 */
2790 int rc;
2791 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
2792 if (cb <= cb1)
2793 {
2794 /*
2795 * Not crossing pages.
2796 */
2797 RTGCPHYS GCPhys;
2798 uint64_t fFlags;
2799 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags, &GCPhys);
2800 if (RT_SUCCESS(rc))
2801 {
2802 if (1) /** @todo we should check reserved bits ... */
2803 {
2804 const void *pvSrc;
2805 PGMPAGEMAPLOCK Lock;
2806 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
2807 switch (rc)
2808 {
2809 case VINF_SUCCESS:
2810 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
2811 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
2812 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
2813 break;
2814 case VERR_PGM_PHYS_PAGE_RESERVED:
2815 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2816 memset(pvDst, 0xff, cb);
2817 break;
2818 default:
2819 AssertMsgFailed(("%Rrc\n", rc));
2820 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
2821 return rc;
2822 }
2823 PGMPhysReleasePageMappingLock(pVM, &Lock);
2824
2825 if (!(fFlags & X86_PTE_A))
2826 {
2827 /** @todo access bit emulation isn't 100% correct. */
2828 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2829 AssertRC(rc);
2830 }
2831 return VINF_SUCCESS;
2832 }
2833 }
2834 }
2835 else
2836 {
2837 /*
2838 * Crosses pages.
2839 */
2840 size_t cb2 = cb - cb1;
2841 uint64_t fFlags1;
2842 RTGCPHYS GCPhys1;
2843 uint64_t fFlags2;
2844 RTGCPHYS GCPhys2;
2845 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags1, &GCPhys1);
2846 if (RT_SUCCESS(rc))
2847 {
2848 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
2849 if (RT_SUCCESS(rc))
2850 {
2851 if (1) /** @todo we should check reserved bits ... */
2852 {
2853 const void *pvSrc;
2854 PGMPAGEMAPLOCK Lock;
2855 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
2856 switch (rc)
2857 {
2858 case VINF_SUCCESS:
2859 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
2860 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
2861 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
2862 PGMPhysReleasePageMappingLock(pVM, &Lock);
2863 break;
2864 case VERR_PGM_PHYS_PAGE_RESERVED:
2865 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2866 memset(pvDst, 0xff, cb1);
2867 break;
2868 default:
2869 AssertMsgFailed(("%Rrc\n", rc));
2870 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
2871 return rc;
2872 }
2873
2874 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
2875 switch (rc)
2876 {
2877 case VINF_SUCCESS:
2878 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
2879 PGMPhysReleasePageMappingLock(pVM, &Lock);
2880 break;
2881 case VERR_PGM_PHYS_PAGE_RESERVED:
2882 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2883 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
2884 break;
2885 default:
2886 AssertMsgFailed(("%Rrc\n", rc));
2887 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
2888 return rc;
2889 }
2890
2891 if (!(fFlags1 & X86_PTE_A))
2892 {
2893 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2894 AssertRC(rc);
2895 }
2896 if (!(fFlags2 & X86_PTE_A))
2897 {
2898 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2899 AssertRC(rc);
2900 }
2901 return VINF_SUCCESS;
2902 }
2903 /* sort out which page */
2904 }
2905 else
2906 GCPtrSrc += cb1; /* fault on 2nd page */
2907 }
2908 }
2909
2910 /*
2911 * Raise a #PF if we're allowed to do that.
2912 */
2913 /* Calc the error bits. */
2914 uint32_t cpl = CPUMGetGuestCPL(pVM, pCtxCore);
2915 uint32_t uErr;
2916 switch (rc)
2917 {
2918 case VINF_SUCCESS:
2919 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
2920 rc = VERR_ACCESS_DENIED;
2921 break;
2922
2923 case VERR_PAGE_NOT_PRESENT:
2924 case VERR_PAGE_TABLE_NOT_PRESENT:
2925 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
2926 break;
2927
2928 default:
2929 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
2930 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
2931 return rc;
2932 }
2933 if (fRaiseTrap)
2934 {
2935 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
2936 return TRPMRaiseXcptErrCR2(pVM, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
2937 }
2938 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
2939 return rc;
2940}
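/*
 * Minimal usage sketch (hypothetical; GCPtrMem is a placeholder and pCtxCore
 * is assumed to be the current trap frame): reading an operand during
 * instruction emulation without raising the #PF here, leaving that decision
 * to the caller.
 *
 *    uint64_t u64Operand = 0;
 *    rc = PGMPhysInterpretedReadNoHandlers(pVM, pCtxCore, &u64Operand, GCPtrMem,
 *                                          sizeof(u64Operand), false);   // fRaiseTrap = false
 *    if (RT_FAILURE(rc))
 *        return rc;    // no trap was raised since fRaiseTrap was false
 */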
2941
2942
2943/**
2944 * Performs a write to guest virtual memory for instruction emulation.
2945 *
2946 * This will check permissions, raise exceptions and update the dirty and access
2947 * bits.
2948 *
2949 * @returns VBox status code suitable to scheduling.
 2950 * @retval VINF_SUCCESS if the write was performed successfully.
2951 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2952 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2953 *
2954 * @param pVM The VM handle.
2955 * @param pCtxCore The context core.
2956 * @param GCPtrDst The destination address.
2957 * @param pvSrc What to write.
2958 * @param cb The number of bytes to write. Not more than a page.
 2959 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
 2960 * an appropriate error status will be returned (no
 2961 * informational status at all).
2962 *
2963 * @remarks Takes the PGM lock.
2964 * @remarks A page fault on the 2nd page of the access will be raised without
2965 * writing the bits on the first page since we're ASSUMING that the
2966 * caller is emulating an instruction access.
2967 * @remarks This function will dynamically map physical pages in GC. This may
2968 * unmap mappings done by the caller. Be careful!
2969 */
2970VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVM pVM, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
2971{
2972 Assert(cb <= PAGE_SIZE);
2973
2974 /*
2975 * 1. Translate virtual to physical. This may fault.
2976 * 2. Map the physical address.
2977 * 3. Do the write operation.
2978 * 4. Set access bits if required.
2979 */
2980 int rc;
2981 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
2982 if (cb <= cb1)
2983 {
2984 /*
2985 * Not crossing pages.
2986 */
2987 RTGCPHYS GCPhys;
2988 uint64_t fFlags;
2989 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrDst, &fFlags, &GCPhys);
2990 if (RT_SUCCESS(rc))
2991 {
2992 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
2993 || ( !(CPUMGetGuestCR0(pVM) & X86_CR0_WP)
2994 && CPUMGetGuestCPL(pVM, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
2995 {
2996 void *pvDst;
2997 PGMPAGEMAPLOCK Lock;
2998 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
2999 switch (rc)
3000 {
3001 case VINF_SUCCESS:
3002 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3003 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3004 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3005 PGMPhysReleasePageMappingLock(pVM, &Lock);
3006 break;
3007 case VERR_PGM_PHYS_PAGE_RESERVED:
3008 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3009 /* bit bucket */
3010 break;
3011 default:
3012 AssertMsgFailed(("%Rrc\n", rc));
3013 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3014 return rc;
3015 }
3016
3017 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3018 {
3019 /** @todo dirty & access bit emulation isn't 100% correct. */
3020 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3021 AssertRC(rc);
3022 }
3023 return VINF_SUCCESS;
3024 }
3025 rc = VERR_ACCESS_DENIED;
3026 }
3027 }
3028 else
3029 {
3030 /*
3031 * Crosses pages.
3032 */
3033 size_t cb2 = cb - cb1;
3034 uint64_t fFlags1;
3035 RTGCPHYS GCPhys1;
3036 uint64_t fFlags2;
3037 RTGCPHYS GCPhys2;
3038 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrDst, &fFlags1, &GCPhys1);
3039 if (RT_SUCCESS(rc))
3040 {
3041 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3042 if (RT_SUCCESS(rc))
3043 {
3044 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3045 && (fFlags2 & X86_PTE_RW))
3046 || ( !(CPUMGetGuestCR0(pVM) & X86_CR0_WP)
3047 && CPUMGetGuestCPL(pVM, pCtxCore) <= 2) )
3048 {
3049 void *pvDst;
3050 PGMPAGEMAPLOCK Lock;
3051 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3052 switch (rc)
3053 {
3054 case VINF_SUCCESS:
3055 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3056 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3057 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3058 PGMPhysReleasePageMappingLock(pVM, &Lock);
3059 break;
3060 case VERR_PGM_PHYS_PAGE_RESERVED:
3061 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3062 /* bit bucket */
3063 break;
3064 default:
3065 AssertMsgFailed(("%Rrc\n", rc));
3066 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3067 return rc;
3068 }
3069
3070 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3071 switch (rc)
3072 {
3073 case VINF_SUCCESS:
3074 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3075 PGMPhysReleasePageMappingLock(pVM, &Lock);
3076 break;
3077 case VERR_PGM_PHYS_PAGE_RESERVED:
3078 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3079 /* bit bucket */
3080 break;
3081 default:
3082 AssertMsgFailed(("%Rrc\n", rc));
3083 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3084 return rc;
3085 }
3086
3087 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3088 {
3089 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3090 AssertRC(rc);
3091 }
3092 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3093 {
3094 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3095 AssertRC(rc);
3096 }
3097 return VINF_SUCCESS;
3098 }
3099 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3100 GCPtrDst += cb1; /* fault on the 2nd page. */
3101 rc = VERR_ACCESS_DENIED;
3102 }
3103 else
3104 GCPtrDst += cb1; /* fault on the 2nd page. */
3105 }
3106 }
3107
3108 /*
3109 * Raise a #PF if we're allowed to do that.
3110 */
3111 /* Calc the error bits. */
3112 uint32_t uErr;
3113 uint32_t cpl = CPUMGetGuestCPL(pVM, pCtxCore);
3114 switch (rc)
3115 {
3116 case VINF_SUCCESS:
3117 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3118 rc = VERR_ACCESS_DENIED;
3119 break;
3120
3121 case VERR_ACCESS_DENIED:
3122 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3123 break;
3124
3125 case VERR_PAGE_NOT_PRESENT:
3126 case VERR_PAGE_TABLE_NOT_PRESENT:
3127 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3128 break;
3129
3130 default:
3131 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3132 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3133 return rc;
3134 }
3135 if (fRaiseTrap)
3136 {
3137 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3138 return TRPMRaiseXcptErrCR2(pVM, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3139 }
3140 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3141 return rc;
3142}
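/*
 * Summary of the #PF error-code construction used above (illustrative):
 *
 *    translation ok but reserved-bit style failure         -> X86_TRAP_PF_RSVD
 *    VERR_ACCESS_DENIED (protection violation)             -> X86_TRAP_PF_RW
 *    VERR_PAGE_NOT_PRESENT / VERR_PAGE_TABLE_NOT_PRESENT   -> 0 (not-present fault)
 *
 * with X86_TRAP_PF_US ORed in when the CPL check above indicates user mode.
 */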
3143
3144