VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 18338

Last change on this file since 18338 was 18279, checked in by vboxsync, 16 years ago

PGMAllPhys.cpp: reverted r44713.

1/* $Id: PGMAllPhys.cpp 18279 2009-03-25 20:06:01Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Defined Constants And Macros *
24*******************************************************************************/
25/** @def PGM_IGNORE_RAM_FLAGS_RESERVED
26 * Don't respect the MM_RAM_FLAGS_RESERVED flag when converting to HC addresses.
27 *
28 * Since this flag is currently incorrectly kept set for ROM regions we will
29 * have to ignore it for now so we don't break stuff.
30 *
31 * @todo this has been fixed now I believe, remove this hack.
32 */
33#define PGM_IGNORE_RAM_FLAGS_RESERVED
34
35
36/*******************************************************************************
37* Header Files *
38*******************************************************************************/
39#define LOG_GROUP LOG_GROUP_PGM_PHYS
40#include <VBox/pgm.h>
41#include <VBox/trpm.h>
42#include <VBox/vmm.h>
43#include <VBox/iom.h>
44#include <VBox/em.h>
45#include <VBox/rem.h>
46#include "PGMInternal.h"
47#include <VBox/vm.h>
48#include <VBox/param.h>
49#include <VBox/err.h>
50#include <iprt/assert.h>
51#include <iprt/string.h>
52#include <iprt/asm.h>
53#include <VBox/log.h>
54#ifdef IN_RING3
55# include <iprt/thread.h>
56#endif
57
58
59
60#ifndef IN_RING3
61
62/**
63 * \#PF Handler callback for Guest ROM range write access.
64 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
65 *
66 * @returns VBox status code (appropriate for trap handling and GC return).
67 * @param pVM VM Handle.
68 * @param uErrorCode CPU Error code.
69 * @param pRegFrame Trap register frame.
70 * @param pvFault The fault address (cr2).
71 * @param GCPhysFault The GC physical address corresponding to pvFault.
72 * @param pvUser User argument. Pointer to the ROM range structure.
73 */
74VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
75{
76 int rc;
77#ifdef VBOX_WITH_NEW_PHYS_CODE
78 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
79 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
80 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
81 switch (pRom->aPages[iPage].enmProt)
82 {
83 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
84 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
85 {
86#endif
87 /*
88 * If it's a simple instruction which doesn't change the cpu state
89 * we will simply skip it. Otherwise we'll have to defer it to REM.
90 */
91 uint32_t cbOp;
92 DISCPUSTATE Cpu;
93 rc = EMInterpretDisasOne(pVM, pRegFrame, &Cpu, &cbOp);
94 if ( RT_SUCCESS(rc)
95 && Cpu.mode == CPUMODE_32BIT /** @todo why does this matter? */
96 && !(Cpu.prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
97 {
98 switch (Cpu.opcode)
99 {
100 /** @todo Find other instructions we can safely skip, possibly
101 * adding this kind of detection to DIS or EM. */
102 case OP_MOV:
103 pRegFrame->rip += cbOp;
104 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestROMWriteHandled);
105 return VINF_SUCCESS;
106 }
107 }
108 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
109 return rc;
110#ifdef VBOX_WITH_NEW_PHYS_CODE
111 break;
112 }
113
114 case PGMROMPROT_READ_RAM_WRITE_RAM:
115 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
116 AssertRC(rc);
117 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
118
119 case PGMROMPROT_READ_ROM_WRITE_RAM:
120 /* Handle it in ring-3 because it's *way* easier there. */
121 break;
122
123 default:
124 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
125 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
126 VERR_INTERNAL_ERROR);
127 }
128#endif
129
130 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestROMWriteUnhandled);
131 return VINF_EM_RAW_EMULATE_INSTR;
132}
133
134#endif /* !IN_RING3 */
135
136/**
137 * Checks if Address Gate 20 is enabled or not.
138 *
139 * @returns true if enabled.
140 * @returns false if disabled.
141 * @param pVM VM handle.
142 */
143VMMDECL(bool) PGMPhysIsA20Enabled(PVM pVM)
144{
145 LogFlow(("PGMPhysIsA20Enabled %d\n", pVM->pgm.s.fA20Enabled));
146 return pVM->pgm.s.fA20Enabled;
147}
148
149
150/**
151 * Validates a GC physical address.
152 *
153 * @returns true if valid.
154 * @returns false if invalid.
155 * @param pVM The VM handle.
156 * @param GCPhys The physical address to validate.
157 */
158VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
159{
160 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
161 return pPage != NULL;
162}
163
164
165/**
166 * Checks if a GC physical address is a normal page,
167 * i.e. not ROM, MMIO or reserved.
168 *
169 * @returns true if normal.
170 * @returns false if invalid, ROM, MMIO or reserved page.
171 * @param pVM The VM handle.
172 * @param GCPhys The physical address to check.
173 */
174VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
175{
176 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
177#ifdef VBOX_WITH_NEW_PHYS_CODE
178 return pPage
179 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
180#else
181 return pPage
182 && !(pPage->HCPhys & (MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO2));
183#endif
184}
185
186
187/**
188 * Converts a GC physical address to a HC physical address.
189 *
190 * @returns VINF_SUCCESS on success.
191 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
192 * page but has no physical backing.
193 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
194 * GC physical address.
195 *
196 * @param pVM The VM handle.
197 * @param GCPhys The GC physical address to convert.
198 * @param pHCPhys Where to store the HC physical address on success.
199 */
200VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
201{
202 PPGMPAGE pPage;
203 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
204 if (RT_FAILURE(rc))
205 return rc;
206
207#ifndef PGM_IGNORE_RAM_FLAGS_RESERVED
208 if (RT_UNLIKELY(pPage->HCPhys & MM_RAM_FLAGS_RESERVED)) /** @todo PAGE FLAGS */
209 return VERR_PGM_PHYS_PAGE_RESERVED;
210#endif
211
212 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
213 return VINF_SUCCESS;
214}
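
/*
 * Illustrative usage sketch (hypothetical caller, compiled out): translate a
 * guest physical address to a host physical address and treat the two
 * documented failure cases separately.
 */
#if 0
static int examplePhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys)
{
    RTHCPHYS HCPhys;
    int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
    if (RT_SUCCESS(rc))
        Log(("example: %RGp -> %RHp\n", GCPhys, HCPhys));
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        Log(("example: %RGp is valid but has no physical backing\n", GCPhys));
    else
        Log(("example: %RGp is not a valid guest physical address (%Rrc)\n", GCPhys, rc));
    return rc;
}
#endif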
215
216
217/**
218 * Invalidates the GC page mapping TLB.
219 *
220 * @param pVM The VM handle.
221 */
222VMMDECL(void) PGMPhysInvalidatePageGCMapTLB(PVM pVM)
223{
224 /* later */
225 NOREF(pVM);
226}
227
228
229/**
230 * Invalidates the ring-0 page mapping TLB.
231 *
232 * @param pVM The VM handle.
233 */
234VMMDECL(void) PGMPhysInvalidatePageR0MapTLB(PVM pVM)
235{
236 PGMPhysInvalidatePageR3MapTLB(pVM);
237}
238
239
240/**
241 * Invalidates the ring-3 page mapping TLB.
242 *
243 * @param pVM The VM handle.
244 */
245VMMDECL(void) PGMPhysInvalidatePageR3MapTLB(PVM pVM)
246{
247 pgmLock(pVM);
248 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
249 {
250 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
251 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
252 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
253 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
254 }
255 pgmUnlock(pVM);
256}
257
258
259/**
260 * Makes sure that there is at least one handy page ready for use.
261 *
262 * This will also take the appropriate actions when reaching water-marks.
263 *
264 * @returns The following VBox status codes.
265 * @retval VINF_SUCCESS on success.
266 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
267 *
268 * @param pVM The VM handle.
269 *
270 * @remarks Must be called from within the PGM critical section. It may
271 * nip back to ring-3/0 in some cases.
272 */
273static int pgmPhysEnsureHandyPage(PVM pVM)
274{
275 /** @remarks
276 * low-water mark logic for R0 & GC:
277 * - 75%: Set FF.
278 * - 50%: Force return to ring-3 ASAP.
279 *
280 * For ring-3 there is a little problem wrt the recompiler, so:
281 * - 75%: Set FF.
282 * - 50%: Try to allocate pages; on failure we'll force REM to quit ASAP.
283 *
284 * The basic idea is that we should be able to get out of any situation with
285 * only 50% of handy pages remaining.
286 *
287 * At the moment we'll not adjust the number of handy pages relative to the
288 * actual VM RAM commitment, that's too much work for now.
289 */
290 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
291 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
292 if ( !pVM->pgm.s.cHandyPages
293#ifdef IN_RING3
294 || pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 2 /* 50% */
295#endif
296 )
297 {
298 Log(("PGM: cHandyPages=%u out of %u -> allocate more\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
299#ifdef IN_RING3
300 int rc = PGMR3PhysAllocateHandyPages(pVM);
301#elif defined(IN_RING0)
302 int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
303#else
304 int rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
305#endif
306 if (RT_UNLIKELY(rc != VINF_SUCCESS))
307 {
308 AssertMsg(rc == VINF_EM_NO_MEMORY || rc == VERR_EM_NO_MEMORY, ("%Rrc\n", rc));
309 if (!pVM->pgm.s.cHandyPages)
310 {
311 LogRel(("PGM: no more handy pages!\n"));
312 return VERR_EM_NO_MEMORY;
313 }
314 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
315#ifdef IN_RING3
316 REMR3NotifyFF(pVM);
317#else
318 VM_FF_SET(pVM, VM_FF_TO_R3);
319#endif
320 }
321 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
322 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
323 ("%u\n", pVM->pgm.s.cHandyPages),
324 VERR_INTERNAL_ERROR);
325 }
326 else if (pVM->pgm.s.cHandyPages - 1 <= (RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 4) * 3) /* 75% */
327 {
328 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
329#ifndef IN_RING3
330 if (pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 2)
331 {
332 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages - 1, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
333 VM_FF_SET(pVM, VM_FF_TO_R3);
334 }
335#endif
336 }
337
338 return VINF_SUCCESS;
339}
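
/*
 * Worked example of the water marks above (illustrative, assuming the handy
 * page array has 128 entries; only the percentages come from the code): the
 * 75% branch fires once cHandyPages - 1 <= 96, i.e. after 31 or more pages
 * have been consumed, and the ring-0/RC force-to-ring-3 check fires once
 * cHandyPages - 1 <= 64, i.e. with 65 or fewer handy pages left.
 */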
340
341
342/**
343 * Replace a zero or shared page with a new page that we can write to.
344 *
345 * @returns The following VBox status codes.
346 * @retval VINF_SUCCESS on success, pPage is modified.
347 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
348 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
349 *
350 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
351 *
352 * @param pVM The VM address.
353 * @param pPage The physical page tracking structure. This will
354 * be modified on success.
355 * @param GCPhys The address of the page.
356 *
357 * @remarks Must be called from within the PGM critical section. It may
358 * nip back to ring-3/0 in some cases.
359 *
360 * @remarks This function shouldn't really fail; however, if it does
361 * it probably means we've screwed up the amount and/or the
362 * low-water mark of handy pages. Or, that some device I/O is
363 * causing a lot of pages to be allocated while the host is in a
364 * low-memory condition.
365 */
366int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
367{
368 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
369
370 /*
371 * Prereqs.
372 */
373 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
374 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
375 Assert(!PGM_PAGE_IS_MMIO(pPage));
376
377
378 /*
379 * Flush any shadow page table mappings of the page.
380 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
381 */
382 bool fFlushTLBs = false;
383 int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
384 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_GCPHYS_ALIASED, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR);
385
386 /*
387 * Ensure that we've got a page handy, take it and use it.
388 */
389 int rc2 = pgmPhysEnsureHandyPage(pVM);
390 if (RT_FAILURE(rc2))
391 {
392 if (fFlushTLBs)
393 PGM_INVL_GUEST_TLBS();
394 Assert(rc2 == VERR_EM_NO_MEMORY);
395 return rc2;
396 }
397 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
398 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
399 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
400 Assert(!PGM_PAGE_IS_MMIO(pPage));
401
402 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
403 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
404 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
405 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
406 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
407 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
408
409 /*
410 * There are one or two actions to be taken the next time we allocate handy pages:
411 * - Tell the GMM (global memory manager) what the page is being used for.
412 * (Speeds up replacement operations - sharing and defragmenting.)
413 * - If the current backing is shared, it must be freed.
414 */
415 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
416 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
417
418 if (PGM_PAGE_IS_SHARED(pPage))
419 {
420 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
421 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
422 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
423
424 Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
425 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
426 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageReplaceShared));
427 pVM->pgm.s.cSharedPages--;
428 AssertMsgFailed(("TODO: copy shared page content")); /** @todo err.. what about copying the page content? */
429 }
430 else
431 {
432 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
433 STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
434 pVM->pgm.s.cZeroPages--;
435 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
436 }
437
438 /*
439 * Do the PGMPAGE modifications.
440 */
441 pVM->pgm.s.cPrivatePages++;
442 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
443 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
444 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
445
446 if ( fFlushTLBs
447 && rc != VINF_PGM_GCPHYS_ALIASED)
448 PGM_INVL_GUEST_TLBS();
449 return rc;
450}
451
452
453/**
454 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
455 *
456 * @returns VBox status code.
457 * @retval VINF_SUCCESS on success.
458 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
459 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
460 *
461 * @param pVM The VM address.
462 * @param pPage The physical page tracking structure.
463 * @param GCPhys The address of the page.
464 *
465 * @remarks Called from within the PGM critical section.
466 */
467int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
468{
469 switch (PGM_PAGE_GET_STATE(pPage))
470 {
471 case PGM_PAGE_STATE_WRITE_MONITORED:
472 PGM_PAGE_SET_WRITTEN_TO(pPage);
473 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
474 /* fall thru */
475 default: /* to shut up GCC */
476 case PGM_PAGE_STATE_ALLOCATED:
477 return VINF_SUCCESS;
478
479 /*
480 * Zero pages can be dummy pages for MMIO or reserved memory,
481 * so we need to check the flags before joining cause with
482 * shared page replacement.
483 */
484 case PGM_PAGE_STATE_ZERO:
485 if (PGM_PAGE_IS_MMIO(pPage))
486 return VERR_PGM_PHYS_PAGE_RESERVED;
487 /* fall thru */
488 case PGM_PAGE_STATE_SHARED:
489 return pgmPhysAllocPage(pVM, pPage, GCPhys);
490 }
491}
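
/*
 * Summary of the state handling above (descriptive only): WRITE_MONITORED
 * pages are flipped straight to ALLOCATED (and marked written-to), ALLOCATED
 * pages need no work, ZERO pages that are really MMIO dummies are rejected
 * with VERR_PGM_PHYS_PAGE_RESERVED, and genuine ZERO or SHARED pages get a
 * fresh private page via pgmPhysAllocPage().
 */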
492
493
494/**
495 * Wrapper for pgmPhysPageMakeWritable which enters the critsect.
496 *
497 * @returns VBox status code.
498 * @retval VINF_SUCCESS on success.
499 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
500 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
501 *
502 * @param pVM The VM address.
503 * @param pPage The physical page tracking structure.
504 * @param GCPhys The address of the page.
505 */
506int pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
507{
508 int rc = pgmLock(pVM);
509 if (RT_SUCCESS(rc))
510 {
511 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
512 pgmUnlock(pVM);
513 }
514 return rc;
515}
516
517
518/**
519 * Internal usage: Map the page specified by its GMM ID.
520 *
521 * This is similar to pgmPhysPageMap.
522 *
523 * @returns VBox status code.
524 *
525 * @param pVM The VM handle.
526 * @param idPage The Page ID.
527 * @param HCPhys The physical address (for RC).
528 * @param ppv Where to store the mapping address.
529 *
530 * @remarks Called from within the PGM critical section.
531 */
532int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
533{
534 /*
535 * Validation.
536 */
537 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
538 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
539 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
540 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
541
542#ifdef IN_RC
543 /*
544 * Map it by HCPhys.
545 */
546 return PGMDynMapHCPage(pVM, HCPhys, ppv);
547
548#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
549 /*
550 * Map it by HCPhys.
551 */
552 return pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
553
554#else
555 /*
556 * Find/make Chunk TLB entry for the mapping chunk.
557 */
558 PPGMCHUNKR3MAP pMap;
559 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
560 if (pTlbe->idChunk == idChunk)
561 {
562 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
563 pMap = pTlbe->pChunk;
564 }
565 else
566 {
567 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
568
569 /*
570 * Find the chunk, map it if necessary.
571 */
572 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
573 if (!pMap)
574 {
575# ifdef IN_RING0
576 int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_MAP_CHUNK, idChunk);
577 AssertRCReturn(rc, rc);
578 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
579 Assert(pMap);
580# else
581 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
582 if (RT_FAILURE(rc))
583 return rc;
584# endif
585 }
586
587 /*
588 * Enter it into the Chunk TLB.
589 */
590 pTlbe->idChunk = idChunk;
591 pTlbe->pChunk = pMap;
592 pMap->iAge = 0;
593 }
594
595 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
596 return VINF_SUCCESS;
597#endif
598}
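
/*
 * Illustrative decomposition of a GMM page ID matching the lookup above
 * (hypothetical helper, compiled out): the high bits select the mapping
 * chunk, the low bits the page within that chunk.
 */
#if 0
static void exampleDecomposePageId(uint32_t idPage)
{
    uint32_t const idChunk = idPage >> GMM_CHUNKID_SHIFT;    /* chunk TLB / AVL tree key */
    uint32_t const iPage   = idPage & GMM_PAGEID_IDX_MASK;   /* page index within the chunk */
    Log(("example: idPage=%#x -> idChunk=%#x iPage=%#x byte offset=%#x\n",
         idPage, idChunk, iPage, iPage << PAGE_SHIFT));
}
#endif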
599
600
601/**
602 * Maps a page into the current virtual address space so it can be accessed.
603 *
604 * @returns VBox status code.
605 * @retval VINF_SUCCESS on success.
606 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
607 *
608 * @param pVM The VM address.
609 * @param pPage The physical page tracking structure.
610 * @param GCPhys The address of the page.
611 * @param ppMap Where to store the address of the mapping tracking structure.
612 * @param ppv Where to store the mapping address of the page. The page
613 * offset is masked off!
614 *
615 * @remarks Called from within the PGM critical section.
616 */
617int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
618{
619 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
620
621#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
622 /*
623 * Just some sketchy GC/R0-darwin code.
624 */
625 *ppMap = NULL;
626 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
627 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
628# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
629 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
630# else
631 PGMDynMapHCPage(pVM, HCPhys, ppv);
632# endif
633 return VINF_SUCCESS;
634
635#else /* IN_RING3 || IN_RING0 */
636
637
638 /*
639 * Special case: ZERO and MMIO2 pages.
640 */
641 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
642 if (idChunk == NIL_GMM_CHUNKID)
643 {
644 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR);
645 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
646 {
647 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
648 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
649 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR);
650 *ppv = (void *)((uintptr_t)pRam->pvR3 + (GCPhys - pRam->GCPhys));
651 }
652 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
653 {
654 /** @todo deal with aliased MMIO2 pages somehow...
655 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
656 * them, that would also avoid this mess. It would actually be kind of
657 * elegant... */
658 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR);
659 }
660 else
661 {
662 /** @todo handle MMIO2 */
663 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR);
664 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
665 ("pPage=%R[pgmpage]\n", pPage),
666 VERR_INTERNAL_ERROR);
667 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
668 }
669 *ppMap = NULL;
670 return VINF_SUCCESS;
671 }
672
673 /*
674 * Find/make Chunk TLB entry for the mapping chunk.
675 */
676 PPGMCHUNKR3MAP pMap;
677 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
678 if (pTlbe->idChunk == idChunk)
679 {
680 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
681 pMap = pTlbe->pChunk;
682 }
683 else
684 {
685 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
686
687 /*
688 * Find the chunk, map it if necessary.
689 */
690 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
691 if (!pMap)
692 {
693#ifdef IN_RING0
694 int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_MAP_CHUNK, idChunk);
695 AssertRCReturn(rc, rc);
696 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
697 Assert(pMap);
698#else
699 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
700 if (RT_FAILURE(rc))
701 return rc;
702#endif
703 }
704
705 /*
706 * Enter it into the Chunk TLB.
707 */
708 pTlbe->idChunk = idChunk;
709 pTlbe->pChunk = pMap;
710 pMap->iAge = 0;
711 }
712
713 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
714 *ppMap = pMap;
715 return VINF_SUCCESS;
716#endif /* IN_RING3 || IN_RING0 */
717}
718
719
720#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
721/**
722 * Load a guest page into the ring-3 physical TLB.
723 *
724 * @returns VBox status code.
725 * @retval VINF_SUCCESS on success
726 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
727 * @param pPGM The PGM instance pointer.
728 * @param GCPhys The guest physical address in question.
729 */
730int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
731{
732 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
733
734 /*
735 * Find the ram range.
736 * 99.8% of requests are expected to be in the first range.
737 */
738 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
739 RTGCPHYS off = GCPhys - pRam->GCPhys;
740 if (RT_UNLIKELY(off >= pRam->cb))
741 {
742 do
743 {
744 pRam = pRam->CTX_SUFF(pNext);
745 if (!pRam)
746 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
747 off = GCPhys - pRam->GCPhys;
748 } while (off >= pRam->cb);
749 }
750
751 /*
752 * Map the page.
753 * Make a special case for the zero page as it is kind of special.
754 */
755 PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
756 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
757 if (!PGM_PAGE_IS_ZERO(pPage))
758 {
759 void *pv;
760 PPGMPAGEMAP pMap;
761 int rc = pgmPhysPageMap(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
762 if (RT_FAILURE(rc))
763 return rc;
764 pTlbe->pMap = pMap;
765 pTlbe->pv = pv;
766 }
767 else
768 {
769 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
770 pTlbe->pMap = NULL;
771 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
772 }
773 pTlbe->pPage = pPage;
774 return VINF_SUCCESS;
775}
776
777
778/**
779 * Load a guest page into the ring-3 physical TLB.
780 *
781 * @returns VBox status code.
782 * @retval VINF_SUCCESS on success
783 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
784 *
785 * @param pPGM The PGM instance pointer.
786 * @param pPage Pointer to the PGMPAGE structure corresponding to
787 * GCPhys.
788 * @param GCPhys The guest physical address in question.
789 */
790int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys)
791{
792 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
793
794 /*
795 * Map the page.
796 * Make a special case for the zero page as it is kind of special.
797 */
798 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
799 if (!PGM_PAGE_IS_ZERO(pPage))
800 {
801 void *pv;
802 PPGMPAGEMAP pMap;
803 int rc = pgmPhysPageMap(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
804 if (RT_FAILURE(rc))
805 return rc;
806 pTlbe->pMap = pMap;
807 pTlbe->pv = pv;
808 }
809 else
810 {
811 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
812 pTlbe->pMap = NULL;
813 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
814 }
815 pTlbe->pPage = pPage;
816 return VINF_SUCCESS;
817}
818#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
819
820
821/**
822 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
823 * own the PGM lock and therefore not need to lock the mapped page.
824 *
825 * @returns VBox status code.
826 * @retval VINF_SUCCESS on success.
827 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
828 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
829 *
830 * @param pVM The VM handle.
831 * @param GCPhys The guest physical address of the page that should be mapped.
832 * @param pPage Pointer to the PGMPAGE structure for the page.
833 * @param ppv Where to store the address corresponding to GCPhys.
834 *
835 * @internal
836 */
837int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
838{
839 int rc;
840 AssertReturn(pPage, VERR_INTERNAL_ERROR);
841 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect) || VM_IS_EMT(pVM));
842
843 /*
844 * Make sure the page is writable.
845 */
846 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
847 {
848 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
849 if (RT_FAILURE(rc))
850 return rc;
851 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
852 }
853 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
854
855 /*
856 * Get the mapping address.
857 */
858#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
859 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
860#else
861 PPGMPAGEMAPTLBE pTlbe;
862 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
863 if (RT_FAILURE(rc))
864 return rc;
865 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
866#endif
867 return VINF_SUCCESS;
868}
869
870
871/**
872 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
873 * own the PGM lock and therefore not need to lock the mapped page.
874 *
875 * @returns VBox status code.
876 * @retval VINF_SUCCESS on success.
877 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
878 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
879 *
880 * @param pVM The VM handle.
881 * @param GCPhys The guest physical address of the page that should be mapped.
882 * @param pPage Pointer to the PGMPAGE structure for the page.
883 * @param ppv Where to store the address corresponding to GCPhys.
884 *
885 * @internal
886 */
887int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
888{
889 AssertReturn(pPage, VERR_INTERNAL_ERROR);
890 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect) || VM_IS_EMT(pVM));
891 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
892
893 /*
894 * Get the mapping address.
895 */
896#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
897 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
898#else
899 PPGMPAGEMAPTLBE pTlbe;
900 int rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
901 if (RT_FAILURE(rc))
902 return rc;
903 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
904#endif
905 return VINF_SUCCESS;
906}
907
908
909/**
910 * Requests the mapping of a guest page into the current context.
911 *
912 * This API should only be used for very short term, as it will consume
913 * scarce resources (R0 and GC) in the mapping cache. When you're done
914 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
915 *
916 * This API will assume your intention is to write to the page, and will
917 * therefore replace shared and zero pages. If you do not intend to modify
918 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
919 *
920 * @returns VBox status code.
921 * @retval VINF_SUCCESS on success.
922 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
923 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
924 *
925 * @param pVM The VM handle.
926 * @param GCPhys The guest physical address of the page that should be mapped.
927 * @param ppv Where to store the address corresponding to GCPhys.
928 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
929 *
930 * @remarks The caller is responsible for dealing with access handlers.
931 * @todo Add an informational return code for pages with access handlers?
932 *
933 * @remark Avoid calling this API from within critical sections (other than the
934 * PGM one) because of the deadlock risk. External threads may need to
935 * delegate jobs to the EMTs.
936 * @thread Any thread.
937 */
938VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
939{
940#ifdef VBOX_WITH_NEW_PHYS_CODE
941# if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
942
943 /*
944 * Find the page and make sure it's writable.
945 */
946 PPGMPAGE pPage;
947 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
948 if (RT_SUCCESS(rc))
949 {
950 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
951 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
952 if (RT_SUCCESS(rc))
953 {
954 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
955#if 0
956 pLock->pvMap = 0;
957 pLock->pvPage = pPage;
958#else
959 pLock->u32Dummy = UINT32_MAX;
960#endif
961 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
962 rc = VINF_SUCCESS;
963 }
964 }
965
966# else /* IN_RING3 || IN_RING0 */
967 int rc = pgmLock(pVM);
968 AssertRCReturn(rc, rc);
969
970 /*
971 * Query the Physical TLB entry for the page (may fail).
972 */
973 PPGMPAGEMAPTLBE pTlbe;
974 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
975 if (RT_SUCCESS(rc))
976 {
977 /*
978 * If the page is shared, the zero page, or being write monitored
979 * it must be converted to a page that's writable if possible.
980 */
981 PPGMPAGE pPage = pTlbe->pPage;
982 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
983 {
984 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
985 if (RT_SUCCESS(rc))
986 {
987 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
988 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
989 }
990 }
991 if (RT_SUCCESS(rc))
992 {
993 /*
994 * Now, just perform the locking and calculate the return address.
995 */
996 PPGMPAGEMAP pMap = pTlbe->pMap;
997 if (pMap)
998 pMap->cRefs++;
999#if 0 /** @todo implement locking properly */
1000 if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
1001 if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
1002 {
1003 AssertMsgFailed(("%RGp is entering permanent locked state!\n", GCPhys));
1004 if (pMap)
1005 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1006 }
1007#endif
1008 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1009 pLock->pvPage = pPage;
1010 pLock->pvMap = pMap;
1011 }
1012 }
1013
1014 pgmUnlock(pVM);
1015#endif /* IN_RING3 || IN_RING0 */
1016 return rc;
1017
1018#else
1019 /*
1020 * Temporary fallback code.
1021 */
1022# if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1023/** @todo @bugref{3202}: check up this path. */
1024 return PGMDynMapGCPageOff(pVM, GCPhys, ppv);
1025# else
1026 return PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1, (PRTR3PTR)ppv);
1027# endif
1028#endif
1029}
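
/*
 * Illustrative usage sketch (hypothetical caller, compiled out): map a guest
 * page for a short write and release the mapping again as soon as possible,
 * as the documentation above demands. The single byte written at GCPhys is
 * just an example.
 */
#if 0
static int examplePokeGuestPhys(PVM pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;                    /* the page is writable at this point. */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release ASAP, the mapping cache is scarce. */
    }
    return rc;
}
#endif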
1030
1031
1032/**
1033 * Requests the mapping of a guest page into the current context.
1034 *
1035 * This API should only be used for very short term, as it will consume
1036 * scarce resources (R0 and GC) in the mapping cache. When you're done
1037 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1038 *
1039 * @returns VBox status code.
1040 * @retval VINF_SUCCESS on success.
1041 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1042 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1043 *
1044 * @param pVM The VM handle.
1045 * @param GCPhys The guest physical address of the page that should be mapped.
1046 * @param ppv Where to store the address corresponding to GCPhys.
1047 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1048 *
1049 * @remarks The caller is responsible for dealing with access handlers.
1050 * @todo Add an informational return code for pages with access handlers?
1051 *
1052 * @remark Avoid calling this API from within critical sections (other than
1053 * the PGM one) because of the deadlock risk.
1054 * @thread Any thread.
1055 */
1056VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1057{
1058#ifdef VBOX_WITH_NEW_PHYS_CODE
1059# if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1060
1061 /*
1062 * Find the page and make sure it's readable.
1063 */
1064 PPGMPAGE pPage;
1065 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1066 if (RT_SUCCESS(rc))
1067 {
1068 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1069 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1070 else
1071 {
1072 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1073#if 0
1074 pLock->pvMap = 0;
1075 pLock->pvPage = pPage;
1076#else
1077 pLock->u32Dummy = UINT32_MAX;
1078#endif
1079 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1080 rc = VINF_SUCCESS;
1081 }
1082 }
1083
1084# else /* IN_RING3 || IN_RING0 */
1085 int rc = pgmLock(pVM);
1086 AssertRCReturn(rc, rc);
1087
1088 /*
1089 * Query the Physical TLB entry for the page (may fail).
1090 */
1091 PPGMPAGEMAPTLBE pTlbe;
1092 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1093 if (RT_SUCCESS(rc))
1094 {
1095 /* MMIO pages don't have any readable backing. */
1096 PPGMPAGE pPage = pTlbe->pPage;
1097 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1098 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1099 else
1100 {
1101 /*
1102 * Now, just perform the locking and calculate the return address.
1103 */
1104 PPGMPAGEMAP pMap = pTlbe->pMap;
1105 if (pMap)
1106 pMap->cRefs++;
1107#if 0 /** @todo implement locking properly */
1108 if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
1109 if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
1110 {
1111 AssertMsgFailed(("%RGp is entering permanent locked state!\n", GCPhys));
1112 if (pMap)
1113 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1114 }
1115#endif
1116 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1117 pLock->pvPage = pPage;
1118 pLock->pvMap = pMap;
1119 }
1120 }
1121
1122 pgmUnlock(pVM);
1123#endif /* IN_RING3 || IN_RING0 */
1124 return rc;
1125
1126#else /* !VBOX_WITH_NEW_PHYS_CODE */
1127 /*
1128 * Fallback code.
1129 */
1130 return PGMPhysGCPhys2CCPtr(pVM, GCPhys, (void **)ppv, pLock);
1131#endif /* !VBOX_WITH_NEW_PHYS_CODE */
1132}
1133
1134
1135/**
1136 * Requests the mapping of a guest page given by virtual address into the current context.
1137 *
1138 * This API should only be used for very short term, as it will consume
1139 * scarce resources (R0 and GC) in the mapping cache. When you're done
1140 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1141 *
1142 * This API will assume your intention is to write to the page, and will
1143 * therefore replace shared and zero pages. If you do not intend to modify
1144 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1145 *
1146 * @returns VBox status code.
1147 * @retval VINF_SUCCESS on success.
1148 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1149 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1150 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1151 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1152 *
1153 * @param pVM The VM handle.
1154 * @param GCPhys The guest physical address of the page that should be mapped.
1155 * @param ppv Where to store the address corresponding to GCPhys.
1156 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1157 *
1158 * @remark Avoid calling this API from within critical sections (other than
1159 * the PGM one) because of the deadlock risk.
1160 * @thread EMT
1161 */
1162VMMDECL(int) PGMPhysGCPtr2CCPtr(PVM pVM, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1163{
1164 VM_ASSERT_EMT(pVM);
1165 RTGCPHYS GCPhys;
1166 int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
1167 if (RT_SUCCESS(rc))
1168 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, pLock);
1169 return rc;
1170}
1171
1172
1173/**
1174 * Requests the mapping of a guest page given by virtual address into the current context.
1175 *
1176 * This API should only be used for very short term, as it will consume
1177 * scarce resources (R0 and GC) in the mapping cache. When you're done
1178 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1179 *
1180 * @returns VBox status code.
1181 * @retval VINF_SUCCESS on success.
1182 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1183 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1184 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1185 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1186 *
1187 * @param pVM The VM handle.
1188 * @param GCPtr The guest virtual address of the page that should be mapped.
1189 * @param ppv Where to store the address corresponding to GCPtr.
1190 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1191 *
1192 * @remark Avoid calling this API from within critical sections (other than
1193 * the PGM one) because of the deadlock risk.
1194 * @thread EMT
1195 */
1196VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVM pVM, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1197{
1198 VM_ASSERT_EMT(pVM);
1199 RTGCPHYS GCPhys;
1200 int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
1201 if (RT_SUCCESS(rc))
1202 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, pLock);
1203 return rc;
1204}
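
/*
 * Illustrative usage sketch (hypothetical EMT caller, compiled out): peek at
 * one byte through a guest virtual address using the read-only variant, so
 * shared and zero pages are left untouched.
 */
#if 0
static int examplePeekGuestVirt(PVM pVM, RTGCPTR GCPtr, uint8_t *pbValue)
{
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPtr2CCPtrReadOnly(pVM, GCPtr, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pbValue = *(uint8_t const *)pv;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif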
1205
1206
1207/**
1208 * Release the mapping of a guest page.
1209 *
1210 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1211 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1212 *
1213 * @param pVM The VM handle.
1214 * @param pLock The lock structure initialized by the mapping function.
1215 */
1216VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1217{
1218#ifdef VBOX_WITH_NEW_PHYS_CODE
1219#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1220 /* currently nothing to do here. */
1221 Assert(pLock->u32Dummy == UINT32_MAX);
1222 pLock->u32Dummy = 0;
1223
1224#else /* IN_RING3 */
1225 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1226 if (!pMap)
1227 {
1228 /* The ZERO page and MMIO2 end up here. */
1229 Assert(pLock->pvPage);
1230 pLock->pvPage = NULL;
1231 }
1232 else
1233 {
1234 pgmLock(pVM);
1235
1236# if 0 /** @todo implement page locking */
1237 PPGMPAGE pPage = (PPGMPAGE)pLock->pvPage;
1238 Assert(pPage->cLocks >= 1);
1239 if (pPage->cLocks != PGM_PAGE_MAX_LOCKS)
1240 pPage->cLocks--;
1241# endif
1242
1243 Assert(pMap->cRefs >= 1);
1244 pMap->cRefs--;
1245 pMap->iAge = 0;
1246
1247 pgmUnlock(pVM);
1248 }
1249#endif /* IN_RING3 */
1250#else
1251 NOREF(pVM);
1252 NOREF(pLock);
1253#endif
1254}
1255
1256
1257/**
1258 * Converts a GC physical address to a HC ring-3 pointer.
1259 *
1260 * @returns VINF_SUCCESS on success.
1261 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1262 * page but has no physical backing.
1263 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1264 * GC physical address.
1265 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1266 * a dynamic ram chunk boundary
1267 *
1268 * @param pVM The VM handle.
1269 * @param GCPhys The GC physical address to convert.
1270 * @param cbRange Physical range
1271 * @param pR3Ptr Where to store the R3 pointer on success.
1272 *
1273 * @deprecated Avoid when possible!
1274 */
1275VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
1276{
1277#ifdef VBOX_WITH_NEW_PHYS_CODE
1278/** @todo this is kind of hacky and needs some more work. */
1279 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1280
1281 Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): dont use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
1282# if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1283 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1284# else
1285 pgmLock(pVM);
1286
1287 PPGMRAMRANGE pRam;
1288 PPGMPAGE pPage;
1289 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
1290 if (RT_SUCCESS(rc))
1291 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
1292
1293 pgmUnlock(pVM);
1294 Assert(rc <= VINF_SUCCESS);
1295 return rc;
1296# endif
1297
1298#else /* !VBOX_WITH_NEW_PHYS_CODE */
1299
1300 if ((GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK) != ((GCPhys+cbRange-1) & PGM_DYNAMIC_CHUNK_BASE_MASK))
1301 {
1302 AssertMsgFailed(("%RGp - %RGp crosses a chunk boundary!!\n", GCPhys, GCPhys+cbRange));
1303 LogRel(("PGMPhysGCPhys2HCPtr %RGp - %RGp crosses a chunk boundary!!\n", GCPhys, GCPhys+cbRange));
1304 return VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY;
1305 }
1306
1307 PPGMRAMRANGE pRam;
1308 PPGMPAGE pPage;
1309 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
1310 if (RT_FAILURE(rc))
1311 return rc;
1312
1313#ifndef PGM_IGNORE_RAM_FLAGS_RESERVED
1314 if (RT_UNLIKELY(PGM_PAGE_IS_RESERVED(pPage)))
1315 return VERR_PGM_PHYS_PAGE_RESERVED;
1316#endif
1317
1318 RTGCPHYS off = GCPhys - pRam->GCPhys;
1319 if (RT_UNLIKELY(off + cbRange > pRam->cb))
1320 {
1321 AssertMsgFailed(("%RGp - %RGp crosses a chunk boundary!!\n", GCPhys, GCPhys + cbRange));
1322 return VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY;
1323 }
1324
1325 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1326 {
1327 unsigned iChunk = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
1328#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) /* ASSUMES this is a rare occurrence */
1329 PRTR3UINTPTR paChunkR3Ptrs = (PRTR3UINTPTR)MMHyperR3ToCC(pVM, pRam->paChunkR3Ptrs);
1330 *pR3Ptr = (RTR3PTR)(paChunkR3Ptrs[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
1331#else
1332 *pR3Ptr = (RTR3PTR)(pRam->paChunkR3Ptrs[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
1333#endif
1334 }
1335 else if (RT_LIKELY(pRam->pvR3))
1336 *pR3Ptr = (RTR3PTR)((RTR3UINTPTR)pRam->pvR3 + off);
1337 else
1338 return VERR_PGM_PHYS_PAGE_RESERVED;
1339 return VINF_SUCCESS;
1340#endif /* !VBOX_WITH_NEW_PHYS_CODE */
1341}
1342
1343
1344#ifdef VBOX_STRICT
1345/**
1346 * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
1347 *
1348 * @returns The R3Ptr, NIL_RTR3PTR on failure.
1349 * @param pVM The VM handle.
1350 * @param GCPhys The GC physical address.
1351 * @param cbRange Physical range.
1352 *
1353 * @deprecated Avoid when possible.
1354 */
1355VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
1356{
1357 RTR3PTR R3Ptr;
1358 int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
1359 if (RT_SUCCESS(rc))
1360 return R3Ptr;
1361 return NIL_RTR3PTR;
1362}
1363#endif /* VBOX_STRICT */
1364
1365
1366/**
1367 * Converts a guest pointer to a GC physical address.
1368 *
1369 * This uses the current CR3/CR0/CR4 of the guest.
1370 *
1371 * @returns VBox status code.
1372 * @param pVM The VM Handle
1373 * @param GCPtr The guest pointer to convert.
1374 * @param pGCPhys Where to store the GC physical address.
1375 */
1376VMMDECL(int) PGMPhysGCPtr2GCPhys(PVM pVM, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1377{
1378 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1379 if (pGCPhys && RT_SUCCESS(rc))
1380 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1381 return rc;
1382}
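
/*
 * Illustrative usage sketch (hypothetical caller, compiled out): since the
 * page offset is OR'ed back in above, the result can be fed straight into
 * byte granular physical access routines.
 */
#if 0
static int exampleGCPtr2GCPhys(PVM pVM, RTGCPTR GCPtr)
{
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
    if (RT_SUCCESS(rc))
        Log(("example: guest %RGv maps to physical %RGp\n", GCPtr, GCPhys));
    return rc;
}
#endif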
1383
1384
1385/**
1386 * Converts a guest pointer to a HC physical address.
1387 *
1388 * This uses the current CR3/CR0/CR4 of the guest.
1389 *
1390 * @returns VBox status code.
1391 * @param pVM The VM Handle
1392 * @param GCPtr The guest pointer to convert.
1393 * @param pHCPhys Where to store the HC physical address.
1394 */
1395VMMDECL(int) PGMPhysGCPtr2HCPhys(PVM pVM, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
1396{
1397 RTGCPHYS GCPhys;
1398 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1399 if (RT_SUCCESS(rc))
1400 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
1401 return rc;
1402}
1403
1404
1405/**
1406 * Converts a guest pointer to a R3 pointer.
1407 *
1408 * This uses the current CR3/CR0/CR4 of the guest.
1409 *
1410 * @returns VBox status code.
1411 * @param pVM The VM Handle
1412 * @param GCPtr The guest pointer to convert.
1413 * @param pR3Ptr Where to store the R3 virtual address.
1414 *
1415 * @deprecated Don't use this.
1416 */
1417VMMDECL(int) PGMPhysGCPtr2R3Ptr(PVM pVM, RTGCPTR GCPtr, PRTR3PTR pR3Ptr)
1418{
1419#ifdef VBOX_WITH_NEW_PHYS_CODE
1420 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1421#endif
1422
1423 RTGCPHYS GCPhys;
1424 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1425 if (RT_SUCCESS(rc))
1426 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pR3Ptr);
1427 return rc;
1428}
1429
1430
1431
1432#undef LOG_GROUP
1433#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
1434
1435
1436#ifdef IN_RING3
1437/**
1438 * Cache PGMPhys memory access
1439 *
1440 * @param pVM VM Handle.
1441 * @param pCache Cache structure pointer
1442 * @param GCPhys GC physical address
1443 * @param pbR3 R3 pointer corresponding to the physical page
1444 *
1445 * @thread EMT.
1446 */
1447static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
1448{
1449 uint32_t iCacheIndex;
1450
1451 Assert(VM_IS_EMT(pVM));
1452
1453 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
1454 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
1455
1456 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
1457
1458 ASMBitSet(&pCache->aEntries, iCacheIndex);
1459
1460 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
1461 pCache->Entry[iCacheIndex].pbR3 = pbR3;
1462}
1463#endif /* IN_RING3 */
1464
1465#ifdef VBOX_WITH_NEW_PHYS_CODE
1466
1467/**
1468 * Deals with reading from a page with one or more ALL access handlers.
1469 *
1470 * @returns VBox status code. Can be ignored in ring-3.
1471 * @retval VINF_SUCCESS.
1472 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1473 *
1474 * @param pVM The VM handle.
1475 * @param pPage The page descriptor.
1476 * @param GCPhys The physical address to start reading at.
1477 * @param pvBuf Where to put the bits we read.
1478 * @param cb How much to read - less or equal to a page.
1479 */
1480static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
1481{
1482 /*
1483 * The most frequent access here is MMIO and shadowed ROM.
1484 * The current code ASSUMES all these access handlers cover full pages!
1485 */
1486
1487 /*
1488 * Whatever we do we need the source page, map it first.
1489 */
1490 const void *pvSrc = NULL;
1491 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
1492 if (RT_FAILURE(rc))
1493 {
1494 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1495 GCPhys, pPage, rc));
1496 memset(pvBuf, 0xff, cb);
1497 return VINF_SUCCESS;
1498 }
1499 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1500
1501 /*
1502 * Deal with any physical handlers.
1503 */
1504 PPGMPHYSHANDLER pPhys = NULL;
1505 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
1506 {
1507#ifdef IN_RING3
1508 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1509 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1510 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
1511 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
1512 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1513 Assert(pPhys->CTX_SUFF(pfnHandler));
1514
1515 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
1516 STAM_PROFILE_START(&pPhys->Stat, h);
1517 rc = pPhys->CTX_SUFF(pfnHandler)(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pPhys->CTX_SUFF(pvUser));
1518 STAM_PROFILE_STOP(&pPhys->Stat, h);
1519 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
1520#else
1521 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1522 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1523 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1524#endif
1525 }
1526
1527 /*
1528 * Deal with any virtual handlers.
1529 */
1530 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
1531 {
1532 unsigned iPage;
1533 PPGMVIRTHANDLER pVirt;
1534
1535 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
1536 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
1537 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
1538 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1539 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
1540
1541#ifdef IN_RING3
1542 if (pVirt->pfnHandlerR3)
1543 {
1544 if (!pPhys)
1545 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1546 else
1547 Log(("pgmPhysWriteHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
1548 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1549 + (iPage << PAGE_SHIFT)
1550 + (GCPhys & PAGE_OFFSET_MASK);
1551
1552 STAM_PROFILE_START(&pVirt->Stat, h);
1553 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
1554 STAM_PROFILE_STOP(&pVirt->Stat, h);
1555 if (rc2 == VINF_SUCCESS)
1556 rc = VINF_SUCCESS;
1557 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
1558 }
1559 else
1560 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1561#else
1562 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1563 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1564 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1565#endif
1566 }
1567
1568 /*
1569 * Take the default action.
1570 */
1571 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1572 memcpy(pvBuf, pvSrc, cb);
1573 return rc;
1574}
1575
1576
1577/**
1578 * Read physical memory.
1579 *
1580 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
1581 * want to ignore those.
1582 *
1583 * @returns VBox status code. Can be ignored in ring-3.
1584 * @retval VINF_SUCCESS.
1585 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1586 *
1587 * @param pVM VM Handle.
1588 * @param GCPhys Physical address start reading from.
1589 * @param pvBuf Where to put the read bits.
1590 * @param cbRead How many bytes to read.
1591 */
1592VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1593{
1594 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
1595 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
1596
1597 pgmLock(pVM);
1598
1599 /*
1600 * Copy loop on ram ranges.
1601 */
1602 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1603 for (;;)
1604 {
1605 /* Find range. */
1606 while (pRam && GCPhys > pRam->GCPhysLast)
1607 pRam = pRam->CTX_SUFF(pNext);
1608 /* Inside range or not? */
1609 if (pRam && GCPhys >= pRam->GCPhys)
1610 {
1611 /*
1612 * Must work our way thru this page by page.
1613 */
1614 RTGCPHYS off = GCPhys - pRam->GCPhys;
1615 while (off < pRam->cb)
1616 {
1617 unsigned iPage = off >> PAGE_SHIFT;
1618 PPGMPAGE pPage = &pRam->aPages[iPage];
1619 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1620 if (cb > cbRead)
1621 cb = cbRead;
1622
1623 /*
1624 * Any ALL access handlers?
1625 */
1626 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
1627 {
1628 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
1629 if (RT_FAILURE(rc))
1630 { pgmUnlock(pVM); return rc; } /* drop the PGM lock before bailing out */
1631 }
1632 else
1633 {
1634 /*
1635 * Get the pointer to the page.
1636 */
1637 const void *pvSrc;
1638 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
1639 if (RT_SUCCESS(rc))
1640 memcpy(pvBuf, pvSrc, cb);
1641 else
1642 {
1643 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1644 pRam->GCPhys + off, pPage, rc));
1645 memset(pvBuf, 0xff, cb);
1646 }
1647 }
1648
1649 /* next page */
1650 if (cb >= cbRead)
1651 {
1652 pgmUnlock(pVM);
1653 return VINF_SUCCESS;
1654 }
1655 cbRead -= cb;
1656 off += cb;
1657 pvBuf = (char *)pvBuf + cb;
1658 } /* walk pages in ram range. */
1659
1660 GCPhys = pRam->GCPhysLast + 1;
1661 }
1662 else
1663 {
1664 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
1665
1666 /*
1667 * Unassigned address space.
1668 */
1669 if (!pRam)
1670 break;
1671 size_t cb = pRam->GCPhys - GCPhys;
1672 if (cb >= cbRead)
1673 {
1674 memset(pvBuf, 0xff, cbRead);
1675 break;
1676 }
1677 memset(pvBuf, 0xff, cb);
1678
1679 cbRead -= cb;
1680 pvBuf = (char *)pvBuf + cb;
1681 GCPhys += cb;
1682 }
1683 } /* Ram range walk */
1684
1685 pgmUnlock(pVM);
1686 return VINF_SUCCESS;
1687}
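
/*
 * Illustrative usage sketch (not part of the original source; the device
 * context, buffer and GC physical address below are hypothetical): a ring-3
 * device model reading a sector's worth of guest RAM during DMA while still
 * respecting any access handlers registered on the pages.
 *
 * @code
 *      uint8_t abBuf[512];
 *      int rc = PGMPhysRead(pVM, GCPhysDmaSrc, abBuf, sizeof(abBuf));
 *      AssertRC(rc);   // Ring-3 can ignore the status; R0/RC callers may get
 *                      // VERR_PGM_PHYS_WR_HIT_HANDLER and must handle it.
 * @endcode
 */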
1688
1689#else /* Old PGMPhysRead */
1690
1691/**
1692 * Read physical memory.
1693 *
1694 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
1695 * want to ignore those.
1696 *
1697 * @param pVM VM Handle.
1698 * @param GCPhys Physical address start reading from.
1699 * @param pvBuf Where to put the read bits.
1700 * @param cbRead How many bytes to read.
1701 */
1702VMMDECL(void) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1703{
1704#ifdef IN_RING3
1705 bool fGrabbedLock = false;
1706#endif
1707
1708 AssertMsg(cbRead > 0, ("don't even think about reading zero bytes!\n"));
1709 if (cbRead == 0)
1710 return;
1711
1712 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
1713
1714#ifdef IN_RING3
1715 if (!VM_IS_EMT(pVM))
1716 {
1717 pgmLock(pVM);
1718 fGrabbedLock = true;
1719 }
1720#endif
1721
1722 /*
1723 * Copy loop on ram ranges.
1724 */
1725 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1726 for (;;)
1727 {
1728 /* Find range. */
1729 while (pRam && GCPhys > pRam->GCPhysLast)
1730 pRam = pRam->CTX_SUFF(pNext);
1731 /* Inside range or not? */
1732 if (pRam && GCPhys >= pRam->GCPhys)
1733 {
1734 /*
1735 * Must work our way thru this page by page.
1736 */
1737 RTGCPHYS off = GCPhys - pRam->GCPhys;
1738 while (off < pRam->cb)
1739 {
1740 unsigned iPage = off >> PAGE_SHIFT;
1741 PPGMPAGE pPage = &pRam->aPages[iPage];
1742 size_t cb;
1743
1744 /* Physical chunk in dynamically allocated range not present? */
1745 if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(pPage)))
1746 {
1747 /* Treat it as reserved; return zeros */
1748 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1749 if (cb >= cbRead)
1750 {
1751 memset(pvBuf, 0, cbRead);
1752 goto l_End;
1753 }
1754 memset(pvBuf, 0, cb);
1755 }
1756 /* temp hacks, will be reorganized. */
1757 /*
1758 * Physical handler.
1759 */
1760 else if ( RT_UNLIKELY(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) >= PGM_PAGE_HNDL_PHYS_STATE_ALL)
1761 && !(pPage->HCPhys & MM_RAM_FLAGS_MMIO)) /// @todo PAGE FLAGS
1762 {
1763 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
1764 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1765
1766#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
1767 /* find and call the handler */
1768 PPGMPHYSHANDLER pNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesR3->PhysHandlers, GCPhys);
1769 if (pNode && pNode->pfnHandlerR3)
1770 {
1771 size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
1772 if (cbRange < cb)
1773 cb = cbRange;
1774 if (cb > cbRead)
1775 cb = cbRead;
1776
1777 void *pvSrc = PGMRAMRANGE_GETHCPTR(pRam, off)
1778
1779 /* Note! Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
1780 rc = pNode->pfnHandlerR3(pVM, GCPhys, pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pNode->pvUserR3);
1781 }
1782#endif /* IN_RING3 */
1783 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1784 {
1785#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1786 void *pvSrc = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK));
1787#else
1788 void *pvSrc = PGMRAMRANGE_GETHCPTR(pRam, off)
1789#endif
1790
1791 if (cb >= cbRead)
1792 {
1793 memcpy(pvBuf, pvSrc, cbRead);
1794 goto l_End;
1795 }
1796 memcpy(pvBuf, pvSrc, cb);
1797 }
1798 else if (cb >= cbRead)
1799 goto l_End;
1800 }
1801 /*
1802 * Virtual handlers.
1803 */
1804 else if ( RT_UNLIKELY(PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) >= PGM_PAGE_HNDL_VIRT_STATE_ALL)
1805 && !(pPage->HCPhys & MM_RAM_FLAGS_MMIO)) /// @todo PAGE FLAGS
1806 {
1807 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
1808 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1809#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
1810 /* Search the whole tree for matching physical addresses (rather expensive!) */
1811 PPGMVIRTHANDLER pNode;
1812 unsigned iPage;
1813 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pNode, &iPage);
1814 if (RT_SUCCESS(rc2) && pNode->pfnHandlerR3)
1815 {
1816 size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
1817 if (cbRange < cb)
1818 cb = cbRange;
1819 if (cb > cbRead)
1820 cb = cbRead;
1821 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pNode->Core.Key & PAGE_BASE_GC_MASK)
1822 + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);
1823
1824 void *pvSrc = PGMRAMRANGE_GETHCPTR(pRam, off)
1825
1826 /* Note! Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
1827 rc = pNode->pfnHandlerR3(pVM, (RTGCPTR)GCPtr, pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, 0);
1828 }
1829#endif /* IN_RING3 */
1830 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1831 {
1832#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1833 void *pvSrc = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK));
1834#else
1835 void *pvSrc = PGMRAMRANGE_GETHCPTR(pRam, off)
1836#endif
1837 if (cb >= cbRead)
1838 {
1839 memcpy(pvBuf, pvSrc, cbRead);
1840 goto l_End;
1841 }
1842 memcpy(pvBuf, pvSrc, cb);
1843 }
1844 else if (cb >= cbRead)
1845 goto l_End;
1846 }
1847 else
1848 {
1849 switch (pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_ROM)) /** @todo PAGE FLAGS */
1850 {
1851 /*
1852 * Normal memory or ROM.
1853 */
1854 case 0:
1855 case MM_RAM_FLAGS_ROM:
1856 case MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_RESERVED:
1857 //case MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2: /* = shadow */ - //MMIO2 isn't in the mask.
1858 case MM_RAM_FLAGS_MMIO2: // MMIO2 isn't in the mask.
1859 {
1860#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1861 void *pvSrc = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK));
1862#else
1863 void *pvSrc = PGMRAMRANGE_GETHCPTR(pRam, off)
1864#endif
1865 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1866 if (cb >= cbRead)
1867 {
1868#if defined(IN_RING3) && defined(PGM_PHYSMEMACCESS_CACHING)
1869 if (cbRead <= 4 && !fGrabbedLock /* i.e. EMT */)
1870 pgmPhysCacheAdd(pVM, &pVM->pgm.s.pgmphysreadcache, GCPhys, (uint8_t*)pvSrc);
1871#endif /* IN_RING3 && PGM_PHYSMEMACCESS_CACHING */
1872 memcpy(pvBuf, pvSrc, cbRead);
1873 goto l_End;
1874 }
1875 memcpy(pvBuf, pvSrc, cb);
1876 break;
1877 }
1878
1879 /*
1880 * All reserved, nothing there.
1881 */
1882 case MM_RAM_FLAGS_RESERVED:
1883 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1884 if (cb >= cbRead)
1885 {
1886 memset(pvBuf, 0, cbRead);
1887 goto l_End;
1888 }
1889 memset(pvBuf, 0, cb);
1890 break;
1891
1892 /*
1893 * The rest needs to be taken more carefully.
1894 */
1895 default:
1896#if 1 /** @todo r=bird: Can you do this properly please. */
1897 /** @todo Try MMIO; quick hack */
1898 if (cbRead <= 8 && IOMMMIORead(pVM, GCPhys, (uint32_t *)pvBuf, cbRead) == VINF_SUCCESS)
1899 goto l_End;
1900#endif
1901
1902 /** @todo fix me later. */
1903 AssertReleaseMsgFailed(("Unknown read at %RGp size %u implement the complex physical reading case %RHp\n",
1904 GCPhys, cbRead,
1905 pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_ROM))); /** @todo PAGE FLAGS */
1906 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1907 break;
1908 }
1909 }
1910
1911 cbRead -= cb;
1912 off += cb;
1913 pvBuf = (char *)pvBuf + cb;
1914 }
1915
1916 GCPhys = pRam->GCPhysLast + 1;
1917 }
1918 else
1919 {
1920 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
1921
1922 /*
1923 * Unassigned address space.
1924 */
1925 size_t cb;
1926 if ( !pRam
1927 || (cb = pRam->GCPhys - GCPhys) >= cbRead)
1928 {
1929 memset(pvBuf, 0, cbRead);
1930 goto l_End;
1931 }
1932
1933            memset(pvBuf, 0, cb); /** @todo this is wrong, unassigned == 0xff not 0x00! */
1934 cbRead -= cb;
1935 pvBuf = (char *)pvBuf + cb;
1936 GCPhys += cb;
1937 }
1938 }
1939l_End:
1940#ifdef IN_RING3
1941 if (fGrabbedLock)
1942 pgmUnlock(pVM);
1943#endif
1944 return;
1945}
1946
1947#endif /* Old PGMPhysRead */
1948#ifdef VBOX_WITH_NEW_PHYS_CODE
1949
1950/**
1951 * Deals with writing to a page with one or more WRITE or ALL access handlers.
1952 *
1953 * @returns VBox status code. Can be ignored in ring-3.
1954 * @retval VINF_SUCCESS.
1955 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1956 *
1957 * @param pVM The VM handle.
1958 * @param pPage The page descriptor.
1959 * @param GCPhys The physical address to start writing at.
1960 * @param pvBuf What to write.
1961 * @param cbWrite How much to write - less or equal to a page.
1962 */
1963static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
1964{
1965 void *pvDst = NULL;
1966 int rc;
1967
1968 /*
1969 * Give priority to physical handlers (like #PF does).
1970 *
1971 * Hope for a lonely physical handler first that covers the whole
1972 * write area. This should be a pretty frequent case with MMIO and
1973 * the heavy usage of full page handlers in the page pool.
1974 */
1975 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
1976 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
1977 {
1978 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1979 if (pCur)
1980 {
1981 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1982 Assert(pCur->CTX_SUFF(pfnHandler));
1983
1984 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
1985 if (cbRange > cbWrite)
1986 cbRange = cbWrite;
1987
1988#ifndef IN_RING3
1989 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1990 NOREF(cbRange);
1991 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1992 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1993
1994#else /* IN_RING3 */
1995 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1996 if (!PGM_PAGE_IS_MMIO(pPage))
1997 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1998 else
1999 rc = VINF_SUCCESS;
2000 if (RT_SUCCESS(rc))
2001 {
2002 STAM_PROFILE_START(&pCur->Stat, h);
2003 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pCur->CTX_SUFF(pvUser));
2004 STAM_PROFILE_STOP(&pCur->Stat, h);
2005 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2006 memcpy(pvDst, pvBuf, cbRange);
2007 else
2008 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2009 }
2010 else
2011 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2012 GCPhys, pPage, rc), rc);
2013 if (RT_LIKELY(cbRange == cbWrite))
2014 return VINF_SUCCESS;
2015
2016 /* more fun to be had below */
2017 cbWrite -= cbRange;
2018 GCPhys += cbRange;
2019 pvBuf = (uint8_t *)pvBuf + cbRange;
2020 pvDst = (uint8_t *)pvDst + cbRange;
2021#endif /* IN_RING3 */
2022 }
2023 /* else: the handler is somewhere else in the page, deal with it below. */
2024 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2025 }
2026 /*
2027 * A virtual handler without any interfering physical handlers.
2028     * Hopefully it'll cover the whole write.
2029 */
2030 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2031 {
2032 unsigned iPage;
2033 PPGMVIRTHANDLER pCur;
2034 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
2035 if (RT_SUCCESS(rc))
2036 {
2037 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2038 if (cbRange > cbWrite)
2039 cbRange = cbWrite;
2040
2041#ifndef IN_RING3
2042 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2043 NOREF(cbRange);
2044 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2045 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2046
2047#else /* IN_RING3 */
2048
2049 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2050 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2051 if (RT_SUCCESS(rc))
2052 {
2053 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2054 if (pCur->pfnHandlerR3)
2055 {
2056 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
2057 + (iPage << PAGE_SHIFT)
2058 + (GCPhys & PAGE_OFFSET_MASK);
2059
2060 STAM_PROFILE_START(&pCur->Stat, h);
2061 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2062 STAM_PROFILE_STOP(&pCur->Stat, h);
2063 }
2064 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2065 memcpy(pvDst, pvBuf, cbRange);
2066 else
2067 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2068 }
2069 else
2070 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2071 GCPhys, pPage, rc), rc);
2072 if (RT_LIKELY(cbRange == cbWrite))
2073 return VINF_SUCCESS;
2074
2075 /* more fun to be had below */
2076 cbWrite -= cbRange;
2077 GCPhys += cbRange;
2078 pvBuf = (uint8_t *)pvBuf + cbRange;
2079 pvDst = (uint8_t *)pvDst + cbRange;
2080#endif
2081 }
2082 /* else: the handler is somewhere else in the page, deal with it below. */
2083 }
2084
2085 /*
2086 * Deal with all the odd ends.
2087 */
2088
2089 /* We need a writable destination page. */
2090 if (!pvDst)
2091 {
2092 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2093 AssertLogRelMsgReturn(RT_SUCCESS(rc),
2094 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2095 GCPhys, pPage, rc), rc);
2096 }
2097
2098 /* The loop state (big + ugly). */
2099 unsigned iVirtPage = 0;
2100 PPGMVIRTHANDLER pVirt = NULL;
2101 uint32_t offVirt = PAGE_SIZE;
2102 uint32_t offVirtLast = PAGE_SIZE;
2103 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2104
2105 PPGMPHYSHANDLER pPhys = NULL;
2106 uint32_t offPhys = PAGE_SIZE;
2107 uint32_t offPhysLast = PAGE_SIZE;
2108 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2109
2110 /* The loop. */
2111 for (;;)
2112 {
2113 /*
2114 * Find the closest handler at or above GCPhys.
2115 */
2116 if (fMoreVirt && !pVirt)
2117 {
2118 int rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
2119 if (RT_SUCCESS(rc))
2120 {
2121 offVirt = 0;
2122 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2123 }
2124 else
2125 {
2126 PPGMPHYS2VIRTHANDLER pVirtPhys;
2127 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2128 GCPhys, true /* fAbove */);
2129 if ( pVirtPhys
2130 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2131 {
2132 /* ASSUME that pVirtPhys only covers one page. */
2133 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2134 Assert(pVirtPhys->Core.Key > GCPhys);
2135
2136 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2137 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2138 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2139 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2140 }
2141 else
2142 {
2143 pVirt = NULL;
2144 fMoreVirt = false;
2145 offVirt = offVirtLast = PAGE_SIZE;
2146 }
2147 }
2148 }
2149
2150 if (fMorePhys && !pPhys)
2151 {
2152 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2153 if (pPhys)
2154 {
2155 offPhys = 0;
2156 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2157 }
2158 else
2159 {
2160 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2161 GCPhys, true /* fAbove */);
2162 if ( pPhys
2163 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2164 {
2165 offPhys = pPhys->Core.Key - GCPhys;
2166 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2167 }
2168 else
2169 {
2170 pPhys = NULL;
2171 fMorePhys = false;
2172 offPhys = offPhysLast = PAGE_SIZE;
2173 }
2174 }
2175 }
2176
2177 /*
2178 * Handle access to space without handlers (that's easy).
2179 */
2180 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2181 uint32_t cbRange = (uint32_t)cbWrite;
2182 if (offPhys && offVirt)
2183 {
2184 if (cbRange > offPhys)
2185 cbRange = offPhys;
2186 if (cbRange > offVirt)
2187 cbRange = offVirt;
2188 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2189 }
2190 /*
2191 * Physical handler.
2192 */
2193 else if (!offPhys && offVirt)
2194 {
2195 if (cbRange > offPhysLast + 1)
2196 cbRange = offPhysLast + 1;
2197 if (cbRange > offVirt)
2198 cbRange = offVirt;
2199#ifdef IN_RING3
2200 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2201 STAM_PROFILE_START(&pPhys->Stat, h);
2202 rc = pPhys->CTX_SUFF(pfnHandler)(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pPhys->CTX_SUFF(pvUser));
2203 STAM_PROFILE_STOP(&pPhys->Stat, h);
2204 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pPhys->pszDesc));
2205 pPhys = NULL;
2206#else
2207 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2208 NOREF(cbRange);
2209 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2210 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2211#endif
2212 }
2213 /*
2214 * Virtual handler.
2215 */
2216 else if (offPhys && !offVirt)
2217 {
2218 if (cbRange > offVirtLast + 1)
2219 cbRange = offVirtLast + 1;
2220 if (cbRange > offPhys)
2221 cbRange = offPhys;
2222#ifdef IN_RING3
2223        Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2224 if (pVirt->pfnHandlerR3)
2225 {
2226 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2227 + (iVirtPage << PAGE_SHIFT)
2228 + (GCPhys & PAGE_OFFSET_MASK);
2229 STAM_PROFILE_START(&pVirt->Stat, h);
2230 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2231 STAM_PROFILE_STOP(&pVirt->Stat, h);
2232 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2233 }
2234 pVirt = NULL;
2235#else
2236 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2237 NOREF(cbRange);
2238 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2239 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2240#endif
2241 }
2242 /*
2243 * Both... give the physical one priority.
2244 */
2245 else
2246 {
2247 Assert(!offPhys && !offVirt);
2248 if (cbRange > offVirtLast + 1)
2249 cbRange = offVirtLast + 1;
2250 if (cbRange > offPhysLast + 1)
2251 cbRange = offPhysLast + 1;
2252
2253#ifdef IN_RING3
2254 if (pVirt->pfnHandlerR3)
2255 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2256 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2257
2258 STAM_PROFILE_START(&pPhys->Stat, h);
2259 rc = pPhys->CTX_SUFF(pfnHandler)(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pPhys->CTX_SUFF(pvUser));
2260 STAM_PROFILE_STOP(&pPhys->Stat, h);
2261 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pPhys->pszDesc));
2262 if (pVirt->pfnHandlerR3)
2263 {
2264
2265 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2266 + (iVirtPage << PAGE_SHIFT)
2267 + (GCPhys & PAGE_OFFSET_MASK);
2268 STAM_PROFILE_START(&pVirt->Stat, h);
2269 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2270 STAM_PROFILE_STOP(&pVirt->Stat, h);
2271            AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc2=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
2272 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2273 rc = VINF_SUCCESS;
2274 }
2275 pPhys = NULL;
2276 pVirt = NULL;
2277#else
2278 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2279 NOREF(cbRange);
2280 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2281 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2282#endif
2283 }
2284 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2285 memcpy(pvDst, pvBuf, cbRange);
2286
2287 /*
2288 * Advance if we've got more stuff to do.
2289 */
2290 if (cbRange >= cbWrite)
2291 return VINF_SUCCESS;
2292
2293 cbWrite -= cbRange;
2294 GCPhys += cbRange;
2295 pvBuf = (uint8_t *)pvBuf + cbRange;
2296 pvDst = (uint8_t *)pvDst + cbRange;
2297
2298 offPhys -= cbRange;
2299 offPhysLast -= cbRange;
2300 offVirt -= cbRange;
2301 offVirtLast -= cbRange;
2302 }
2303}
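
/*
 * Worked example of the cbRange clamping above (illustrative only, the values
 * are hypothetical): an 8 byte write starting at page offset 0xFF8 where a
 * physical handler covers offsets 0xFFC-0xFFF (offPhys = 4, offPhysLast = 7)
 * and no virtual handler exists (offVirt = PAGE_SIZE).  The first pass takes
 * the no-handler branch and clamps cbRange to offPhys = 4, copying the first
 * 4 bytes straight into the page; after advancing, offPhys becomes 0, so the
 * second pass enters the physical handler branch with cbRange clamped to
 * offPhysLast + 1 = 4, invokes the handler for the last 4 bytes and returns.
 */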
2304
2305
2306/**
2307 * Write to physical memory.
2308 *
2309 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2310 * want to ignore those.
2311 *
2312 * @returns VBox status code. Can be ignored in ring-3.
2313 * @retval VINF_SUCCESS.
2314 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2315 *
2316 * @param pVM VM Handle.
2317 * @param GCPhys Physical address to write to.
2318 * @param pvBuf What to write.
2319 * @param cbWrite How many bytes to write.
2320 */
2321VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2322{
2323 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2324 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2325 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2326
2327 pgmLock(pVM);
2328
2329 /*
2330 * Copy loop on ram ranges.
2331 */
2332 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2333 for (;;)
2334 {
2335 /* Find range. */
2336 while (pRam && GCPhys > pRam->GCPhysLast)
2337 pRam = pRam->CTX_SUFF(pNext);
2338 /* Inside range or not? */
2339 if (pRam && GCPhys >= pRam->GCPhys)
2340 {
2341 /*
2342 * Must work our way thru this page by page.
2343 */
2344 RTGCPTR off = GCPhys - pRam->GCPhys;
2345 while (off < pRam->cb)
2346 {
2347 RTGCPTR iPage = off >> PAGE_SHIFT;
2348 PPGMPAGE pPage = &pRam->aPages[iPage];
2349 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2350 if (cb > cbWrite)
2351 cb = cbWrite;
2352
2353 /*
2354 * Any active WRITE or ALL access handlers?
2355 */
2356 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2357 {
2358 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2359 if (RT_FAILURE(rc))
2360                        { pgmUnlock(pVM); return rc; }
2361 }
2362 else
2363 {
2364 /*
2365 * Get the pointer to the page.
2366 */
2367 void *pvDst;
2368 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
2369 if (RT_SUCCESS(rc))
2370 memcpy(pvDst, pvBuf, cb);
2371 else
2372 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2373 pRam->GCPhys + off, pPage, rc));
2374 }
2375
2376 /* next page */
2377 if (cb >= cbWrite)
2378 {
2379 pgmUnlock(pVM);
2380 return VINF_SUCCESS;
2381 }
2382
2383 cbWrite -= cb;
2384 off += cb;
2385 pvBuf = (const char *)pvBuf + cb;
2386 } /* walk pages in ram range */
2387
2388 GCPhys = pRam->GCPhysLast + 1;
2389 }
2390 else
2391 {
2392 /*
2393 * Unassigned address space, skip it.
2394 */
2395 if (!pRam)
2396 break;
2397 size_t cb = pRam->GCPhys - GCPhys;
2398 if (cb >= cbWrite)
2399 break;
2400 cbWrite -= cb;
2401 pvBuf = (const char *)pvBuf + cb;
2402 GCPhys += cb;
2403 }
2404 } /* Ram range walk */
2405
2406 pgmUnlock(pVM);
2407 return VINF_SUCCESS;
2408}
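
/*
 * Illustrative usage sketch (not part of the original source; the destination
 * address and payload are hypothetical): a device posting a 4 byte completion
 * status into guest RAM, going through any WRITE or ALL access handlers.
 *
 * @code
 *      uint32_t const u32Status = 0;
 *      int rc = PGMPhysWrite(pVM, GCPhysStatusBlock, &u32Status, sizeof(u32Status));
 *      AssertRC(rc);   // Ring-3 always gets VINF_SUCCESS; R0/RC callers must
 *                      // cope with VERR_PGM_PHYS_WR_HIT_HANDLER.
 * @endcode
 */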
2409
2410#else /* Old PGMPhysWrite */
2411
2412/**
2413 * Write to physical memory.
2414 *
2415 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2416 * want to ignore those.
2417 *
2418 * @param pVM VM Handle.
2419 * @param GCPhys Physical address to write to.
2420 * @param pvBuf What to write.
2421 * @param cbWrite How many bytes to write.
2422 */
2423VMMDECL(void) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2424{
2425#ifdef IN_RING3
2426 bool fGrabbedLock = false;
2427#endif
2428
2429 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2430 AssertMsg(cbWrite > 0, ("don't even think about writing zero bytes!\n"));
2431 if (cbWrite == 0)
2432 return;
2433
2434 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2435
2436#ifdef IN_RING3
2437 if (!VM_IS_EMT(pVM))
2438 {
2439 pgmLock(pVM);
2440 fGrabbedLock = true;
2441 }
2442#endif
2443 /*
2444 * Copy loop on ram ranges.
2445 */
2446 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2447 for (;;)
2448 {
2449 /* Find range. */
2450 while (pRam && GCPhys > pRam->GCPhysLast)
2451 pRam = pRam->CTX_SUFF(pNext);
2452 /* Inside range or not? */
2453 if (pRam && GCPhys >= pRam->GCPhys)
2454 {
2455 /*
2456 * Must work our way thru this page by page.
2457 */
2458 RTGCPTR off = GCPhys - pRam->GCPhys;
2459 while (off < pRam->cb)
2460 {
2461 RTGCPTR iPage = off >> PAGE_SHIFT;
2462 PPGMPAGE pPage = &pRam->aPages[iPage];
2463
2464 /* Physical chunk in dynamically allocated range not present? */
2465 if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(pPage)))
2466 {
2467 int rc;
2468 RTGCPHYS GCPhysPage = pRam->GCPhys + off;
2469#ifdef IN_RING3
2470 if (fGrabbedLock)
2471 {
2472 pgmUnlock(pVM);
2473 rc = pgmr3PhysGrowRange(pVM, GCPhysPage);
2474 if (rc == VINF_SUCCESS)
2475 PGMPhysWrite(pVM, GCPhysPage, pvBuf, cbWrite); /* try again; can't assume pRam is still valid (paranoia) */
2476 return;
2477 }
2478 rc = pgmr3PhysGrowRange(pVM, GCPhysPage);
2479#else
2480 rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhysPage);
2481#endif
2482 if (rc != VINF_SUCCESS)
2483 goto l_End;
2484 }
2485
2486 size_t cb;
2487                /* temporary hack, will reorganize it later. */
2488 /*
2489 * Virtual handlers
2490 */
2491 if ( PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
2492 && !(pPage->HCPhys & MM_RAM_FLAGS_MMIO)) /// @todo PAGE FLAGS
2493 {
2494 if (PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2495 {
2496 /*
2497 * Physical write handler + virtual write handler.
2498 * Consider this a quick workaround for the CSAM + shadow caching problem.
2499 *
2500 * We hand it to the shadow caching first since it requires the unchanged
2501 * data. CSAM will have to put up with it already being changed.
2502 */
2503 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
2504 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2505#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
2506 /* 1. The physical handler */
2507 PPGMPHYSHANDLER pPhysNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesR3->PhysHandlers, GCPhys);
2508 if (pPhysNode && pPhysNode->pfnHandlerR3)
2509 {
2510 size_t cbRange = pPhysNode->Core.KeyLast - GCPhys + 1;
2511 if (cbRange < cb)
2512 cb = cbRange;
2513 if (cb > cbWrite)
2514 cb = cbWrite;
2515
2516 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)
2517
2518 /* Note! Dangerous assumption that R3 handlers don't do anything that really requires an EMT lock! */
2519 rc = pPhysNode->pfnHandlerR3(pVM, GCPhys, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, pPhysNode->pvUserR3);
2520 }
2521
2522 /* 2. The virtual handler (will see incorrect data) */
2523 PPGMVIRTHANDLER pVirtNode;
2524 unsigned iPage;
2525 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirtNode, &iPage);
2526 if (RT_SUCCESS(rc2) && pVirtNode->pfnHandlerR3)
2527 {
2528 size_t cbRange = pVirtNode->Core.KeyLast - GCPhys + 1;
2529 if (cbRange < cb)
2530 cb = cbRange;
2531 if (cb > cbWrite)
2532 cb = cbWrite;
2533 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirtNode->Core.Key & PAGE_BASE_GC_MASK)
2534 + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);
2535
2536 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)
2537
2538 /* Note! Dangerous assumption that R3 handlers don't do anything that really requires an EMT lock! */
2539 rc2 = pVirtNode->pfnHandlerR3(pVM, (RTGCPTR)GCPtr, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, 0);
2540 if ( ( rc2 != VINF_PGM_HANDLER_DO_DEFAULT
2541 && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2542 || ( RT_FAILURE(rc2)
2543 && RT_SUCCESS(rc)))
2544 rc = rc2;
2545 }
2546#endif /* IN_RING3 */
2547 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2548 {
2549#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2550 void *pvDst = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK));
2551#else
2552 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)
2553#endif
2554 if (cb >= cbWrite)
2555 {
2556 memcpy(pvDst, pvBuf, cbWrite);
2557 goto l_End;
2558 }
2559 memcpy(pvDst, pvBuf, cb);
2560 }
2561 else if (cb >= cbWrite)
2562 goto l_End;
2563 }
2564 else
2565 {
2566 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
2567 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2568#ifdef IN_RING3
2569/** @todo deal with this in GC and R0! */
2570 /* Search the whole tree for matching physical addresses (rather expensive!) */
2571 PPGMVIRTHANDLER pNode;
2572 unsigned iPage;
2573 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pNode, &iPage);
2574 if (RT_SUCCESS(rc2) && pNode->pfnHandlerR3)
2575 {
2576 size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
2577 if (cbRange < cb)
2578 cb = cbRange;
2579 if (cb > cbWrite)
2580 cb = cbWrite;
2581 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pNode->Core.Key & PAGE_BASE_GC_MASK)
2582 + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);
2583
2584 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)
2585
2586 /* Note! Dangerous assumption that R3 handlers don't do anything that really requires an EMT lock! */
2587 rc = pNode->pfnHandlerR3(pVM, (RTGCPTR)GCPtr, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, 0);
2588 }
2589#endif /* IN_RING3 */
2590 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2591 {
2592#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2593 void *pvDst = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK));
2594#else
2595 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)
2596#endif
2597 if (cb >= cbWrite)
2598 {
2599 memcpy(pvDst, pvBuf, cbWrite);
2600 goto l_End;
2601 }
2602 memcpy(pvDst, pvBuf, cb);
2603 }
2604 else if (cb >= cbWrite)
2605 goto l_End;
2606 }
2607 }
2608 /*
2609 * Physical handler.
2610 */
2611 else if ( RT_UNLIKELY(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) >= PGM_PAGE_HNDL_PHYS_STATE_WRITE)
2612 && !(pPage->HCPhys & MM_RAM_FLAGS_MMIO)) /// @todo PAGE FLAGS
2613 {
2614 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
2615 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2616#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
2617 /* find and call the handler */
2618 PPGMPHYSHANDLER pNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesR3->PhysHandlers, GCPhys);
2619 if (pNode && pNode->pfnHandlerR3)
2620 {
2621 size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
2622 if (cbRange < cb)
2623 cb = cbRange;
2624 if (cb > cbWrite)
2625 cb = cbWrite;
2626
2627 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)
2628
2629 /** @todo Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
2630 rc = pNode->pfnHandlerR3(pVM, GCPhys, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, pNode->pvUserR3);
2631 }
2632#endif /* IN_RING3 */
2633 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2634 {
2635#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2636 void *pvDst = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK));
2637#else
2638 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)
2639#endif
2640 if (cb >= cbWrite)
2641 {
2642 memcpy(pvDst, pvBuf, cbWrite);
2643 goto l_End;
2644 }
2645 memcpy(pvDst, pvBuf, cb);
2646 }
2647 else if (cb >= cbWrite)
2648 goto l_End;
2649 }
2650 else
2651 {
2652 /** @todo r=bird: missing MM_RAM_FLAGS_ROM here, we shall not allow anyone to overwrite the ROM! */
2653 switch (pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)) /** @todo PAGE FLAGS */
2654 {
2655 /*
2656 * Normal memory, MMIO2 or writable shadow ROM.
2657 */
2658 case 0:
2659 case MM_RAM_FLAGS_MMIO2:
2660 case MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2: /* shadow rom */
2661 {
2662#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2663 void *pvDst = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK));
2664#else
2665 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)
2666#endif
2667 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2668 if (cb >= cbWrite)
2669 {
2670#if defined(IN_RING3) && defined(PGM_PHYSMEMACCESS_CACHING)
2671 if (cbWrite <= 4 && !fGrabbedLock /* i.e. EMT */)
2672 pgmPhysCacheAdd(pVM, &pVM->pgm.s.pgmphyswritecache, GCPhys, (uint8_t*)pvDst);
2673#endif /* IN_RING3 && PGM_PHYSMEMACCESS_CACHING */
2674 memcpy(pvDst, pvBuf, cbWrite);
2675 goto l_End;
2676 }
2677 memcpy(pvDst, pvBuf, cb);
2678 break;
2679 }
2680
2681 /*
2682 * All reserved, nothing there.
2683 */
2684 case MM_RAM_FLAGS_RESERVED:
2685 case MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO2:
2686 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2687 if (cb >= cbWrite)
2688 goto l_End;
2689 break;
2690
2691
2692 /*
2693 * The rest needs to be taken more carefully.
2694 */
2695 default:
2696#if 1 /** @todo r=bird: Can you do this properly please. */
2697 /** @todo Try MMIO; quick hack */
2698 if (cbWrite <= 8 && IOMMMIOWrite(pVM, GCPhys, *(uint32_t *)pvBuf, cbWrite) == VINF_SUCCESS)
2699 goto l_End;
2700#endif
2701
2702 /** @todo fix me later. */
2703 AssertReleaseMsgFailed(("Unknown write at %RGp size %u implement the complex physical writing case %RHp\n",
2704 GCPhys, cbWrite,
2705 (pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)))); /** @todo PAGE FLAGS */
2706 /* skip the write */
2707 cb = cbWrite;
2708 break;
2709 }
2710 }
2711
2712 cbWrite -= cb;
2713 off += cb;
2714 pvBuf = (const char *)pvBuf + cb;
2715 }
2716
2717 GCPhys = pRam->GCPhysLast + 1;
2718 }
2719 else
2720 {
2721 /*
2722 * Unassigned address space.
2723 */
2724 size_t cb;
2725 if ( !pRam
2726 || (cb = pRam->GCPhys - GCPhys) >= cbWrite)
2727 goto l_End;
2728
2729 cbWrite -= cb;
2730 pvBuf = (const char *)pvBuf + cb;
2731 GCPhys += cb;
2732 }
2733 }
2734l_End:
2735#ifdef IN_RING3
2736 if (fGrabbedLock)
2737 pgmUnlock(pVM);
2738#endif
2739 return;
2740}
2741
2742#endif /* Old PGMPhysWrite */
2743
2744
2745/**
2746 * Read from guest physical memory by GC physical address, bypassing
2747 * MMIO and access handlers.
2748 *
2749 * @returns VBox status.
2750 * @param pVM VM handle.
2751 * @param pvDst The destination address.
2752 * @param GCPhysSrc The source address (GC physical address).
2753 * @param cb The number of bytes to read.
2754 */
2755VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2756{
2757 /*
2758 * Treat the first page as a special case.
2759 */
2760 if (!cb)
2761 return VINF_SUCCESS;
2762
2763 /* map the 1st page */
2764 void const *pvSrc;
2765 PGMPAGEMAPLOCK Lock;
2766 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2767 if (RT_FAILURE(rc))
2768 return rc;
2769
2770 /* optimize for the case where access is completely within the first page. */
2771 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2772 if (RT_LIKELY(cb <= cbPage))
2773 {
2774 memcpy(pvDst, pvSrc, cb);
2775 PGMPhysReleasePageMappingLock(pVM, &Lock);
2776 return VINF_SUCCESS;
2777 }
2778
2779 /* copy to the end of the page. */
2780 memcpy(pvDst, pvSrc, cbPage);
2781 PGMPhysReleasePageMappingLock(pVM, &Lock);
2782 GCPhysSrc += cbPage;
2783 pvDst = (uint8_t *)pvDst + cbPage;
2784 cb -= cbPage;
2785
2786 /*
2787 * Page by page.
2788 */
2789 for (;;)
2790 {
2791 /* map the page */
2792 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2793 if (RT_FAILURE(rc))
2794 return rc;
2795
2796 /* last page? */
2797 if (cb <= PAGE_SIZE)
2798 {
2799 memcpy(pvDst, pvSrc, cb);
2800 PGMPhysReleasePageMappingLock(pVM, &Lock);
2801 return VINF_SUCCESS;
2802 }
2803
2804 /* copy the entire page and advance */
2805 memcpy(pvDst, pvSrc, PAGE_SIZE);
2806 PGMPhysReleasePageMappingLock(pVM, &Lock);
2807 GCPhysSrc += PAGE_SIZE;
2808 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2809 cb -= PAGE_SIZE;
2810 }
2811 /* won't ever get here. */
2812}
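
/*
 * Illustrative usage sketch (the caller context and GC physical address are
 * hypothetical): copying one page of guest RAM without triggering any access
 * handlers, e.g. from a debugger or snapshot path.
 *
 * @code
 *      uint8_t abPage[PAGE_SIZE];
 *      int rc = PGMPhysSimpleReadGCPhys(pVM, abPage, GCPhysPage, sizeof(abPage));
 *      if (RT_FAILURE(rc))
 *          LogRel(("Failed to read %RGp: %Rrc\n", GCPhysPage, rc));
 * @endcode
 */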
2813
2814#ifndef IN_RC /* Ring 0 & 3 only. (Just not needed in GC.) */
2815
2816/**
2817 * Write to guest physical memory referenced by GC physical address.
2818 * Write memory to GC physical address in guest physical memory.
2819 *
2820 * This will bypass MMIO and access handlers.
2821 *
2822 * @returns VBox status.
2823 * @param pVM VM handle.
2824 * @param GCPhysDst The GC physical address of the destination.
2825 * @param pvSrc The source buffer.
2826 * @param cb The number of bytes to write.
2827 */
2828VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2829{
2830 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2831
2832 /*
2833 * Treat the first page as a special case.
2834 */
2835 if (!cb)
2836 return VINF_SUCCESS;
2837
2838 /* map the 1st page */
2839 void *pvDst;
2840 PGMPAGEMAPLOCK Lock;
2841 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2842 if (RT_FAILURE(rc))
2843 return rc;
2844
2845 /* optimize for the case where access is completely within the first page. */
2846 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2847 if (RT_LIKELY(cb <= cbPage))
2848 {
2849 memcpy(pvDst, pvSrc, cb);
2850 PGMPhysReleasePageMappingLock(pVM, &Lock);
2851 return VINF_SUCCESS;
2852 }
2853
2854 /* copy to the end of the page. */
2855 memcpy(pvDst, pvSrc, cbPage);
2856 PGMPhysReleasePageMappingLock(pVM, &Lock);
2857 GCPhysDst += cbPage;
2858 pvSrc = (const uint8_t *)pvSrc + cbPage;
2859 cb -= cbPage;
2860
2861 /*
2862 * Page by page.
2863 */
2864 for (;;)
2865 {
2866 /* map the page */
2867 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2868 if (RT_FAILURE(rc))
2869 return rc;
2870
2871 /* last page? */
2872 if (cb <= PAGE_SIZE)
2873 {
2874 memcpy(pvDst, pvSrc, cb);
2875 PGMPhysReleasePageMappingLock(pVM, &Lock);
2876 return VINF_SUCCESS;
2877 }
2878
2879 /* copy the entire page and advance */
2880 memcpy(pvDst, pvSrc, PAGE_SIZE);
2881 PGMPhysReleasePageMappingLock(pVM, &Lock);
2882 GCPhysDst += PAGE_SIZE;
2883 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2884 cb -= PAGE_SIZE;
2885 }
2886 /* won't ever get here. */
2887}
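
/*
 * Illustrative usage sketch (hypothetical address and patch bytes): writing a
 * few bytes of guest RAM directly, ignoring MMIO and access handlers, as the
 * write counterpart of the read example above.
 *
 * @code
 *      static const uint8_t s_abNop[] = { 0x90, 0x90, 0x90 };
 *      int rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysPatch, s_abNop, sizeof(s_abNop));
 *      AssertRC(rc);
 * @endcode
 */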
2888
2889
2890/**
2891 * Read from guest physical memory referenced by GC pointer.
2892 *
2893 * This function uses the current CR3/CR0/CR4 of the guest and will
2894 * bypass access handlers and not set any accessed bits.
2895 *
2896 * @returns VBox status.
2897 * @param pVM VM handle.
2898 * @param pvDst The destination address.
2899 * @param GCPtrSrc The source address (GC pointer).
2900 * @param cb The number of bytes to read.
2901 */
2902VMMDECL(int) PGMPhysSimpleReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2903{
2904 /*
2905 * Treat the first page as a special case.
2906 */
2907 if (!cb)
2908 return VINF_SUCCESS;
2909
2910 /* map the 1st page */
2911 void const *pvSrc;
2912 PGMPAGEMAPLOCK Lock;
2913 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVM, GCPtrSrc, &pvSrc, &Lock);
2914 if (RT_FAILURE(rc))
2915 return rc;
2916
2917 /* optimize for the case where access is completely within the first page. */
2918 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2919 if (RT_LIKELY(cb <= cbPage))
2920 {
2921 memcpy(pvDst, pvSrc, cb);
2922 PGMPhysReleasePageMappingLock(pVM, &Lock);
2923 return VINF_SUCCESS;
2924 }
2925
2926 /* copy to the end of the page. */
2927 memcpy(pvDst, pvSrc, cbPage);
2928 PGMPhysReleasePageMappingLock(pVM, &Lock);
2929 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
2930 pvDst = (uint8_t *)pvDst + cbPage;
2931 cb -= cbPage;
2932
2933 /*
2934 * Page by page.
2935 */
2936 for (;;)
2937 {
2938 /* map the page */
2939 rc = PGMPhysGCPtr2CCPtrReadOnly(pVM, GCPtrSrc, &pvSrc, &Lock);
2940 if (RT_FAILURE(rc))
2941 return rc;
2942
2943 /* last page? */
2944 if (cb <= PAGE_SIZE)
2945 {
2946 memcpy(pvDst, pvSrc, cb);
2947 PGMPhysReleasePageMappingLock(pVM, &Lock);
2948 return VINF_SUCCESS;
2949 }
2950
2951 /* copy the entire page and advance */
2952 memcpy(pvDst, pvSrc, PAGE_SIZE);
2953 PGMPhysReleasePageMappingLock(pVM, &Lock);
2954 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
2955 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2956 cb -= PAGE_SIZE;
2957 }
2958 /* won't ever get here. */
2959}
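
/*
 * Illustrative usage sketch (the guest virtual address is hypothetical):
 * fetching instruction bytes through the current guest page tables without
 * marking the pages accessed and without invoking access handlers.
 *
 * @code
 *      uint8_t abInstr[16];
 *      int rc = PGMPhysSimpleReadGCPtr(pVM, abInstr, GCPtrPC, sizeof(abInstr));
 *      if (RT_FAILURE(rc))
 *          return rc;  // e.g. a page-not-present status if GCPtrPC isn't mapped.
 * @endcode
 */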
2960
2961
2962/**
2963 * Write to guest physical memory referenced by GC pointer.
2964 *
2965 * This function uses the current CR3/CR0/CR4 of the guest and will
2966 * bypass access handlers and not set dirty or accessed bits.
2967 *
2968 * @returns VBox status.
2969 * @param pVM VM handle.
2970 * @param GCPtrDst The destination address (GC pointer).
2971 * @param pvSrc The source address.
2972 * @param cb The number of bytes to write.
2973 */
2974VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2975{
2976 /*
2977 * Treat the first page as a special case.
2978 */
2979 if (!cb)
2980 return VINF_SUCCESS;
2981
2982 /* map the 1st page */
2983 void *pvDst;
2984 PGMPAGEMAPLOCK Lock;
2985 int rc = PGMPhysGCPtr2CCPtr(pVM, GCPtrDst, &pvDst, &Lock);
2986 if (RT_FAILURE(rc))
2987 return rc;
2988
2989 /* optimize for the case where access is completely within the first page. */
2990 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2991 if (RT_LIKELY(cb <= cbPage))
2992 {
2993 memcpy(pvDst, pvSrc, cb);
2994 PGMPhysReleasePageMappingLock(pVM, &Lock);
2995 return VINF_SUCCESS;
2996 }
2997
2998 /* copy to the end of the page. */
2999 memcpy(pvDst, pvSrc, cbPage);
3000 PGMPhysReleasePageMappingLock(pVM, &Lock);
3001 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3002 pvSrc = (const uint8_t *)pvSrc + cbPage;
3003 cb -= cbPage;
3004
3005 /*
3006 * Page by page.
3007 */
3008 for (;;)
3009 {
3010 /* map the page */
3011 rc = PGMPhysGCPtr2CCPtr(pVM, GCPtrDst, &pvDst, &Lock);
3012 if (RT_FAILURE(rc))
3013 return rc;
3014
3015 /* last page? */
3016 if (cb <= PAGE_SIZE)
3017 {
3018 memcpy(pvDst, pvSrc, cb);
3019 PGMPhysReleasePageMappingLock(pVM, &Lock);
3020 return VINF_SUCCESS;
3021 }
3022
3023 /* copy the entire page and advance */
3024 memcpy(pvDst, pvSrc, PAGE_SIZE);
3025 PGMPhysReleasePageMappingLock(pVM, &Lock);
3026 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3027 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3028 cb -= PAGE_SIZE;
3029 }
3030 /* won't ever get here. */
3031}
3032
3033
3034/**
3035 * Write to guest physical memory referenced by GC pointer and update the PTE.
3036 *
3037 * This function uses the current CR3/CR0/CR4 of the guest and will
3038 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3039 *
3040 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3041 *
3042 * @returns VBox status.
3043 * @param pVM VM handle.
3044 * @param GCPtrDst The destination address (GC pointer).
3045 * @param pvSrc The source address.
3046 * @param cb The number of bytes to write.
3047 */
3048VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3049{
3050 /*
3051 * Treat the first page as a special case.
3052 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3053 */
3054 if (!cb)
3055 return VINF_SUCCESS;
3056
3057 /* map the 1st page */
3058 void *pvDst;
3059 PGMPAGEMAPLOCK Lock;
3060 int rc = PGMPhysGCPtr2CCPtr(pVM, GCPtrDst, &pvDst, &Lock);
3061 if (RT_FAILURE(rc))
3062 return rc;
3063
3064 /* optimize for the case where access is completely within the first page. */
3065 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3066 if (RT_LIKELY(cb <= cbPage))
3067 {
3068 memcpy(pvDst, pvSrc, cb);
3069 PGMPhysReleasePageMappingLock(pVM, &Lock);
3070 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3071 return VINF_SUCCESS;
3072 }
3073
3074 /* copy to the end of the page. */
3075 memcpy(pvDst, pvSrc, cbPage);
3076 PGMPhysReleasePageMappingLock(pVM, &Lock);
3077 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3078 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3079 pvSrc = (const uint8_t *)pvSrc + cbPage;
3080 cb -= cbPage;
3081
3082 /*
3083 * Page by page.
3084 */
3085 for (;;)
3086 {
3087 /* map the page */
3088 rc = PGMPhysGCPtr2CCPtr(pVM, GCPtrDst, &pvDst, &Lock);
3089 if (RT_FAILURE(rc))
3090 return rc;
3091
3092 /* last page? */
3093 if (cb <= PAGE_SIZE)
3094 {
3095 memcpy(pvDst, pvSrc, cb);
3096 PGMPhysReleasePageMappingLock(pVM, &Lock);
3097 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3098 return VINF_SUCCESS;
3099 }
3100
3101 /* copy the entire page and advance */
3102 memcpy(pvDst, pvSrc, PAGE_SIZE);
3103 PGMPhysReleasePageMappingLock(pVM, &Lock);
3104 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3105 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3106 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3107 cb -= PAGE_SIZE;
3108 }
3109 /* won't ever get here. */
3110}
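
/*
 * Illustrative usage sketch (the stack pointer and register frame are
 * hypothetical): emulating a push onto the guest stack, where the emulated
 * CPU would have set the accessed and dirty bits itself; contrast with
 * PGMPhysSimpleWriteGCPtr() which leaves the PTE untouched.
 *
 * @code
 *      uint32_t const u32Value = pRegFrame->eax;
 *      int rc = PGMPhysSimpleDirtyWriteGCPtr(pVM, GCPtrStack, &u32Value, sizeof(u32Value));
 *      if (RT_FAILURE(rc))
 *          return rc;
 * @endcode
 */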
3111
3112
3113/**
3114 * Read from guest physical memory referenced by GC pointer.
3115 *
3116 * This function uses the current CR3/CR0/CR4 of the guest and will
3117 * respect access handlers and set accessed bits.
3118 *
3119 * @returns VBox status.
3120 * @param pVM VM handle.
3121 * @param pvDst The destination address.
3122 * @param GCPtrSrc The source address (GC pointer).
3123 * @param cb The number of bytes to read.
3124 * @thread The vCPU EMT.
3125 */
3126VMMDECL(int) PGMPhysReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3127{
3128 RTGCPHYS GCPhys;
3129 uint64_t fFlags;
3130 int rc;
3131
3132 /*
3133 * Anything to do?
3134 */
3135 if (!cb)
3136 return VINF_SUCCESS;
3137
3138 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3139
3140 /*
3141 * Optimize reads within a single page.
3142 */
3143 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3144 {
3145 /* Convert virtual to physical address + flags */
3146 rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3147 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3148 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3149
3150 /* mark the guest page as accessed. */
3151 if (!(fFlags & X86_PTE_A))
3152 {
3153 rc = PGMGstModifyPage(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3154 AssertRC(rc);
3155 }
3156
3157#ifdef VBOX_WITH_NEW_PHYS_CODE
3158 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
3159#else
3160 PGMPhysRead(pVM, GCPhys, pvDst, cb);
3161 return VINF_SUCCESS;
3162#endif
3163 }
3164
3165 /*
3166 * Page by page.
3167 */
3168 for (;;)
3169 {
3170 /* Convert virtual to physical address + flags */
3171 rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3172 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3173 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3174
3175 /* mark the guest page as accessed. */
3176 if (!(fFlags & X86_PTE_A))
3177 {
3178 rc = PGMGstModifyPage(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3179 AssertRC(rc);
3180 }
3181
3182 /* copy */
3183 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3184#ifdef VBOX_WITH_NEW_PHYS_CODE
3185 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
3186 if (cbRead >= cb || RT_FAILURE(rc))
3187 return rc;
3188#else
3189 if (cbRead >= cb)
3190 {
3191 PGMPhysRead(pVM, GCPhys, pvDst, cb);
3192 return VINF_SUCCESS;
3193 }
3194 PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
3195#endif
3196
3197 /* next */
3198 cb -= cbRead;
3199 pvDst = (uint8_t *)pvDst + cbRead;
3200 GCPtrSrc += cbRead;
3201 }
3202}
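
/*
 * Illustrative usage sketch (the guest pointer is hypothetical): reading a
 * guest data structure through the current page tables while still honouring
 * any access handlers on the underlying physical pages and setting the
 * accessed bit, unlike the PGMPhysSimple*GCPtr variants above.
 *
 * @code
 *      uint64_t u64Descriptor;
 *      int rc = PGMPhysReadGCPtr(pVM, &u64Descriptor, GCPtrGdtEntry, sizeof(u64Descriptor));
 *      if (RT_FAILURE(rc))
 *          return rc;
 * @endcode
 */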
3203
3204
3205/**
3206 * Write to guest physical memory referenced by GC pointer.
3207 *
3208 * This function uses the current CR3/CR0/CR4 of the guest and will
3209 * respect access handlers and set dirty and accessed bits.
3210 *
3211 * @returns VBox status.
3212 * @retval VINF_SUCCESS.
3213 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
3214 *
3215 * @param pVM VM handle.
3216 * @param GCPtrDst The destination address (GC pointer).
3217 * @param pvSrc The source address.
3218 * @param cb The number of bytes to write.
3219 */
3220VMMDECL(int) PGMPhysWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3221{
3222 RTGCPHYS GCPhys;
3223 uint64_t fFlags;
3224 int rc;
3225
3226 /*
3227 * Anything to do?
3228 */
3229 if (!cb)
3230 return VINF_SUCCESS;
3231
3232 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3233
3234 /*
3235 * Optimize writes within a single page.
3236 */
3237 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3238 {
3239 /* Convert virtual to physical address + flags */
3240 rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3241 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3242 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3243
3244 /* Mention when we ignore X86_PTE_RW... */
3245 if (!(fFlags & X86_PTE_RW))
3246        Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3247
3248 /* Mark the guest page as accessed and dirty if necessary. */
3249 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3250 {
3251 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3252 AssertRC(rc);
3253 }
3254
3255#ifdef VBOX_WITH_NEW_PHYS_CODE
3256 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
3257#else
3258 PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
3259 return VINF_SUCCESS;
3260#endif
3261 }
3262
3263 /*
3264 * Page by page.
3265 */
3266 for (;;)
3267 {
3268 /* Convert virtual to physical address + flags */
3269 rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3270 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3271 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3272
3273 /* Mention when we ignore X86_PTE_RW... */
3274 if (!(fFlags & X86_PTE_RW))
3275        Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3276
3277 /* Mark the guest page as accessed and dirty if necessary. */
3278 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3279 {
3280 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3281 AssertRC(rc);
3282 }
3283
3284 /* copy */
3285 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3286#ifdef VBOX_WITH_NEW_PHYS_CODE
3287 int rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
3288 if (cbWrite >= cb || RT_FAILURE(rc))
3289 return rc;
3290#else
3291 if (cbWrite >= cb)
3292 {
3293 PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
3294 return VINF_SUCCESS;
3295 }
3296 PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
3297#endif
3298
3299 /* next */
3300 cb -= cbWrite;
3301 pvSrc = (uint8_t *)pvSrc + cbWrite;
3302 GCPtrDst += cbWrite;
3303 }
3304}
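
/*
 * Illustrative usage sketch (the guest pointer and u64Result value are
 * hypothetical): writing back an emulated result through the guest page
 * tables, with access handlers respected and the A/D bits updated.
 *
 * @code
 *      int rc = PGMPhysWriteGCPtr(pVM, GCPtrResult, &u64Result, sizeof(u64Result));
 *      if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)  // R0/RC only, never in ring-3.
 *          return VINF_EM_RAW_EMULATE_INSTR;    // Hypothetical fallback to ring-3.
 * @endcode
 */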
3305
3306#endif /* !IN_RC */
3307
3308/**
3309 * Performs a read of guest virtual memory for instruction emulation.
3310 *
3311 * This will check permissions, raise exceptions and update the access bits.
3312 *
3313 * The current implementation will bypass all access handlers. It may later be
3314 * changed to at least respect MMIO.
3315 *
3316 *
3317 * @returns VBox status code suitable to scheduling.
3318 * @retval VINF_SUCCESS if the read was performed successfully.
3319 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3320 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3321 *
3322 * @param pVM The VM handle.
3323 * @param pCtxCore The context core.
3324 * @param pvDst Where to put the bytes we've read.
3325 * @param GCPtrSrc The source address.
3326 * @param cb The number of bytes to read. Not more than a page.
3327 *
3328 * @remark This function will dynamically map physical pages in GC. This may unmap
3329 * mappings done by the caller. Be careful!
3330 */
3331VMMDECL(int) PGMPhysInterpretedRead(PVM pVM, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3332{
3333 Assert(cb <= PAGE_SIZE);
3334
3335/** @todo r=bird: This isn't perfect!
3336 * -# It's not checking for reserved bits being 1.
3337 * -# It's not correctly dealing with the access bit.
3338 * -# It's not respecting MMIO memory or any other access handlers.
3339 */
3340 /*
3341 * 1. Translate virtual to physical. This may fault.
3342 * 2. Map the physical address.
3343 * 3. Do the read operation.
3344 * 4. Set access bits if required.
3345 */
3346 int rc;
3347 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3348 if (cb <= cb1)
3349 {
3350 /*
3351 * Not crossing pages.
3352 */
3353 RTGCPHYS GCPhys;
3354 uint64_t fFlags;
3355 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags, &GCPhys);
3356 if (RT_SUCCESS(rc))
3357 {
3358 /** @todo we should check reserved bits ... */
3359 void *pvSrc;
3360 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
3361 switch (rc)
3362 {
3363 case VINF_SUCCESS:
3364 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3365 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3366 break;
3367 case VERR_PGM_PHYS_PAGE_RESERVED:
3368 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3369 memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
3370 break;
3371 default:
3372 return rc;
3373 }
3374
3375 /** @todo access bit emulation isn't 100% correct. */
3376 if (!(fFlags & X86_PTE_A))
3377 {
3378 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3379 AssertRC(rc);
3380 }
3381 return VINF_SUCCESS;
3382 }
3383 }
3384 else
3385 {
3386 /*
3387 * Crosses pages.
3388 */
3389 size_t cb2 = cb - cb1;
3390 uint64_t fFlags1;
3391 RTGCPHYS GCPhys1;
3392 uint64_t fFlags2;
3393 RTGCPHYS GCPhys2;
3394 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags1, &GCPhys1);
3395 if (RT_SUCCESS(rc))
3396 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3397 if (RT_SUCCESS(rc))
3398 {
3399 /** @todo we should check reserved bits ... */
3400 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3401 void *pvSrc1;
3402 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
3403 switch (rc)
3404 {
3405 case VINF_SUCCESS:
3406 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3407 break;
3408 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3409 memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
3410 break;
3411 default:
3412 return rc;
3413 }
3414
3415 void *pvSrc2;
3416 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
3417 switch (rc)
3418 {
3419 case VINF_SUCCESS:
3420 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3421 break;
3422 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3423 memset((uint8_t *)pvDst + cb1, 0, cb2); /** @todo this is wrong, it should be 0xff */
3424 break;
3425 default:
3426 return rc;
3427 }
3428
3429 if (!(fFlags1 & X86_PTE_A))
3430 {
3431 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3432 AssertRC(rc);
3433 }
3434 if (!(fFlags2 & X86_PTE_A))
3435 {
3436 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3437 AssertRC(rc);
3438 }
3439 return VINF_SUCCESS;
3440 }
3441 }
3442
3443 /*
3444 * Raise a #PF.
3445 */
3446 uint32_t uErr;
3447
3448 /* Get the current privilege level. */
3449 uint32_t cpl = CPUMGetGuestCPL(pVM, pCtxCore);
3450 switch (rc)
3451 {
3452 case VINF_SUCCESS:
3453 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3454 break;
3455
3456 case VERR_PAGE_NOT_PRESENT:
3457 case VERR_PAGE_TABLE_NOT_PRESENT:
3458 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3459 break;
3460
3461 default:
3462 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3463 return rc;
3464 }
3465 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3466 return TRPMRaiseXcptErrCR2(pVM, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3467}
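
/*
 * Illustrative usage sketch (the instruction emulation context is
 * hypothetical): reading an instruction operand on behalf of the guest; a
 * translation failure is already converted into a guest \#PF or a dispatch
 * status by the function itself.
 *
 * @code
 *      uint16_t u16Port;
 *      int rc = PGMPhysInterpretedRead(pVM, pRegFrame, &u16Port, GCPtrOperand, sizeof(u16Port));
 *      if (rc != VINF_SUCCESS)
 *          return rc;  // VINF_EM_RAW_GUEST_TRAP / VINF_TRPM_XCPT_DISPATCHED etc.
 * @endcode
 */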
3468
3469
3470/**
3471 * Performs a read of guest virtual memory for instruction emulation.
3472 *
3473 * This will check permissions, raise exceptions and update the access bits.
3474 *
3475 * The current implementation will bypass all access handlers. It may later be
3476 * changed to at least respect MMIO.
3477 *
3478 *
3479 * @returns VBox status code suitable to scheduling.
3480 * @retval VINF_SUCCESS if the read was performed successfully.
3481 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3482 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3483 *
3484 * @param pVM The VM handle.
3485 * @param pCtxCore The context core.
3486 * @param pvDst Where to put the bytes we've read.
3487 * @param GCPtrSrc The source address.
3488 * @param cb The number of bytes to read. Not more than a page.
3489 * @param fRaiseTrap If set the trap will be raised as per spec, if clear
3490 * an appropriate error status will be returned (no
3491 * informational status at all).
3492 *
3493 *
3494 * @remarks Takes the PGM lock.
3495 * @remarks A page fault on the 2nd page of the access will be raised without
3496 * the bits from the first page having been copied, since we're
3497 * ASSUMING that the caller is emulating an instruction access.
3498 * @remarks This function will dynamically map physical pages in GC. This may
3499 * unmap mappings done by the caller. Be careful!
3500 */
3501VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVM pVM, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
3502{
3503 Assert(cb <= PAGE_SIZE);
3504
3505 /*
3506 * 1. Translate virtual to physical. This may fault.
3507 * 2. Map the physical address.
3508 * 3. Do the read operation.
3509 * 4. Set access bits if required.
3510 */
3511 int rc;
3512 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3513 if (cb <= cb1)
3514 {
3515 /*
3516 * Not crossing pages.
3517 */
3518 RTGCPHYS GCPhys;
3519 uint64_t fFlags;
3520 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags, &GCPhys);
3521 if (RT_SUCCESS(rc))
3522 {
3523 if (1) /** @todo we should check reserved bits ... */
3524 {
3525 const void *pvSrc;
3526 PGMPAGEMAPLOCK Lock;
3527 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3528 switch (rc)
3529 {
3530 case VINF_SUCCESS:
3531 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3532 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3533 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3534 break;
3535 case VERR_PGM_PHYS_PAGE_RESERVED:
3536 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3537 memset(pvDst, 0xff, cb);
3538 break;
3539 default:
3540 AssertMsgFailed(("%Rrc\n", rc));
3541 AssertReturn(RT_FAILURE(rc), VERR_INTERNAL_ERROR);
3542 return rc;
3543 }
3544 PGMPhysReleasePageMappingLock(pVM, &Lock);
3545
3546 if (!(fFlags & X86_PTE_A))
3547 {
3548 /** @todo access bit emulation isn't 100% correct. */
3549 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3550 AssertRC(rc);
3551 }
3552 return VINF_SUCCESS;
3553 }
3554 }
3555 }
3556 else
3557 {
3558 /*
3559 * Crosses pages.
3560 */
3561 size_t cb2 = cb - cb1;
3562 uint64_t fFlags1;
3563 RTGCPHYS GCPhys1;
3564 uint64_t fFlags2;
3565 RTGCPHYS GCPhys2;
3566 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags1, &GCPhys1);
3567 if (RT_SUCCESS(rc))
3568 {
3569 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3570 if (RT_SUCCESS(rc))
3571 {
3572 if (1) /** @todo we should check reserved bits ... */
3573 {
3574 const void *pvSrc;
3575 PGMPAGEMAPLOCK Lock;
3576 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3577 switch (rc)
3578 {
3579 case VINF_SUCCESS:
3580 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3581 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3582 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3583 PGMPhysReleasePageMappingLock(pVM, &Lock);
3584 break;
3585 case VERR_PGM_PHYS_PAGE_RESERVED:
3586 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3587 memset(pvDst, 0xff, cb1);
3588 break;
3589 default:
3590 AssertMsgFailed(("%Rrc\n", rc));
3591 AssertReturn(RT_FAILURE(rc), VERR_INTERNAL_ERROR);
3592 return rc;
3593 }
3594
3595 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3596 switch (rc)
3597 {
3598 case VINF_SUCCESS:
3599 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3600 PGMPhysReleasePageMappingLock(pVM, &Lock);
3601 break;
3602 case VERR_PGM_PHYS_PAGE_RESERVED:
3603 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3604 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3605 break;
3606 default:
3607 AssertMsgFailed(("%Rrc\n", rc));
3608 AssertReturn(RT_FAILURE(rc), VERR_INTERNAL_ERROR);
3609 return rc;
3610 }
3611
3612 if (!(fFlags1 & X86_PTE_A))
3613 {
3614 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3615 AssertRC(rc);
3616 }
3617 if (!(fFlags2 & X86_PTE_A))
3618 {
3619 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3620 AssertRC(rc);
3621 }
3622 return VINF_SUCCESS;
3623 }
3624 /* sort out which page */
3625 }
3626 else
3627 GCPtrSrc += cb1; /* fault on 2nd page */
3628 }
3629 }
3630
3631 /*
3632 * Raise a #PF if we're allowed to do that.
3633 */
3634 /* Calc the error bits. */
3635 uint32_t cpl = CPUMGetGuestCPL(pVM, pCtxCore);
3636 uint32_t uErr;
3637 switch (rc)
3638 {
3639 case VINF_SUCCESS:
3640 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3641 rc = VERR_ACCESS_DENIED;
3642 break;
3643
3644 case VERR_PAGE_NOT_PRESENT:
3645 case VERR_PAGE_TABLE_NOT_PRESENT:
3646 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3647 break;
3648
3649 default:
3650 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3651 AssertReturn(RT_FAILURE(rc), VERR_INTERNAL_ERROR);
3652 return rc;
3653 }
3654 if (fRaiseTrap)
3655 {
3656 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3657 return TRPMRaiseXcptErrCR2(pVM, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3658 }
3659 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3660 return rc;
3661}
3662
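/*
 * Editor's sketch (illustrative, not part of the original file): one way an
 * instruction emulator might fetch a small operand with the function above.
 * The caller context, the GCPtrOperand variable and the surrounding control
 * flow are hypothetical; the status handling follows the @retval list in the
 * function documentation.
 */
#if 0 /* illustrative sketch only */
{
    uint32_t u32Operand;
    int rcRead = PGMPhysInterpretedReadNoHandlers(pVM, pCtxCore, &u32Operand,
                                                  GCPtrOperand /* hypothetical guest VA */,
                                                  sizeof(u32Operand), true /* fRaiseTrap */);
    if (rcRead == VINF_SUCCESS)
    {
        /* u32Operand now holds the guest bytes; continue emulating. */
    }
    else if (   rcRead == VINF_EM_RAW_GUEST_TRAP
             || rcRead == VINF_TRPM_XCPT_DISPATCHED)
        return rcRead;  /* a #PF was raised for the guest; let the caller reschedule. */
    else
        return rcRead;  /* hard VBox error status from the page walk or mapping. */
}
#endif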
3663
3664/**
3665 * Performs a write to guest virtual memory for instruction emulation.
3666 *
3667 * This will check permissions, raise exceptions and update the dirty and access
3668 * bits.
3669 *
3670 * @returns VBox status code suitable to scheduling.
3671 * @retval VINF_SUCCESS if the write was performed successfully.
3672 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3673 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3674 *
3675 * @param pVM The VM handle.
3676 * @param pCtxCore The context core.
3677 * @param GCPtrDst The destination address.
3678 * @param pvSrc What to write.
3679 * @param cb The number of bytes to write. Not more than a page.
3680 * @param fRaiseTrap If set the trap will be raised as per spec, if clear
3681 * an appropriate error status will be returned (no
3682 * informational status at all).
3683 *
3684 * @remarks Takes the PGM lock.
3685 * @remarks A page fault on the 2nd page of the access will be raised without
3686 * writing the bits on the first page since we're ASSUMING that the
3687 * caller is emulating an instruction access.
3688 * @remarks This function will dynamically map physical pages in GC. This may
3689 * unmap mappings done by the caller. Be careful!
3690 */
3691VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVM pVM, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
3692{
3693 Assert(cb <= PAGE_SIZE);
3694
3695 /*
3696 * 1. Translate virtual to physical. This may fault.
3697 * 2. Map the physical address.
3698 * 3. Do the write operation.
3699 * 4. Set access bits if required.
3700 */
3701 int rc;
3702 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3703 if (cb <= cb1)
3704 {
3705 /*
3706 * Not crossing pages.
3707 */
3708 RTGCPHYS GCPhys;
3709 uint64_t fFlags;
3710 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrDst, &fFlags, &GCPhys);
3711 if (RT_SUCCESS(rc))
3712 {
3713 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3714 || ( !(CPUMGetGuestCR0(pVM) & X86_CR0_WP)
3715 && CPUMGetGuestCPL(pVM, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3716 {
3717 void *pvDst;
3718 PGMPAGEMAPLOCK Lock;
3719 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3720 switch (rc)
3721 {
3722 case VINF_SUCCESS:
3723 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3724 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3725 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3726 PGMPhysReleasePageMappingLock(pVM, &Lock);
3727 break;
3728 case VERR_PGM_PHYS_PAGE_RESERVED:
3729 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3730 /* bit bucket */
3731 break;
3732 default:
3733 AssertMsgFailed(("%Rrc\n", rc));
3734 AssertReturn(RT_FAILURE(rc), VERR_INTERNAL_ERROR);
3735 return rc;
3736 }
3737
3738 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3739 {
3740 /** @todo dirty & access bit emulation isn't 100% correct. */
3741 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3742 AssertRC(rc);
3743 }
3744 return VINF_SUCCESS;
3745 }
3746 rc = VERR_ACCESS_DENIED;
3747 }
3748 }
3749 else
3750 {
3751 /*
3752 * Crosses pages.
3753 */
3754 size_t cb2 = cb - cb1;
3755 uint64_t fFlags1;
3756 RTGCPHYS GCPhys1;
3757 uint64_t fFlags2;
3758 RTGCPHYS GCPhys2;
3759 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrDst, &fFlags1, &GCPhys1);
3760 if (RT_SUCCESS(rc))
3761 {
3762 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3763 if (RT_SUCCESS(rc))
3764 {
3765 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3766 && (fFlags2 & X86_PTE_RW))
3767 || ( !(CPUMGetGuestCR0(pVM) & X86_CR0_WP)
3768 && CPUMGetGuestCPL(pVM, pCtxCore) <= 2) )
3769 {
3770 void *pvDst;
3771 PGMPAGEMAPLOCK Lock;
3772 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3773 switch (rc)
3774 {
3775 case VINF_SUCCESS:
3776 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3777 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3778 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3779 PGMPhysReleasePageMappingLock(pVM, &Lock);
3780 break;
3781 case VERR_PGM_PHYS_PAGE_RESERVED:
3782 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3783 /* bit bucket */
3784 break;
3785 default:
3786 AssertMsgFailed(("%Rrc\n", rc));
3787 AssertReturn(RT_FAILURE(rc), VERR_INTERNAL_ERROR);
3788 return rc;
3789 }
3790
3791 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3792 switch (rc)
3793 {
3794 case VINF_SUCCESS:
3795 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3796 PGMPhysReleasePageMappingLock(pVM, &Lock);
3797 break;
3798 case VERR_PGM_PHYS_PAGE_RESERVED:
3799 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3800 /* bit bucket */
3801 break;
3802 default:
3803 AssertMsgFailed(("%Rrc\n", rc));
3804 AssertReturn(RT_FAILURE(rc), VERR_INTERNAL_ERROR);
3805 return rc;
3806 }
3807
3808 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3809 {
3810 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3811 AssertRC(rc);
3812 }
3813 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3814 {
3815 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3816 AssertRC(rc);
3817 }
3818 return VINF_SUCCESS;
3819 }
3820 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3821 GCPtrDst += cb1; /* fault on the 2nd page. */
3822 rc = VERR_ACCESS_DENIED;
3823 }
3824 else
3825 GCPtrDst += cb1; /* fault on the 2nd page. */
3826 }
3827 }
3828
3829 /*
3830 * Raise a #PF if we're allowed to do that.
3831 */
3832 /* Calc the error bits. */
3833 uint32_t uErr;
3834 uint32_t cpl = CPUMGetGuestCPL(pVM, pCtxCore);
3835 switch (rc)
3836 {
3837 case VINF_SUCCESS:
3838 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3839 rc = VERR_ACCESS_DENIED;
3840 break;
3841
3842 case VERR_ACCESS_DENIED:
3843 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3844 break;
3845
3846 case VERR_PAGE_NOT_PRESENT:
3847 case VERR_PAGE_TABLE_NOT_PRESENT:
3848 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3849 break;
3850
3851 default:
3852 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3853 AssertReturn(RT_FAILURE(rc), VERR_INTERNAL_ERROR);
3854 return rc;
3855 }
3856 if (fRaiseTrap)
3857 {
3858 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3859 return TRPMRaiseXcptErrCR2(pVM, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3860 }
3861 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3862 return rc;
3863}
3864
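/*
 * Editor's sketch (illustrative, not part of the original file): the write
 * paths above permit the access either when the guest PTE carries X86_PTE_RW,
 * or when CR0.WP is clear and CPUMGetGuestCPL() reports 2 or below (the @todo
 * in the code notes that the exact CPL boundary is still in question).  A
 * condensed, hypothetical helper expressing that predicate:
 */
#if 0 /* illustrative sketch only */
static bool pgmSketchCanWritePage(PVM pVM, PCPUMCTXCORE pCtxCore, uint64_t fPteFlags)
{
    if (fPteFlags & X86_PTE_RW)                     /* PTE allows writes outright. */
        return true;
    return !(CPUMGetGuestCR0(pVM) & X86_CR0_WP)     /* CR0.WP clear: supervisor writes ignore R/O... */
        && CPUMGetGuestCPL(pVM, pCtxCore) <= 2;     /* ...using the same CPL <= 2 test as above. */
}
#endif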
3865