VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp @ 18646

Last change on this file since 18646 was 18617, checked in by vboxsync, 16 years ago

PGM,EM: Handle out of memory situations more gracefully - part 1. New debugger commands: .pgmerror and .pgmerroroff.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 139.0 KB
1/* $Id: PGMAllPhys.cpp 18617 2009-04-01 22:11:29Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Defined Constants And Macros *
24*******************************************************************************/
25/** @def PGM_IGNORE_RAM_FLAGS_RESERVED
26 * Don't respect the MM_RAM_FLAGS_RESERVED flag when converting to HC addresses.
27 *
28 * Since this flag is currently incorrectly kept set for ROM regions we will
29 * have to ignore it for now so we don't break stuff.
30 *
31 * @todo this has been fixed now I believe, remove this hack.
32 */
33#define PGM_IGNORE_RAM_FLAGS_RESERVED
34
35
36/*******************************************************************************
37* Header Files *
38*******************************************************************************/
39#define LOG_GROUP LOG_GROUP_PGM_PHYS
40#include <VBox/pgm.h>
41#include <VBox/trpm.h>
42#include <VBox/vmm.h>
43#include <VBox/iom.h>
44#include <VBox/em.h>
45#include <VBox/rem.h>
46#include "PGMInternal.h"
47#include <VBox/vm.h>
48#include <VBox/param.h>
49#include <VBox/err.h>
50#include <iprt/assert.h>
51#include <iprt/string.h>
52#include <iprt/asm.h>
53#include <VBox/log.h>
54#ifdef IN_RING3
55# include <iprt/thread.h>
56#endif
57
58
59
60#ifndef IN_RING3
61
62/**
63 * \#PF Handler callback for Guest ROM range write access.
64 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
65 *
66 * @returns VBox status code (appropriate for trap handling and GC return).
67 * @param pVM VM Handle.
68 * @param uErrorCode CPU Error code.
69 * @param pRegFrame Trap register frame.
70 * @param pvFault The fault address (cr2).
71 * @param GCPhysFault The GC physical address corresponding to pvFault.
72 * @param pvUser User argument. Pointer to the ROM range structure.
73 */
74VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
75{
76 int rc;
77#ifdef VBOX_WITH_NEW_PHYS_CODE
78 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
79 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
80 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
81 switch (pRom->aPages[iPage].enmProt)
82 {
83 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
84 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
85 {
86#endif
87 /*
88 * If it's a simple instruction which doesn't change the cpu state
89 * we will simply skip it. Otherwise we'll have to defer it to REM.
90 */
91 uint32_t cbOp;
92 DISCPUSTATE Cpu;
93 rc = EMInterpretDisasOne(pVM, pRegFrame, &Cpu, &cbOp);
94 if ( RT_SUCCESS(rc)
95 && Cpu.mode == CPUMODE_32BIT /** @todo why does this matter? */
96 && !(Cpu.prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
97 {
98 switch (Cpu.opcode)
99 {
100 /** @todo Find other instructions we can safely skip, possibly
101 * adding this kind of detection to DIS or EM. */
102 case OP_MOV:
103 pRegFrame->rip += cbOp;
104 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestROMWriteHandled);
105 return VINF_SUCCESS;
106 }
107 }
108 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
109 return rc;
110#ifdef VBOX_WITH_NEW_PHYS_CODE
111 break;
112 }
113
114 case PGMROMPROT_READ_RAM_WRITE_RAM:
115 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
116 AssertRC(rc);
117 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
118
119 case PGMROMPROT_READ_ROM_WRITE_RAM:
120 /* Handle it in ring-3 because it's *way* easier there. */
121 break;
122
123 default:
124 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
125 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
126 VERR_INTERNAL_ERROR);
127 }
128#endif
129
130 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestROMWriteUnhandled);
131 return VINF_EM_RAW_EMULATE_INSTR;
132}
133
134#endif /* !IN_RING3 */
135
136/**
137 * Checks if Address Gate 20 is enabled or not.
138 *
139 * @returns true if enabled.
140 * @returns false if disabled.
141 * @param pVM VM handle.
142 */
143VMMDECL(bool) PGMPhysIsA20Enabled(PVM pVM)
144{
145 LogFlow(("PGMPhysIsA20Enabled %d\n", pVM->pgm.s.fA20Enabled));
146 return pVM->pgm.s.fA20Enabled;
147}
148
149
150/**
151 * Validates a GC physical address.
152 *
153 * @returns true if valid.
154 * @returns false if invalid.
155 * @param pVM The VM handle.
156 * @param GCPhys The physical address to validate.
157 */
158VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
159{
160 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
161 return pPage != NULL;
162}
163
164
165/**
166 * Checks if a GC physical address is a normal page,
167 * i.e. not ROM, MMIO or reserved.
168 *
169 * @returns true if normal.
170 * @returns false if invalid, ROM, MMIO or reserved page.
171 * @param pVM The VM handle.
172 * @param GCPhys The physical address to check.
173 */
174VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
175{
176 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
177#ifdef VBOX_WITH_NEW_PHYS_CODE
178 return pPage
179 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
180#else
181 return pPage
182 && !(pPage->HCPhys & (MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO2));
183#endif
184}
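/*
 * Illustrative usage sketch for the two query APIs above -- a minimal,
 * non-authoritative example. The helper name pgmExampleCanTreatAsRam is made
 * up for the example and a valid VM handle (pVM) is assumed.
 */
#if 0 /* example only */
static bool pgmExampleCanTreatAsRam(PVM pVM, RTGCPHYS GCPhys)
{
    /* Reject addresses that no registered range covers at all. */
    if (!PGMPhysIsGCPhysValid(pVM, GCPhys))
        return false;
    /* Only plain RAM qualifies; ROM, MMIO and reserved pages do not. */
    return PGMPhysIsGCPhysNormal(pVM, GCPhys);
}
#endif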
185
186
187/**
188 * Converts a GC physical address to a HC physical address.
189 *
190 * @returns VINF_SUCCESS on success.
191 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
192 * page but has no physical backing.
193 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
194 * GC physical address.
195 *
196 * @param pVM The VM handle.
197 * @param GCPhys The GC physical address to convert.
198 * @param pHCPhys Where to store the HC physical address on success.
199 */
200VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
201{
202 PPGMPAGE pPage;
203 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
204 if (RT_FAILURE(rc))
205 return rc;
206
207#ifndef PGM_IGNORE_RAM_FLAGS_RESERVED
208 if (RT_UNLIKELY(pPage->HCPhys & MM_RAM_FLAGS_RESERVED)) /** @todo PAGE FLAGS */
209 return VERR_PGM_PHYS_PAGE_RESERVED;
210#endif
211
212 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
213 return VINF_SUCCESS;
214}
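/*
 * Illustrative sketch of converting a guest physical address with the API
 * above and telling the two documented failure modes apart. Example only;
 * the helper name is made up and a valid pVM is assumed.
 */
#if 0 /* example only */
static void pgmExampleLogHCPhys(PVM pVM, RTGCPHYS GCPhys)
{
    RTHCPHYS HCPhys;
    int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
    if (RT_SUCCESS(rc))
        Log(("GCPhys %RGp -> HCPhys %RHp\n", GCPhys, HCPhys));
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        Log(("GCPhys %RGp is valid but has no physical backing\n", GCPhys));
    else
        Log(("GCPhys %RGp is not a valid guest physical address (%Rrc)\n", GCPhys, rc));
}
#endif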
215
216
217/**
218 * Invalidates the GC page mapping TLB.
219 *
220 * @param pVM The VM handle.
221 */
222VMMDECL(void) PGMPhysInvalidatePageGCMapTLB(PVM pVM)
223{
224 /* later */
225 NOREF(pVM);
226}
227
228
229/**
230 * Invalidates the ring-0 page mapping TLB.
231 *
232 * @param pVM The VM handle.
233 */
234VMMDECL(void) PGMPhysInvalidatePageR0MapTLB(PVM pVM)
235{
236 PGMPhysInvalidatePageR3MapTLB(pVM);
237}
238
239
240/**
241 * Invalidates the ring-3 page mapping TLB.
242 *
243 * @param pVM The VM handle.
244 */
245VMMDECL(void) PGMPhysInvalidatePageR3MapTLB(PVM pVM)
246{
247 pgmLock(pVM);
248 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
249 {
250 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
251 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
252 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
253 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
254 }
255 pgmUnlock(pVM);
256}
257
258
259/**
260 * Makes sure that there is at least one handy page ready for use.
261 *
262 * This will also take the appropriate actions when reaching water-marks.
263 *
264 * @returns VBox status code.
265 * @retval VINF_SUCCESS on success.
266 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
267 *
268 * @param pVM The VM handle.
269 *
270 * @remarks Must be called from within the PGM critical section. It may
271 * nip back to ring-3/0 in some cases.
272 */
273static int pgmPhysEnsureHandyPage(PVM pVM)
274{
275 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
276
277 /*
278 * Do we need to do anything special?
279 */
280#ifdef IN_RING3
281 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
282#else
283 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
284#endif
285 {
286 /*
287 * Allocate pages only if we're out of them, or in ring-3, almost out.
288 */
289#ifdef IN_RING3
290 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
291#else
292 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
293#endif
294 {
295 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
296 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
297#ifdef IN_RING3
298 int rc = PGMR3PhysAllocateHandyPages(pVM);
299#elif defined(IN_RING0)
300 int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
301#else
302 int rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
303#endif
304 if (RT_UNLIKELY(rc != VINF_SUCCESS))
305 {
306 if (RT_FAILURE(rc))
307 return rc;
308 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
309 if (!pVM->pgm.s.cHandyPages)
310 {
311 LogRel(("PGM: no more handy pages!\n"));
312 return VERR_EM_NO_MEMORY;
313 }
314 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
315 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
316#ifdef IN_RING3
317 REMR3NotifyFF(pVM);
318#else
319 VM_FF_SET(pVM, VM_FF_TO_R3); /* paranoia */
320#endif
321 }
322 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
323 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
324 ("%u\n", pVM->pgm.s.cHandyPages),
325 VERR_INTERNAL_ERROR);
326 }
327 else
328 {
329 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
330 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
331#ifndef IN_RING3
332 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
333 {
334 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
335 VM_FF_SET(pVM, VM_FF_TO_R3);
336 }
337#endif
338 }
339 }
340
341 return VINF_SUCCESS;
342}
343
344
345/**
346 * Replace a zero or shared page with a new page that we can write to.
347 *
348 * @returns The following VBox status codes.
349 * @retval VINF_SUCCESS on success, pPage is modified.
350 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
351 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
352 *
353 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
354 *
355 * @param pVM The VM address.
356 * @param pPage The physical page tracking structure. This will
357 * be modified on success.
358 * @param GCPhys The address of the page.
359 *
360 * @remarks Must be called from within the PGM critical section. It may
361 * nip back to ring-3/0 in some cases.
362 *
363 * @remarks This function shouldn't really fail, however if it does
364 * it probably means we've screwed up the size of the handy
365 * page array and/or its low-water mark. Or, that some
366 * device I/O is causing a lot of pages to be allocated
367 * while the host is in a low-memory condition.
368 */
369int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
370{
371 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
372
373 /*
374 * Prereqs.
375 */
376 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
377 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
378 Assert(!PGM_PAGE_IS_MMIO(pPage));
379
380
381 /*
382 * Flush any shadow page table mappings of the page.
383 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
384 */
385 bool fFlushTLBs = false;
386 int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
387 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
388
389 /*
390 * Ensure that we've got a page handy, take it and use it.
391 */
392 int rc2 = pgmPhysEnsureHandyPage(pVM);
393 if (RT_FAILURE(rc2))
394 {
395 if (fFlushTLBs)
396 PGM_INVL_GUEST_TLBS();
397 Assert(rc2 == VERR_EM_NO_MEMORY);
398 return rc2;
399 }
400 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
401 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
402 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
403 Assert(!PGM_PAGE_IS_MMIO(pPage));
404
405 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
406 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
407 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
408 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
409 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
410 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
411
412 /*
413 * There are one or two actions to be taken the next time we allocate handy pages:
414 * - Tell the GMM (global memory manager) what the page is being used for.
415 * (Speeds up replacement operations - sharing and defragmenting.)
416 * - If the current backing is shared, it must be freed.
417 */
418 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
419 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
420
421 if (PGM_PAGE_IS_SHARED(pPage))
422 {
423 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
424 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
425 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
426
427 Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
428 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
429 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageReplaceShared));
430 pVM->pgm.s.cSharedPages--;
431 AssertMsgFailed(("TODO: copy shared page content")); /** @todo err.. what about copying the page content? */
432 }
433 else
434 {
435 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
436 STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
437 pVM->pgm.s.cZeroPages--;
438 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
439 }
440
441 /*
442 * Do the PGMPAGE modifications.
443 */
444 pVM->pgm.s.cPrivatePages++;
445 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
446 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
447 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
448
449 if ( fFlushTLBs
450 && rc != VINF_PGM_GCPHYS_ALIASED)
451 PGM_INVL_GUEST_TLBS();
452 return rc;
453}
454
455
456/**
457 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
458 *
459 * @returns VBox status code.
460 * @retval VINF_SUCCESS on success.
461 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
462 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
463 *
464 * @param pVM The VM address.
465 * @param pPage The physical page tracking structure.
466 * @param GCPhys The address of the page.
467 *
468 * @remarks Called from within the PGM critical section.
469 */
470int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
471{
472 switch (PGM_PAGE_GET_STATE(pPage))
473 {
474 case PGM_PAGE_STATE_WRITE_MONITORED:
475 PGM_PAGE_SET_WRITTEN_TO(pPage);
476 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
477 /* fall thru */
478 default: /* to shut up GCC */
479 case PGM_PAGE_STATE_ALLOCATED:
480 return VINF_SUCCESS;
481
482 /*
483 * Zero pages can be dummy pages for MMIO or reserved memory,
484 * so we need to check the flags before joining cause with
485 * shared page replacement.
486 */
487 case PGM_PAGE_STATE_ZERO:
488 if (PGM_PAGE_IS_MMIO(pPage))
489 return VERR_PGM_PHYS_PAGE_RESERVED;
490 /* fall thru */
491 case PGM_PAGE_STATE_SHARED:
492 return pgmPhysAllocPage(pVM, pPage, GCPhys);
493 }
494}
495
496
497/**
498 * Wrapper for pgmPhysPageMakeWritable which enters the critsect.
499 *
500 * @returns VBox status code.
501 * @retval VINF_SUCCESS on success.
502 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
503 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
504 *
505 * @param pVM The VM address.
506 * @param pPage The physical page tracking structure.
507 * @param GCPhys The address of the page.
508 */
509int pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
510{
511 int rc = pgmLock(pVM);
512 if (RT_SUCCESS(rc))
513 {
514 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
515 pgmUnlock(pVM);
516 }
517 return rc;
518}
519
520
521/**
522 * Internal usage: Map the page specified by its GMM ID.
523 *
524 * This is similar to pgmPhysPageMap.
525 *
526 * @returns VBox status code.
527 *
528 * @param pVM The VM handle.
529 * @param idPage The Page ID.
530 * @param HCPhys The physical address (for RC).
531 * @param ppv Where to store the mapping address.
532 *
533 * @remarks Called from within the PGM critical section.
534 */
535int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
536{
537 /*
538 * Validation.
539 */
540 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
541 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
542 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
543 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
544
545#ifdef IN_RC
546 /*
547 * Map it by HCPhys.
548 */
549 return PGMDynMapHCPage(pVM, HCPhys, ppv);
550
551#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
552 /*
553 * Map it by HCPhys.
554 */
555 return pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
556
557#else
558 /*
559 * Find/make Chunk TLB entry for the mapping chunk.
560 */
561 PPGMCHUNKR3MAP pMap;
562 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
563 if (pTlbe->idChunk == idChunk)
564 {
565 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
566 pMap = pTlbe->pChunk;
567 }
568 else
569 {
570 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
571
572 /*
573 * Find the chunk, map it if necessary.
574 */
575 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
576 if (!pMap)
577 {
578# ifdef IN_RING0
579 int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_MAP_CHUNK, idChunk);
580 AssertRCReturn(rc, rc);
581 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
582 Assert(pMap);
583# else
584 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
585 if (RT_FAILURE(rc))
586 return rc;
587# endif
588 }
589
590 /*
591 * Enter it into the Chunk TLB.
592 */
593 pTlbe->idChunk = idChunk;
594 pTlbe->pChunk = pMap;
595 pMap->iAge = 0;
596 }
597
598 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
599 return VINF_SUCCESS;
600#endif
601}
602
603
604/**
605 * Maps a page into the current virtual address space so it can be accessed.
606 *
607 * @returns VBox status code.
608 * @retval VINF_SUCCESS on success.
609 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
610 *
611 * @param pVM The VM address.
612 * @param pPage The physical page tracking structure.
613 * @param GCPhys The address of the page.
614 * @param ppMap Where to store the address of the mapping tracking structure.
615 * @param ppv Where to store the mapping address of the page. The page
616 * offset is masked off!
617 *
618 * @remarks Called from within the PGM critical section.
619 */
620int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
621{
622 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect));
623
624#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
625 /*
626 * Just some sketchy GC/R0-darwin code.
627 */
628 *ppMap = NULL;
629 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
630 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
631# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
632 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
633# else
634 PGMDynMapHCPage(pVM, HCPhys, ppv);
635# endif
636 return VINF_SUCCESS;
637
638#else /* IN_RING3 || IN_RING0 */
639
640
641 /*
642 * Special case: ZERO and MMIO2 pages.
643 */
644 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
645 if (idChunk == NIL_GMM_CHUNKID)
646 {
647 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
648 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
649 {
650 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
651 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
652 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
653 *ppv = (void *)((uintptr_t)pRam->pvR3 + (GCPhys - pRam->GCPhys));
654 }
655 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
656 {
657 /** @todo deal with aliased MMIO2 pages somehow...
658 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
659 * them, that would also avoid this mess. It would actually be kind of
660 * elegant... */
661 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
662 }
663 else
664 {
665 /** @todo handle MMIO2 */
666 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
667 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
668 ("pPage=%R[pgmpage]\n", pPage),
669 VERR_INTERNAL_ERROR_2);
670 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
671 }
672 *ppMap = NULL;
673 return VINF_SUCCESS;
674 }
675
676 /*
677 * Find/make Chunk TLB entry for the mapping chunk.
678 */
679 PPGMCHUNKR3MAP pMap;
680 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
681 if (pTlbe->idChunk == idChunk)
682 {
683 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
684 pMap = pTlbe->pChunk;
685 }
686 else
687 {
688 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
689
690 /*
691 * Find the chunk, map it if necessary.
692 */
693 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
694 if (!pMap)
695 {
696#ifdef IN_RING0
697 int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_MAP_CHUNK, idChunk);
698 AssertRCReturn(rc, rc);
699 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
700 Assert(pMap);
701#else
702 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
703 if (RT_FAILURE(rc))
704 return rc;
705#endif
706 }
707
708 /*
709 * Enter it into the Chunk TLB.
710 */
711 pTlbe->idChunk = idChunk;
712 pTlbe->pChunk = pMap;
713 pMap->iAge = 0;
714 }
715
716 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
717 *ppMap = pMap;
718 return VINF_SUCCESS;
719#endif /* IN_RING3 */
720}
721
722
723#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
724/**
725 * Load a guest page into the ring-3 physical TLB.
726 *
727 * @returns VBox status code.
728 * @retval VINF_SUCCESS on success
729 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
730 * @param pPGM The PGM instance pointer.
731 * @param GCPhys The guest physical address in question.
732 */
733int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
734{
735 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
736
737 /*
738 * Find the ram range.
739 * 99.8% of requests are expected to be in the first range.
740 */
741 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
742 RTGCPHYS off = GCPhys - pRam->GCPhys;
743 if (RT_UNLIKELY(off >= pRam->cb))
744 {
745 do
746 {
747 pRam = pRam->CTX_SUFF(pNext);
748 if (!pRam)
749 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
750 off = GCPhys - pRam->GCPhys;
751 } while (off >= pRam->cb);
752 }
753
754 /*
755 * Map the page.
756 * Make a special case for the zero page as it is kind of special.
757 */
758 PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
759 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
760 if (!PGM_PAGE_IS_ZERO(pPage))
761 {
762 void *pv;
763 PPGMPAGEMAP pMap;
764 int rc = pgmPhysPageMap(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
765 if (RT_FAILURE(rc))
766 return rc;
767 pTlbe->pMap = pMap;
768 pTlbe->pv = pv;
769 }
770 else
771 {
772 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
773 pTlbe->pMap = NULL;
774 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
775 }
776 pTlbe->pPage = pPage;
777 return VINF_SUCCESS;
778}
779
780
781/**
782 * Load a guest page into the ring-3 physical TLB.
783 *
784 * @returns VBox status code.
785 * @retval VINF_SUCCESS on success
786 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
787 *
788 * @param pPGM The PGM instance pointer.
789 * @param pPage Pointer to the PGMPAGE structure corresponding to
790 * GCPhys.
791 * @param GCPhys The guest physical address in question.
792 */
793int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys)
794{
795 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
796
797 /*
798 * Map the page.
799 * Make a special case for the zero page as it is kind of special.
800 */
801 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
802 if (!PGM_PAGE_IS_ZERO(pPage))
803 {
804 void *pv;
805 PPGMPAGEMAP pMap;
806 int rc = pgmPhysPageMap(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
807 if (RT_FAILURE(rc))
808 return rc;
809 pTlbe->pMap = pMap;
810 pTlbe->pv = pv;
811 }
812 else
813 {
814 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
815 pTlbe->pMap = NULL;
816 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
817 }
818 pTlbe->pPage = pPage;
819 return VINF_SUCCESS;
820}
821#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
822
823
824/**
825 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
826 * own the PGM lock and therefore not need to lock the mapped page.
827 *
828 * @returns VBox status code.
829 * @retval VINF_SUCCESS on success.
830 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
831 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
832 *
833 * @param pVM The VM handle.
834 * @param GCPhys The guest physical address of the page that should be mapped.
835 * @param pPage Pointer to the PGMPAGE structure for the page.
836 * @param ppv Where to store the address corresponding to GCPhys.
837 *
838 * @internal
839 */
840int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
841{
842 int rc;
843 AssertReturn(pPage, VERR_INTERNAL_ERROR);
844 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect) || VM_IS_EMT(pVM));
845
846 /*
847 * Make sure the page is writable.
848 */
849 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
850 {
851 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
852 if (RT_FAILURE(rc))
853 return rc;
854 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
855 }
856 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
857
858 /*
859 * Get the mapping address.
860 */
861#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
862 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
863#else
864 PPGMPAGEMAPTLBE pTlbe;
865 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
866 if (RT_FAILURE(rc))
867 return rc;
868 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
869#endif
870 return VINF_SUCCESS;
871}
872
873
874/**
875 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
876 * own the PGM lock and therefore not need to lock the mapped page.
877 *
878 * @returns VBox status code.
879 * @retval VINF_SUCCESS on success.
880 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
881 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
882 *
883 * @param pVM The VM handle.
884 * @param GCPhys The guest physical address of the page that should be mapped.
885 * @param pPage Pointer to the PGMPAGE structure for the page.
886 * @param ppv Where to store the address corresponding to GCPhys.
887 *
888 * @internal
889 */
890int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
891{
892 AssertReturn(pPage, VERR_INTERNAL_ERROR);
893 Assert(PDMCritSectIsOwner(&pVM->pgm.s.CritSect) || VM_IS_EMT(pVM));
894 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
895
896 /*
897 * Get the mapping address.
898 */
899#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
900 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
901#else
902 PPGMPAGEMAPTLBE pTlbe;
903 int rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
904 if (RT_FAILURE(rc))
905 return rc;
906 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
907#endif
908 return VINF_SUCCESS;
909}
910
911
912/**
913 * Requests the mapping of a guest page into the current context.
914 *
915 * This API should only be used for very short-term access, as it will consume
916 * scarce resources (R0 and GC) in the mapping cache. When you're done
917 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
918 *
919 * This API will assume your intention is to write to the page, and will
920 * therefore replace shared and zero pages. If you do not intend to modify
921 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
922 *
923 * @returns VBox status code.
924 * @retval VINF_SUCCESS on success.
925 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
926 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
927 *
928 * @param pVM The VM handle.
929 * @param GCPhys The guest physical address of the page that should be mapped.
930 * @param ppv Where to store the address corresponding to GCPhys.
931 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
932 *
933 * @remarks The caller is responsible for dealing with access handlers.
934 * @todo Add an informational return code for pages with access handlers?
935 *
936 * @remark Avoid calling this API from within critical sections (other than the
937 * PGM one) because of the deadlock risk. External threads may need to
938 * delegate jobs to the EMTs.
939 * @thread Any thread.
940 */
941VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
942{
943#ifdef VBOX_WITH_NEW_PHYS_CODE
944# if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
945
946 /*
947 * Find the page and make sure it's writable.
948 */
949 PPGMPAGE pPage;
950 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
951 if (RT_SUCCESS(rc))
952 {
953 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
954 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
955 if (RT_SUCCESS(rc))
956 {
957 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
958#if 0
959 pLock->pvMap = 0;
960 pLock->pvPage = pPage;
961#else
962 pLock->u32Dummy = UINT32_MAX;
963#endif
964 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
965 rc = VINF_SUCCESS;
966 }
967 }
968
969# else /* IN_RING3 || IN_RING0 */
970 int rc = pgmLock(pVM);
971 AssertRCReturn(rc, rc);
972
973 /*
974 * Query the Physical TLB entry for the page (may fail).
975 */
976 PPGMPAGEMAPTLBE pTlbe;
977 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
978 if (RT_SUCCESS(rc))
979 {
980 /*
981 * If the page is shared, the zero page, or being write monitored
982 * it must be converted to a page that's writable if possible.
983 */
984 PPGMPAGE pPage = pTlbe->pPage;
985 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
986 {
987 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
988 if (RT_SUCCESS(rc))
989 {
990 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
991 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
992 }
993 }
994 if (RT_SUCCESS(rc))
995 {
996 /*
997 * Now, just perform the locking and calculate the return address.
998 */
999 PPGMPAGEMAP pMap = pTlbe->pMap;
1000 if (pMap)
1001 pMap->cRefs++;
1002#if 0 /** @todo implement locking properly */
1003 if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
1004 if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
1005 {
1006 AssertMsgFailed(("%RGp is entering permanent locked state!\n", GCPhys));
1007 if (pMap)
1008 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1009 }
1010#endif
1011 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1012 pLock->pvPage = pPage;
1013 pLock->pvMap = pMap;
1014 }
1015 }
1016
1017 pgmUnlock(pVM);
1018#endif /* IN_RING3 || IN_RING0 */
1019 return rc;
1020
1021#else
1022 /*
1023 * Temporary fallback code.
1024 */
1025# if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1026/** @todo @bugref{3202}: check up this path. */
1027 return PGMDynMapGCPageOff(pVM, GCPhys, ppv);
1028# else
1029 return PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1, (PRTR3PTR)ppv);
1030# endif
1031#endif
1032}
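/*
 * Illustrative sketch of the map / modify / release pattern this API is
 * meant for. Example only; the helper name is made up, a valid pVM is
 * assumed, and access handlers are ignored for brevity.
 */
#if 0 /* example only */
static int pgmExamplePokeByte(PVM pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;                    /* zero/shared pages have been replaced, so this is writable */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release ASAP as the docs request */
    }
    return rc;
}
#endif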
1033
1034
1035/**
1036 * Requests the mapping of a guest page into the current context.
1037 *
1038 * This API should only be used for very short-term access, as it will consume
1039 * scarce resources (R0 and GC) in the mapping cache. When you're done
1040 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1041 *
1042 * @returns VBox status code.
1043 * @retval VINF_SUCCESS on success.
1044 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1045 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1046 *
1047 * @param pVM The VM handle.
1048 * @param GCPhys The guest physical address of the page that should be mapped.
1049 * @param ppv Where to store the address corresponding to GCPhys.
1050 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1051 *
1052 * @remarks The caller is responsible for dealing with access handlers.
1053 * @todo Add an informational return code for pages with access handlers?
1054 *
1055 * @remark Avoid calling this API from within critical sections (other than
1056 * the PGM one) because of the deadlock risk.
1057 * @thread Any thread.
1058 */
1059VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1060{
1061#ifdef VBOX_WITH_NEW_PHYS_CODE
1062# if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1063
1064 /*
1065 * Find the page and make sure it's readable.
1066 */
1067 PPGMPAGE pPage;
1068 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1069 if (RT_SUCCESS(rc))
1070 {
1071 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1072 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1073 else
1074 {
1075 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1076#if 0
1077 pLock->pvMap = 0;
1078 pLock->pvPage = pPage;
1079#else
1080 pLock->u32Dummy = UINT32_MAX;
1081#endif
1082 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1083 rc = VINF_SUCCESS;
1084 }
1085 }
1086
1087# else /* IN_RING3 || IN_RING0 */
1088 int rc = pgmLock(pVM);
1089 AssertRCReturn(rc, rc);
1090
1091 /*
1092 * Query the Physical TLB entry for the page (may fail).
1093 */
1094 PPGMPAGEMAPTLBE pTlbe;
1095 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1096 if (RT_SUCCESS(rc))
1097 {
1098 /* MMIO pages don't have any readable backing. */
1099 PPGMPAGE pPage = pTlbe->pPage;
1100 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1101 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1102 else
1103 {
1104 /*
1105 * Now, just perform the locking and calculate the return address.
1106 */
1107 PPGMPAGEMAP pMap = pTlbe->pMap;
1108 if (pMap)
1109 pMap->cRefs++;
1110#if 0 /** @todo implement locking properly */
1111 if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
1112 if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
1113 {
1114 AssertMsgFailed(("%RGp is entering permanent locked state!\n", GCPhys));
1115 if (pMap)
1116 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1117 }
1118#endif
1119 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1120 pLock->pvPage = pPage;
1121 pLock->pvMap = pMap;
1122 }
1123 }
1124
1125 pgmUnlock(pVM);
1126#endif /* IN_RING3 || IN_RING0 */
1127 return rc;
1128
1129#else /* !VBOX_WITH_NEW_PHYS_CODE */
1130 /*
1131 * Fallback code.
1132 */
1133 return PGMPhysGCPhys2CCPtr(pVM, GCPhys, (void **)ppv, pLock);
1134#endif /* !VBOX_WITH_NEW_PHYS_CODE */
1135}
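/*
 * Read-only counterpart of the sketch after PGMPhysGCPhys2CCPtr: peek at a
 * byte without forcing the page out of the zero or shared state. Example
 * only; the helper name is made up and a valid pVM is assumed.
 */
#if 0 /* example only */
static int pgmExamplePeekByte(PVM pVM, RTGCPHYS GCPhys, uint8_t *pbValue)
{
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pbValue = *(uint8_t const *)pv;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif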
1136
1137
1138/**
1139 * Requests the mapping of a guest page given by virtual address into the current context.
1140 *
1141 * This API should only be used for very short-term access, as it will consume
1142 * scarce resources (R0 and GC) in the mapping cache. When you're done
1143 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1144 *
1145 * This API will assume your intention is to write to the page, and will
1146 * therefore replace shared and zero pages. If you do not intend to modify
1147 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1148 *
1149 * @returns VBox status code.
1150 * @retval VINF_SUCCESS on success.
1151 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1152 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1153 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1154 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1155 *
1156 * @param pVM The VM handle.
1157 * @param GCPtr The guest virtual address of the page that should be mapped.
1158 * @param ppv Where to store the address corresponding to GCPtr.
1159 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1160 *
1161 * @remark Avoid calling this API from within critical sections (other than
1162 * the PGM one) because of the deadlock risk.
1163 * @thread EMT
1164 */
1165VMMDECL(int) PGMPhysGCPtr2CCPtr(PVM pVM, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1166{
1167 VM_ASSERT_EMT(pVM);
1168 RTGCPHYS GCPhys;
1169 int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
1170 if (RT_SUCCESS(rc))
1171 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, pLock);
1172 return rc;
1173}
1174
1175
1176/**
1177 * Requests the mapping of a guest page given by virtual address into the current context.
1178 *
1179 * This API should only be used for very short-term access, as it will consume
1180 * scarce resources (R0 and GC) in the mapping cache. When you're done
1181 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1182 *
1183 * @returns VBox status code.
1184 * @retval VINF_SUCCESS on success.
1185 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1186 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1187 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1188 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1189 *
1190 * @param pVM The VM handle.
1191 * @param GCPtr The guest virtual address of the page that should be mapped.
1192 * @param ppv Where to store the address corresponding to GCPtr.
1193 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1194 *
1195 * @remark Avoid calling this API from within critical sections (other than
1196 * the PGM one) because of the deadlock risk.
1197 * @thread EMT
1198 */
1199VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVM pVM, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1200{
1201 VM_ASSERT_EMT(pVM);
1202 RTGCPHYS GCPhys;
1203 int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
1204 if (RT_SUCCESS(rc))
1205 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, pLock);
1206 return rc;
1207}
1208
1209
1210/**
1211 * Release the mapping of a guest page.
1212 *
1213 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1214 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1215 *
1216 * @param pVM The VM handle.
1217 * @param pLock The lock structure initialized by the mapping function.
1218 */
1219VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1220{
1221#ifdef VBOX_WITH_NEW_PHYS_CODE
1222#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1223 /* currently nothing to do here. */
1224 Assert(pLock->u32Dummy == UINT32_MAX);
1225 pLock->u32Dummy = 0;
1226
1227#else /* IN_RING3 */
1228 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1229 if (!pMap)
1230 {
1231 /* The ZERO page and MMIO2 end up here. */
1232 Assert(pLock->pvPage);
1233 pLock->pvPage = NULL;
1234 }
1235 else
1236 {
1237 pgmLock(pVM);
1238
1239# if 0 /** @todo implement page locking */
1240 PPGMPAGE pPage = (PPGMPAGE)pLock->pvPage;
1241 Assert(pPage->cLocks >= 1);
1242 if (pPage->cLocks != PGM_PAGE_MAX_LOCKS)
1243 pPage->cLocks--;
1244# endif
1245
1246 Assert(pMap->cRefs >= 1);
1247 pMap->cRefs--;
1248 pMap->iAge = 0;
1249
1250 pgmUnlock(pVM);
1251 }
1252#endif /* IN_RING3 */
1253#else
1254 NOREF(pVM);
1255 NOREF(pLock);
1256#endif
1257}
1258
1259
1260/**
1261 * Converts a GC physical address to a HC ring-3 pointer.
1262 *
1263 * @returns VINF_SUCCESS on success.
1264 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1265 * page but has no physical backing.
1266 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1267 * GC physical address.
1268 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1269 * a dynamic ram chunk boundary.
1270 *
1271 * @param pVM The VM handle.
1272 * @param GCPhys The GC physical address to convert.
1273 * @param cbRange Physical range
1274 * @param pR3Ptr Where to store the R3 pointer on success.
1275 *
1276 * @deprecated Avoid when possible!
1277 */
1278VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
1279{
1280#ifdef VBOX_WITH_NEW_PHYS_CODE
1281/** @todo this is kind of hacky and needs some more work. */
1282 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1283
1284 Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): don't use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
1285# if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1286 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1287# else
1288 pgmLock(pVM);
1289
1290 PPGMRAMRANGE pRam;
1291 PPGMPAGE pPage;
1292 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
1293 if (RT_SUCCESS(rc))
1294 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
1295
1296 pgmUnlock(pVM);
1297 Assert(rc <= VINF_SUCCESS);
1298 return rc;
1299# endif
1300
1301#else /* !VBOX_WITH_NEW_PHYS_CODE */
1302
1303 if ((GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK) != ((GCPhys+cbRange-1) & PGM_DYNAMIC_CHUNK_BASE_MASK))
1304 {
1305 AssertMsgFailed(("%RGp - %RGp crosses a chunk boundary!!\n", GCPhys, GCPhys+cbRange));
1306 LogRel(("PGMPhysGCPhys2HCPtr %RGp - %RGp crosses a chunk boundary!!\n", GCPhys, GCPhys+cbRange));
1307 return VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY;
1308 }
1309
1310 PPGMRAMRANGE pRam;
1311 PPGMPAGE pPage;
1312 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
1313 if (RT_FAILURE(rc))
1314 return rc;
1315
1316#ifndef PGM_IGNORE_RAM_FLAGS_RESERVED
1317 if (RT_UNLIKELY(PGM_PAGE_IS_RESERVED(pPage)))
1318 return VERR_PGM_PHYS_PAGE_RESERVED;
1319#endif
1320
1321 RTGCPHYS off = GCPhys - pRam->GCPhys;
1322 if (RT_UNLIKELY(off + cbRange > pRam->cb))
1323 {
1324 AssertMsgFailed(("%RGp - %RGp crosses a chunk boundary!!\n", GCPhys, GCPhys + cbRange));
1325 return VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY;
1326 }
1327
1328 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1329 {
1330 unsigned iChunk = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
1331#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) /* ASSUMES this is a rare occurence */
1332 PRTR3UINTPTR paChunkR3Ptrs = (PRTR3UINTPTR)MMHyperR3ToCC(pVM, pRam->paChunkR3Ptrs);
1333 *pR3Ptr = (RTR3PTR)(paChunkR3Ptrs[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
1334#else
1335 *pR3Ptr = (RTR3PTR)(pRam->paChunkR3Ptrs[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
1336#endif
1337 }
1338 else if (RT_LIKELY(pRam->pvR3))
1339 *pR3Ptr = (RTR3PTR)((RTR3UINTPTR)pRam->pvR3 + off);
1340 else
1341 return VERR_PGM_PHYS_PAGE_RESERVED;
1342 return VINF_SUCCESS;
1343#endif /* !VBOX_WITH_NEW_PHYS_CODE */
1344}
1345
1346
1347#ifdef VBOX_STRICT
1348/**
1349 * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
1350 *
1351 * @returns The R3Ptr, NIL_RTR3PTR on failure.
1352 * @param pVM The VM handle.
1353 * @param GCPhys The GC physical address.
1354 * @param cbRange Physical range.
1355 *
1356 * @deprecated Avoid when possible.
1357 */
1358VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
1359{
1360 RTR3PTR R3Ptr;
1361 int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
1362 if (RT_SUCCESS(rc))
1363 return R3Ptr;
1364 return NIL_RTR3PTR;
1365}
1366#endif /* VBOX_STRICT */
1367
1368
1369/**
1370 * Converts a guest pointer to a GC physical address.
1371 *
1372 * This uses the current CR3/CR0/CR4 of the guest.
1373 *
1374 * @returns VBox status code.
1375 * @param pVM The VM Handle
1376 * @param GCPtr The guest pointer to convert.
1377 * @param pGCPhys Where to store the GC physical address.
1378 */
1379VMMDECL(int) PGMPhysGCPtr2GCPhys(PVM pVM, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1380{
1381 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1382 if (pGCPhys && RT_SUCCESS(rc))
1383 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1384 return rc;
1385}
1386
1387
1388/**
1389 * Converts a guest pointer to a HC physical address.
1390 *
1391 * This uses the current CR3/CR0/CR4 of the guest.
1392 *
1393 * @returns VBox status code.
1394 * @param pVM The VM Handle
1395 * @param GCPtr The guest pointer to convert.
1396 * @param pHCPhys Where to store the HC physical address.
1397 */
1398VMMDECL(int) PGMPhysGCPtr2HCPhys(PVM pVM, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
1399{
1400 RTGCPHYS GCPhys;
1401 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1402 if (RT_SUCCESS(rc))
1403 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
1404 return rc;
1405}
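/*
 * Illustrative sketch chaining the guest-pointer conversions above: resolve
 * a guest virtual address through the current CR3/CR0/CR4 to its GC and HC
 * physical addresses. Example only; the helper name is made up and a valid
 * pVM is assumed.
 */
#if 0 /* example only */
static int pgmExampleResolveGCPtr(PVM pVM, RTGCPTR GCPtr)
{
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
    if (RT_SUCCESS(rc))
    {
        RTHCPHYS HCPhys;
        rc = PGMPhysGCPtr2HCPhys(pVM, GCPtr, &HCPhys);
        if (RT_SUCCESS(rc))
            Log(("guest pointer -> GCPhys %RGp -> HCPhys %RHp\n", GCPhys, HCPhys));
    }
    return rc;
}
#endif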
1406
1407
1408/**
1409 * Converts a guest pointer to a R3 pointer.
1410 *
1411 * This uses the current CR3/CR0/CR4 of the guest.
1412 *
1413 * @returns VBox status code.
1414 * @param pVM The VM Handle
1415 * @param GCPtr The guest pointer to convert.
1416 * @param pR3Ptr Where to store the R3 virtual address.
1417 *
1418 * @deprecated Don't use this.
1419 */
1420VMMDECL(int) PGMPhysGCPtr2R3Ptr(PVM pVM, RTGCPTR GCPtr, PRTR3PTR pR3Ptr)
1421{
1422#ifdef VBOX_WITH_NEW_PHYS_CODE
1423 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1424#endif
1425
1426 RTGCPHYS GCPhys;
1427 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1428 if (RT_SUCCESS(rc))
1429 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pR3Ptr);
1430 return rc;
1431}
1432
1433
1434
1435#undef LOG_GROUP
1436#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
1437
1438
1439#ifdef IN_RING3
1440/**
1441 * Cache PGMPhys memory access
1442 *
1443 * @param pVM VM Handle.
1444 * @param pCache Cache structure pointer
1445 * @param GCPhys GC physical address
1446 * @param pbR3 R3 pointer corresponding to the physical page
1447 *
1448 * @thread EMT.
1449 */
1450static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
1451{
1452 uint32_t iCacheIndex;
1453
1454 Assert(VM_IS_EMT(pVM));
1455
1456 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
1457 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
1458
1459 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
1460
1461 ASMBitSet(&pCache->aEntries, iCacheIndex);
1462
1463 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
1464 pCache->Entry[iCacheIndex].pbR3 = pbR3;
1465}
1466#endif /* IN_RING3 */
1467
1468#ifdef VBOX_WITH_NEW_PHYS_CODE
1469
1470/**
1471 * Deals with reading from a page with one or more ALL access handlers.
1472 *
1473 * @returns VBox status code. Can be ignored in ring-3.
1474 * @retval VINF_SUCCESS.
1475 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1476 *
1477 * @param pVM The VM handle.
1478 * @param pPage The page descriptor.
1479 * @param GCPhys The physical address to start reading at.
1480 * @param pvBuf Where to put the bits we read.
1481 * @param cb How much to read - less or equal to a page.
1482 */
1483static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
1484{
1485 /*
1486 * The most frequent access here is MMIO and shadowed ROM.
1487 * The current code ASSUMES all these access handlers cover full pages!
1488 */
1489
1490 /*
1491 * Whatever we do we need the source page, map it first.
1492 */
1493 const void *pvSrc = NULL;
1494 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
1495 if (RT_FAILURE(rc))
1496 {
1497 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1498 GCPhys, pPage, rc));
1499 memset(pvBuf, 0xff, cb);
1500 return VINF_SUCCESS;
1501 }
1502 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1503
1504 /*
1505 * Deal with any physical handlers.
1506 */
1507 PPGMPHYSHANDLER pPhys = NULL;
1508 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
1509 {
1510#ifdef IN_RING3
1511 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys); /* don't shadow the outer pPhys */
1512 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1513 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
1514 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
1515 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1516 Assert(pPhys->CTX_SUFF(pfnHandler));
1517
1518 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
1519 STAM_PROFILE_START(&pPhys->Stat, h);
1520 rc = pPhys->CTX_SUFF(pfnHandler)(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pPhys->CTX_SUFF(pvUser));
1521 STAM_PROFILE_STOP(&pPhys->Stat, h);
1522 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
1523#else
1524 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1525 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1526 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1527#endif
1528 }
1529
1530 /*
1531 * Deal with any virtual handlers.
1532 */
1533 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
1534 {
1535 unsigned iPage;
1536 PPGMVIRTHANDLER pVirt;
1537
1538 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
1539 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
1540 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
1541 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1542 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
1543
1544#ifdef IN_RING3
1545 if (pVirt->pfnHandlerR3)
1546 {
1547 if (!pPhys)
1548 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1549 else
1550 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
1551 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1552 + (iPage << PAGE_SHIFT)
1553 + (GCPhys & PAGE_OFFSET_MASK);
1554
1555 STAM_PROFILE_START(&pVirt->Stat, h);
1556 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
1557 STAM_PROFILE_STOP(&pVirt->Stat, h);
1558 if (rc2 == VINF_SUCCESS)
1559 rc = VINF_SUCCESS;
1560 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
1561 }
1562 else
1563 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1564#else
1565 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1566 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1567 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1568#endif
1569 }
1570
1571 /*
1572 * Take the default action.
1573 */
1574 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1575 memcpy(pvBuf, pvSrc, cb);
1576 return rc;
1577}
1578
1579
1580/**
1581 * Read physical memory.
1582 *
1583 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
1584 * want to ignore those.
1585 *
1586 * @returns VBox status code. Can be ignored in ring-3.
1587 * @retval VINF_SUCCESS.
1588 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1589 *
1590 * @param pVM VM Handle.
1591 * @param GCPhys Physical address start reading from.
1592 * @param pvBuf Where to put the read bits.
1593 * @param cbRead How many bytes to read.
1594 */
1595VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1596{
1597 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
1598 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
1599
1600 pgmLock(pVM);
1601
1602 /*
1603 * Copy loop on ram ranges.
1604 */
1605 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1606 for (;;)
1607 {
1608 /* Find range. */
1609 while (pRam && GCPhys > pRam->GCPhysLast)
1610 pRam = pRam->CTX_SUFF(pNext);
1611 /* Inside range or not? */
1612 if (pRam && GCPhys >= pRam->GCPhys)
1613 {
1614 /*
1615 * Must work our way thru this page by page.
1616 */
1617 RTGCPHYS off = GCPhys - pRam->GCPhys;
1618 while (off < pRam->cb)
1619 {
1620 unsigned iPage = off >> PAGE_SHIFT;
1621 PPGMPAGE pPage = &pRam->aPages[iPage];
1622 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1623 if (cb > cbRead)
1624 cb = cbRead;
1625
1626 /*
1627 * Any ALL access handlers?
1628 */
1629 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
1630 {
1631 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
1632 if (RT_FAILURE(rc))
1633 { pgmUnlock(pVM); return rc; } /* don't return with the PGM lock held */
1634 }
1635 else
1636 {
1637 /*
1638 * Get the pointer to the page.
1639 */
1640 const void *pvSrc;
1641 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
1642 if (RT_SUCCESS(rc))
1643 memcpy(pvBuf, pvSrc, cb);
1644 else
1645 {
1646 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1647 pRam->GCPhys + off, pPage, rc));
1648 memset(pvBuf, 0xff, cb);
1649 }
1650 }
1651
1652 /* next page */
1653 if (cb >= cbRead)
1654 {
1655 pgmUnlock(pVM);
1656 return VINF_SUCCESS;
1657 }
1658 cbRead -= cb;
1659 off += cb;
1660 pvBuf = (char *)pvBuf + cb;
1661 } /* walk pages in ram range. */
1662
1663 GCPhys = pRam->GCPhysLast + 1;
1664 }
1665 else
1666 {
1667 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
1668
1669 /*
1670 * Unassigned address space.
1671 */
1672 if (!pRam)
1673 break;
1674 size_t cb = pRam->GCPhys - GCPhys;
1675 if (cb >= cbRead)
1676 {
1677 memset(pvBuf, 0xff, cbRead);
1678 break;
1679 }
1680 memset(pvBuf, 0xff, cb);
1681
1682 cbRead -= cb;
1683 pvBuf = (char *)pvBuf + cb;
1684 GCPhys += cb;
1685 }
1686 } /* Ram range walk */
1687
1688 pgmUnlock(pVM);
1689 return VINF_SUCCESS;
1690}
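
/*
 * Usage sketch (editor's illustration, not part of the original source; the
 * address and buffer are made up): a ring-3 caller reading a chunk of guest
 * physical memory while letting access handlers and MMIO do their job.
 *
 *      uint8_t abBuf[256];
 *      int rc = PGMPhysRead(pVM, UINT32_C(0x000a0000), abBuf, sizeof(abBuf));
 *      if (RT_FAILURE(rc))     // only possible in R0/RC: VERR_PGM_PHYS_WR_HIT_HANDLER,
 *          return rc;          // meaning the access has to be retried in ring-3.
 */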
1691
1692#else /* Old PGMPhysRead */
1693
1694/**
1695 * Read physical memory.
1696 *
1697 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
1698 * want to ignore those.
1699 *
1700 * @param pVM VM Handle.
1701 * @param GCPhys Physical address to start reading from.
1702 * @param pvBuf Where to put the read bits.
1703 * @param cbRead How many bytes to read.
1704 */
1705VMMDECL(void) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1706{
1707#ifdef IN_RING3
1708 bool fGrabbedLock = false;
1709#endif
1710
1711 AssertMsg(cbRead > 0, ("don't even think about reading zero bytes!\n"));
1712 if (cbRead == 0)
1713 return;
1714
1715 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
1716
1717#ifdef IN_RING3
1718 if (!VM_IS_EMT(pVM))
1719 {
1720 pgmLock(pVM);
1721 fGrabbedLock = true;
1722 }
1723#endif
1724
1725 /*
1726 * Copy loop on ram ranges.
1727 */
1728 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1729 for (;;)
1730 {
1731 /* Find range. */
1732 while (pRam && GCPhys > pRam->GCPhysLast)
1733 pRam = pRam->CTX_SUFF(pNext);
1734 /* Inside range or not? */
1735 if (pRam && GCPhys >= pRam->GCPhys)
1736 {
1737 /*
1738 * Must work our way thru this page by page.
1739 */
1740 RTGCPHYS off = GCPhys - pRam->GCPhys;
1741 while (off < pRam->cb)
1742 {
1743 unsigned iPage = off >> PAGE_SHIFT;
1744 PPGMPAGE pPage = &pRam->aPages[iPage];
1745 size_t cb;
1746
1747 /* Physical chunk in dynamically allocated range not present? */
1748 if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(pPage)))
1749 {
1750 /* Treat it as reserved; return zeros */
1751 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1752 if (cb >= cbRead)
1753 {
1754 memset(pvBuf, 0, cbRead);
1755 goto l_End;
1756 }
1757 memset(pvBuf, 0, cb);
1758 }
1759 /* temp hacks, will be reorganized. */
1760 /*
1761 * Physical handler.
1762 */
1763 else if ( RT_UNLIKELY(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) >= PGM_PAGE_HNDL_PHYS_STATE_ALL)
1764 && !(pPage->HCPhys & MM_RAM_FLAGS_MMIO)) /// @todo PAGE FLAGS
1765 {
1766 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
1767 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1768
1769#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
1770 /* find and call the handler */
1771 PPGMPHYSHANDLER pNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesR3->PhysHandlers, GCPhys);
1772 if (pNode && pNode->pfnHandlerR3)
1773 {
1774 size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
1775 if (cbRange < cb)
1776 cb = cbRange;
1777 if (cb > cbRead)
1778 cb = cbRead;
1779
1780 void *pvSrc = PGMRAMRANGE_GETHCPTR(pRam, off)
1781
1782 /* Note! Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
1783 rc = pNode->pfnHandlerR3(pVM, GCPhys, pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pNode->pvUserR3);
1784 }
1785#endif /* IN_RING3 */
1786 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1787 {
1788#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1789 void *pvSrc = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK));
1790#else
1791 void *pvSrc = PGMRAMRANGE_GETHCPTR(pRam, off)
1792#endif
1793
1794 if (cb >= cbRead)
1795 {
1796 memcpy(pvBuf, pvSrc, cbRead);
1797 goto l_End;
1798 }
1799 memcpy(pvBuf, pvSrc, cb);
1800 }
1801 else if (cb >= cbRead)
1802 goto l_End;
1803 }
1804 /*
1805 * Virtual handlers.
1806 */
1807 else if ( RT_UNLIKELY(PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) >= PGM_PAGE_HNDL_VIRT_STATE_ALL)
1808 && !(pPage->HCPhys & MM_RAM_FLAGS_MMIO)) /// @todo PAGE FLAGS
1809 {
1810 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
1811 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1812#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
1813 /* Search the whole tree for matching physical addresses (rather expensive!) */
1814 PPGMVIRTHANDLER pNode;
1815 unsigned iPage;
1816 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pNode, &iPage);
1817 if (RT_SUCCESS(rc2) && pNode->pfnHandlerR3)
1818 {
1819 size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
1820 if (cbRange < cb)
1821 cb = cbRange;
1822 if (cb > cbRead)
1823 cb = cbRead;
1824 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pNode->Core.Key & PAGE_BASE_GC_MASK)
1825 + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);
1826
1827 void *pvSrc = PGMRAMRANGE_GETHCPTR(pRam, off)
1828
1829 /* Note! Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
1830 rc = pNode->pfnHandlerR3(pVM, (RTGCPTR)GCPtr, pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, 0);
1831 }
1832#endif /* IN_RING3 */
1833 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1834 {
1835#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1836 void *pvSrc = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK));
1837#else
1838 void *pvSrc = PGMRAMRANGE_GETHCPTR(pRam, off)
1839#endif
1840 if (cb >= cbRead)
1841 {
1842 memcpy(pvBuf, pvSrc, cbRead);
1843 goto l_End;
1844 }
1845 memcpy(pvBuf, pvSrc, cb);
1846 }
1847 else if (cb >= cbRead)
1848 goto l_End;
1849 }
1850 else
1851 {
1852 switch (pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_ROM)) /** @todo PAGE FLAGS */
1853 {
1854 /*
1855 * Normal memory or ROM.
1856 */
1857 case 0:
1858 case MM_RAM_FLAGS_ROM:
1859 case MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_RESERVED:
1860 //case MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2: /* = shadow */ - //MMIO2 isn't in the mask.
1861 case MM_RAM_FLAGS_MMIO2: // MMIO2 isn't in the mask.
1862 {
1863#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1864 void *pvSrc = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK));
1865#else
1866 void *pvSrc = PGMRAMRANGE_GETHCPTR(pRam, off)
1867#endif
1868 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1869 if (cb >= cbRead)
1870 {
1871#if defined(IN_RING3) && defined(PGM_PHYSMEMACCESS_CACHING)
1872 if (cbRead <= 4 && !fGrabbedLock /* i.e. EMT */)
1873 pgmPhysCacheAdd(pVM, &pVM->pgm.s.pgmphysreadcache, GCPhys, (uint8_t*)pvSrc);
1874#endif /* IN_RING3 && PGM_PHYSMEMACCESS_CACHING */
1875 memcpy(pvBuf, pvSrc, cbRead);
1876 goto l_End;
1877 }
1878 memcpy(pvBuf, pvSrc, cb);
1879 break;
1880 }
1881
1882 /*
1883 * All reserved, nothing there.
1884 */
1885 case MM_RAM_FLAGS_RESERVED:
1886 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1887 if (cb >= cbRead)
1888 {
1889 memset(pvBuf, 0, cbRead);
1890 goto l_End;
1891 }
1892 memset(pvBuf, 0, cb);
1893 break;
1894
1895 /*
1896 * The rest needs to be taken more carefully.
1897 */
1898 default:
1899#if 1 /** @todo r=bird: Can you do this properly please. */
1900 /** @todo Try MMIO; quick hack */
1901 if (cbRead <= 8 && IOMMMIORead(pVM, GCPhys, (uint32_t *)pvBuf, cbRead) == VINF_SUCCESS)
1902 goto l_End;
1903#endif
1904
1905 /** @todo fix me later. */
1906 AssertReleaseMsgFailed(("Unknown read at %RGp size %u implement the complex physical reading case %RHp\n",
1907 GCPhys, cbRead,
1908 pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_ROM))); /** @todo PAGE FLAGS */
1909 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1910 break;
1911 }
1912 }
1913
1914 cbRead -= cb;
1915 off += cb;
1916 pvBuf = (char *)pvBuf + cb;
1917 }
1918
1919 GCPhys = pRam->GCPhysLast + 1;
1920 }
1921 else
1922 {
1923 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
1924
1925 /*
1926 * Unassigned address space.
1927 */
1928 size_t cb;
1929 if ( !pRam
1930 || (cb = pRam->GCPhys - GCPhys) >= cbRead)
1931 {
1932 memset(pvBuf, 0, cbRead);
1933 goto l_End;
1934 }
1935
1936 memset(pvBuf, 0, cb); /** @todo this is wrong, unassigned == 0xff not 0x00! */
1937 cbRead -= cb;
1938 pvBuf = (char *)pvBuf + cb;
1939 GCPhys += cb;
1940 }
1941 }
1942l_End:
1943#ifdef IN_RING3
1944 if (fGrabbedLock)
1945 pgmUnlock(pVM);
1946#endif
1947 return;
1948}
1949
1950#endif /* Old PGMPhysRead */
1951#ifdef VBOX_WITH_NEW_PHYS_CODE
1952
1953/**
1954 * Deals with writing to a page with one or more WRITE or ALL access handlers.
1955 *
1956 * @returns VBox status code. Can be ignored in ring-3.
1957 * @retval VINF_SUCCESS.
1958 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1959 *
1960 * @param pVM The VM handle.
1961 * @param pPage The page descriptor.
1962 * @param GCPhys The physical address to start writing at.
1963 * @param pvBuf What to write.
1964 * @param cbWrite How much to write - less or equal to a page.
1965 */
1966static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
1967{
1968 void *pvDst = NULL;
1969 int rc;
1970
1971 /*
1972 * Give priority to physical handlers (like #PF does).
1973 *
1974 * Hope for a lonely physical handler first that covers the whole
1975 * write area. This should be a pretty frequent case with MMIO and
1976 * the heavy usage of full page handlers in the page pool.
1977 */
1978 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
1979 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
1980 {
1981 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1982 if (pCur)
1983 {
1984 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1985 Assert(pCur->CTX_SUFF(pfnHandler));
1986
1987 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
1988 if (cbRange > cbWrite)
1989 cbRange = cbWrite;
1990
1991#ifndef IN_RING3
1992 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1993 NOREF(cbRange);
1994 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1995 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1996
1997#else /* IN_RING3 */
1998 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1999 if (!PGM_PAGE_IS_MMIO(pPage))
2000 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2001 else
2002 rc = VINF_SUCCESS;
2003 if (RT_SUCCESS(rc))
2004 {
2005 STAM_PROFILE_START(&pCur->Stat, h);
2006 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pCur->CTX_SUFF(pvUser));
2007 STAM_PROFILE_STOP(&pCur->Stat, h);
2008 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2009 memcpy(pvDst, pvBuf, cbRange);
2010 else
2011 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2012 }
2013 else
2014 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2015 GCPhys, pPage, rc), rc);
2016 if (RT_LIKELY(cbRange == cbWrite))
2017 return VINF_SUCCESS;
2018
2019 /* more fun to be had below */
2020 cbWrite -= cbRange;
2021 GCPhys += cbRange;
2022 pvBuf = (uint8_t *)pvBuf + cbRange;
2023 pvDst = (uint8_t *)pvDst + cbRange;
2024#endif /* IN_RING3 */
2025 }
2026 /* else: the handler is somewhere else in the page, deal with it below. */
2027 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2028 }
2029 /*
2030 * A virtual handler without any interfering physical handlers.
2031 * Hopefully it'll cover the whole write.
2032 */
2033 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2034 {
2035 unsigned iPage;
2036 PPGMVIRTHANDLER pCur;
2037 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
2038 if (RT_SUCCESS(rc))
2039 {
2040 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2041 if (cbRange > cbWrite)
2042 cbRange = cbWrite;
2043
2044#ifndef IN_RING3
2045 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2046 NOREF(cbRange);
2047 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2048 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2049
2050#else /* IN_RING3 */
2051
2052 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2053 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2054 if (RT_SUCCESS(rc))
2055 {
2056 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2057 if (pCur->pfnHandlerR3)
2058 {
2059 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
2060 + (iPage << PAGE_SHIFT)
2061 + (GCPhys & PAGE_OFFSET_MASK);
2062
2063 STAM_PROFILE_START(&pCur->Stat, h);
2064 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2065 STAM_PROFILE_STOP(&pCur->Stat, h);
2066 }
2067 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2068 memcpy(pvDst, pvBuf, cbRange);
2069 else
2070 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2071 }
2072 else
2073 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2074 GCPhys, pPage, rc), rc);
2075 if (RT_LIKELY(cbRange == cbWrite))
2076 return VINF_SUCCESS;
2077
2078 /* more fun to be had below */
2079 cbWrite -= cbRange;
2080 GCPhys += cbRange;
2081 pvBuf = (uint8_t *)pvBuf + cbRange;
2082 pvDst = (uint8_t *)pvDst + cbRange;
2083#endif
2084 }
2085 /* else: the handler is somewhere else in the page, deal with it below. */
2086 }
2087
2088 /*
2089 * Deal with all the odd ends.
2090 */
2091
2092 /* We need a writable destination page. */
2093 if (!pvDst)
2094 {
2095 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2096 AssertLogRelMsgReturn(RT_SUCCESS(rc),
2097 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2098 GCPhys, pPage, rc), rc);
2099 }
2100
2101 /* The loop state (big + ugly). */
2102 unsigned iVirtPage = 0;
2103 PPGMVIRTHANDLER pVirt = NULL;
2104 uint32_t offVirt = PAGE_SIZE;
2105 uint32_t offVirtLast = PAGE_SIZE;
2106 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2107
2108 PPGMPHYSHANDLER pPhys = NULL;
2109 uint32_t offPhys = PAGE_SIZE;
2110 uint32_t offPhysLast = PAGE_SIZE;
2111 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2112
2113 /* The loop. */
2114 for (;;)
2115 {
2116 /*
2117 * Find the closest handler at or above GCPhys.
2118 */
2119 if (fMoreVirt && !pVirt)
2120 {
2121 int rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
2122 if (RT_SUCCESS(rc))
2123 {
2124 offVirt = 0;
2125 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2126 }
2127 else
2128 {
2129 PPGMPHYS2VIRTHANDLER pVirtPhys;
2130 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2131 GCPhys, true /* fAbove */);
2132 if ( pVirtPhys
2133 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2134 {
2135 /* ASSUME that pVirtPhys only covers one page. */
2136 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2137 Assert(pVirtPhys->Core.Key > GCPhys);
2138
2139 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2140 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2141 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2142 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2143 }
2144 else
2145 {
2146 pVirt = NULL;
2147 fMoreVirt = false;
2148 offVirt = offVirtLast = PAGE_SIZE;
2149 }
2150 }
2151 }
2152
2153 if (fMorePhys && !pPhys)
2154 {
2155 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2156 if (pPhys)
2157 {
2158 offPhys = 0;
2159 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2160 }
2161 else
2162 {
2163 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2164 GCPhys, true /* fAbove */);
2165 if ( pPhys
2166 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2167 {
2168 offPhys = pPhys->Core.Key - GCPhys;
2169 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2170 }
2171 else
2172 {
2173 pPhys = NULL;
2174 fMorePhys = false;
2175 offPhys = offPhysLast = PAGE_SIZE;
2176 }
2177 }
2178 }
2179
2180 /*
2181 * Handle access to space without handlers (that's easy).
2182 */
2183 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2184 uint32_t cbRange = (uint32_t)cbWrite;
2185 if (offPhys && offVirt)
2186 {
2187 if (cbRange > offPhys)
2188 cbRange = offPhys;
2189 if (cbRange > offVirt)
2190 cbRange = offVirt;
2191 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2192 }
2193 /*
2194 * Physical handler.
2195 */
2196 else if (!offPhys && offVirt)
2197 {
2198 if (cbRange > offPhysLast + 1)
2199 cbRange = offPhysLast + 1;
2200 if (cbRange > offVirt)
2201 cbRange = offVirt;
2202#ifdef IN_RING3
2203 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2204 STAM_PROFILE_START(&pPhys->Stat, h);
2205 rc = pPhys->CTX_SUFF(pfnHandler)(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pPhys->CTX_SUFF(pvUser));
2206 STAM_PROFILE_STOP(&pPhys->Stat, h);
2207 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pPhys->pszDesc));
2208 pPhys = NULL;
2209#else
2210 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2211 NOREF(cbRange);
2212 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2213 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2214#endif
2215 }
2216 /*
2217 * Virtual handler.
2218 */
2219 else if (offPhys && !offVirt)
2220 {
2221 if (cbRange > offVirtLast + 1)
2222 cbRange = offVirtLast + 1;
2223 if (cbRange > offPhys)
2224 cbRange = offPhys;
2225#ifdef IN_RING3
2226 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2227 if (pVirt->pfnHandlerR3)
2228 {
2229 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2230 + (iVirtPage << PAGE_SHIFT)
2231 + (GCPhys & PAGE_OFFSET_MASK);
2232 STAM_PROFILE_START(&pVirt->Stat, h);
2233 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
2234 STAM_PROFILE_STOP(&pVirt->Stat, h);
2235 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2236 }
2237 pVirt = NULL;
2238#else
2239 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2240 NOREF(cbRange);
2241 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2242 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2243#endif
2244 }
2245 /*
2246 * Both... give the physical one priority.
2247 */
2248 else
2249 {
2250 Assert(!offPhys && !offVirt);
2251 if (cbRange > offVirtLast + 1)
2252 cbRange = offVirtLast + 1;
2253 if (cbRange > offPhysLast + 1)
2254 cbRange = offPhysLast + 1;
2255
2256#ifdef IN_RING3
2257 if (pVirt->pfnHandlerR3)
2258 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2259 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2260
2261 STAM_PROFILE_START(&pPhys->Stat, h);
2262 rc = pPhys->CTX_SUFF(pfnHandler)(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pPhys->CTX_SUFF(pvUser));
2263 STAM_PROFILE_STOP(&pPhys->Stat, h);
2264 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pPhys->pszDesc));
2265 if (pVirt->pfnHandlerR3)
2266 {
2267
2268 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2269 + (iVirtPage << PAGE_SHIFT)
2270 + (GCPhys & PAGE_OFFSET_MASK);
2271 STAM_PROFILE_START(&pVirt->Stat, h);
2272 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
2273 STAM_PROFILE_STOP(&pVirt->Stat, h);
2274 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
2275 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2276 rc = VINF_SUCCESS;
2277 }
2278 pPhys = NULL;
2279 pVirt = NULL;
2280#else
2281 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2282 NOREF(cbRange);
2283 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2284 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2285#endif
2286 }
2287 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2288 memcpy(pvDst, pvBuf, cbRange);
2289
2290 /*
2291 * Advance if we've got more stuff to do.
2292 */
2293 if (cbRange >= cbWrite)
2294 return VINF_SUCCESS;
2295
2296 cbWrite -= cbRange;
2297 GCPhys += cbRange;
2298 pvBuf = (uint8_t *)pvBuf + cbRange;
2299 pvDst = (uint8_t *)pvDst + cbRange;
2300
2301 offPhys -= cbRange;
2302 offPhysLast -= cbRange;
2303 offVirt -= cbRange;
2304 offVirtLast -= cbRange;
2305 }
2306}
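
/*
 * Worked example for the function above (editor's illustration, not from the
 * original source; the handler range and addresses are hypothetical): say a
 * physical WRITE handler covers 0x1000..0x10ff, the page has no virtual
 * handlers, and the caller writes 0x20 bytes starting at GCPhys 0x10f0.  The
 * "lonely physical handler" fast path finds the handler, clamps cbRange to
 * 0x10 (KeyLast - GCPhys + 1), calls it and advances.  The remaining 0x10
 * bytes are then handled by the odds-and-ends loop: no further handler is
 * found at or above 0x1100, so offPhys and offVirt both stay at PAGE_SIZE
 * and the data is simply memcpy'd into pvDst.
 */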
2307
2308
2309/**
2310 * Write to physical memory.
2311 *
2312 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2313 * want to ignore those.
2314 *
2315 * @returns VBox status code. Can be ignored in ring-3.
2316 * @retval VINF_SUCCESS.
2317 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2318 *
2319 * @param pVM VM Handle.
2320 * @param GCPhys Physical address to write to.
2321 * @param pvBuf What to write.
2322 * @param cbWrite How many bytes to write.
2323 */
2324VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2325{
2326 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2327 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2328 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2329
2330 pgmLock(pVM);
2331
2332 /*
2333 * Copy loop on ram ranges.
2334 */
2335 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2336 for (;;)
2337 {
2338 /* Find range. */
2339 while (pRam && GCPhys > pRam->GCPhysLast)
2340 pRam = pRam->CTX_SUFF(pNext);
2341 /* Inside range or not? */
2342 if (pRam && GCPhys >= pRam->GCPhys)
2343 {
2344 /*
2345 * Must work our way thru this page by page.
2346 */
2347 RTGCPHYS off = GCPhys - pRam->GCPhys;
2348 while (off < pRam->cb)
2349 {
2350 unsigned iPage = off >> PAGE_SHIFT;
2351 PPGMPAGE pPage = &pRam->aPages[iPage];
2352 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2353 if (cb > cbWrite)
2354 cb = cbWrite;
2355
2356 /*
2357 * Any active WRITE or ALL access handlers?
2358 */
2359 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2360 {
2361 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2362 if (RT_FAILURE(rc))
2363 { pgmUnlock(pVM); return rc; }
2364 }
2365 else
2366 {
2367 /*
2368 * Get the pointer to the page.
2369 */
2370 void *pvDst;
2371 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
2372 if (RT_SUCCESS(rc))
2373 memcpy(pvDst, pvBuf, cb);
2374 else
2375 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2376 pRam->GCPhys + off, pPage, rc));
2377 }
2378
2379 /* next page */
2380 if (cb >= cbWrite)
2381 {
2382 pgmUnlock(pVM);
2383 return VINF_SUCCESS;
2384 }
2385
2386 cbWrite -= cb;
2387 off += cb;
2388 pvBuf = (const char *)pvBuf + cb;
2389 } /* walk pages in ram range */
2390
2391 GCPhys = pRam->GCPhysLast + 1;
2392 }
2393 else
2394 {
2395 /*
2396 * Unassigned address space, skip it.
2397 */
2398 if (!pRam)
2399 break;
2400 size_t cb = pRam->GCPhys - GCPhys;
2401 if (cb >= cbWrite)
2402 break;
2403 cbWrite -= cb;
2404 pvBuf = (const char *)pvBuf + cb;
2405 GCPhys += cb;
2406 }
2407 } /* Ram range walk */
2408
2409 pgmUnlock(pVM);
2410 return VINF_SUCCESS;
2411}
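
/*
 * Usage sketch (editor's illustration, not part of the original source; the
 * address and value are made up): writing guest physical memory with access
 * handlers respected.  Note that writes are refused once pgmR3Save() has
 * run, hence the fNoMorePhysWrites assertion at the top of PGMPhysWrite.
 *
 *      uint32_t const u32Value = UINT32_C(0xfeedface);
 *      int rc = PGMPhysWrite(pVM, UINT32_C(0x00001000), &u32Value, sizeof(u32Value));
 *      if (RT_FAILURE(rc))     // only possible in R0/RC (VERR_PGM_PHYS_WR_HIT_HANDLER).
 *          return rc;
 */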
2412
2413#else /* Old PGMPhysWrite */
2414
2415/**
2416 * Write to physical memory.
2417 *
2418 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2419 * want to ignore those.
2420 *
2421 * @param pVM VM Handle.
2422 * @param GCPhys Physical address to write to.
2423 * @param pvBuf What to write.
2424 * @param cbWrite How many bytes to write.
2425 */
2426VMMDECL(void) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2427{
2428#ifdef IN_RING3
2429 bool fGrabbedLock = false;
2430#endif
2431
2432 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2433 AssertMsg(cbWrite > 0, ("don't even think about writing zero bytes!\n"));
2434 if (cbWrite == 0)
2435 return;
2436
2437 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2438
2439#ifdef IN_RING3
2440 if (!VM_IS_EMT(pVM))
2441 {
2442 pgmLock(pVM);
2443 fGrabbedLock = true;
2444 }
2445#endif
2446 /*
2447 * Copy loop on ram ranges.
2448 */
2449 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2450 for (;;)
2451 {
2452 /* Find range. */
2453 while (pRam && GCPhys > pRam->GCPhysLast)
2454 pRam = pRam->CTX_SUFF(pNext);
2455 /* Inside range or not? */
2456 if (pRam && GCPhys >= pRam->GCPhys)
2457 {
2458 /*
2459 * Must work our way thru this page by page.
2460 */
2461 RTGCPTR off = GCPhys - pRam->GCPhys;
2462 while (off < pRam->cb)
2463 {
2464 RTGCPTR iPage = off >> PAGE_SHIFT;
2465 PPGMPAGE pPage = &pRam->aPages[iPage];
2466
2467 /* Physical chunk in dynamically allocated range not present? */
2468 if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(pPage)))
2469 {
2470 int rc;
2471 RTGCPHYS GCPhysPage = pRam->GCPhys + off;
2472#ifdef IN_RING3
2473 if (fGrabbedLock)
2474 {
2475 pgmUnlock(pVM);
2476 rc = pgmr3PhysGrowRange(pVM, GCPhysPage);
2477 if (rc == VINF_SUCCESS)
2478 PGMPhysWrite(pVM, GCPhysPage, pvBuf, cbWrite); /* try again; can't assume pRam is still valid (paranoia) */
2479 return;
2480 }
2481 rc = pgmr3PhysGrowRange(pVM, GCPhysPage);
2482#else
2483 rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhysPage);
2484#endif
2485 if (rc != VINF_SUCCESS)
2486 goto l_End;
2487 }
2488
2489 size_t cb;
2490 /* temporary hack, will reorganize it later. */
2491 /*
2492 * Virtual handlers
2493 */
2494 if ( PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
2495 && !(pPage->HCPhys & MM_RAM_FLAGS_MMIO)) /// @todo PAGE FLAGS
2496 {
2497 if (PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2498 {
2499 /*
2500 * Physical write handler + virtual write handler.
2501 * Consider this a quick workaround for the CSAM + shadow caching problem.
2502 *
2503 * We hand it to the shadow caching first since it requires the unchanged
2504 * data. CSAM will have to put up with it already being changed.
2505 */
2506 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
2507 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2508#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
2509 /* 1. The physical handler */
2510 PPGMPHYSHANDLER pPhysNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesR3->PhysHandlers, GCPhys);
2511 if (pPhysNode && pPhysNode->pfnHandlerR3)
2512 {
2513 size_t cbRange = pPhysNode->Core.KeyLast - GCPhys + 1;
2514 if (cbRange < cb)
2515 cb = cbRange;
2516 if (cb > cbWrite)
2517 cb = cbWrite;
2518
2519 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)
2520
2521 /* Note! Dangerous assumption that R3 handlers don't do anything that really requires an EMT lock! */
2522 rc = pPhysNode->pfnHandlerR3(pVM, GCPhys, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, pPhysNode->pvUserR3);
2523 }
2524
2525 /* 2. The virtual handler (will see incorrect data) */
2526 PPGMVIRTHANDLER pVirtNode;
2527 unsigned iPage;
2528 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirtNode, &iPage);
2529 if (RT_SUCCESS(rc2) && pVirtNode->pfnHandlerR3)
2530 {
2531 size_t cbRange = pVirtNode->Core.KeyLast - GCPhys + 1;
2532 if (cbRange < cb)
2533 cb = cbRange;
2534 if (cb > cbWrite)
2535 cb = cbWrite;
2536 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirtNode->Core.Key & PAGE_BASE_GC_MASK)
2537 + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);
2538
2539 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)
2540
2541 /* Note! Dangerous assumption that R3 handlers don't do anything that really requires an EMT lock! */
2542 rc2 = pVirtNode->pfnHandlerR3(pVM, (RTGCPTR)GCPtr, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, 0);
2543 if ( ( rc2 != VINF_PGM_HANDLER_DO_DEFAULT
2544 && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2545 || ( RT_FAILURE(rc2)
2546 && RT_SUCCESS(rc)))
2547 rc = rc2;
2548 }
2549#endif /* IN_RING3 */
2550 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2551 {
2552#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2553 void *pvDst = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK));
2554#else
2555 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)
2556#endif
2557 if (cb >= cbWrite)
2558 {
2559 memcpy(pvDst, pvBuf, cbWrite);
2560 goto l_End;
2561 }
2562 memcpy(pvDst, pvBuf, cb);
2563 }
2564 else if (cb >= cbWrite)
2565 goto l_End;
2566 }
2567 else
2568 {
2569 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
2570 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2571#ifdef IN_RING3
2572/** @todo deal with this in GC and R0! */
2573 /* Search the whole tree for matching physical addresses (rather expensive!) */
2574 PPGMVIRTHANDLER pNode;
2575 unsigned iPage;
2576 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pNode, &iPage);
2577 if (RT_SUCCESS(rc2) && pNode->pfnHandlerR3)
2578 {
2579 size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
2580 if (cbRange < cb)
2581 cb = cbRange;
2582 if (cb > cbWrite)
2583 cb = cbWrite;
2584 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pNode->Core.Key & PAGE_BASE_GC_MASK)
2585 + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);
2586
2587 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)
2588
2589 /* Note! Dangerous assumption that R3 handlers don't do anything that really requires an EMT lock! */
2590 rc = pNode->pfnHandlerR3(pVM, (RTGCPTR)GCPtr, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, 0);
2591 }
2592#endif /* IN_RING3 */
2593 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2594 {
2595#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2596 void *pvDst = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK));
2597#else
2598 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)
2599#endif
2600 if (cb >= cbWrite)
2601 {
2602 memcpy(pvDst, pvBuf, cbWrite);
2603 goto l_End;
2604 }
2605 memcpy(pvDst, pvBuf, cb);
2606 }
2607 else if (cb >= cbWrite)
2608 goto l_End;
2609 }
2610 }
2611 /*
2612 * Physical handler.
2613 */
2614 else if ( RT_UNLIKELY(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) >= PGM_PAGE_HNDL_PHYS_STATE_WRITE)
2615 && !(pPage->HCPhys & MM_RAM_FLAGS_MMIO)) /// @todo PAGE FLAGS
2616 {
2617 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
2618 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2619#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
2620 /* find and call the handler */
2621 PPGMPHYSHANDLER pNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesR3->PhysHandlers, GCPhys);
2622 if (pNode && pNode->pfnHandlerR3)
2623 {
2624 size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
2625 if (cbRange < cb)
2626 cb = cbRange;
2627 if (cb > cbWrite)
2628 cb = cbWrite;
2629
2630 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)
2631
2632 /** @todo Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
2633 rc = pNode->pfnHandlerR3(pVM, GCPhys, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, pNode->pvUserR3);
2634 }
2635#endif /* IN_RING3 */
2636 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2637 {
2638#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2639 void *pvDst = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK));
2640#else
2641 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)
2642#endif
2643 if (cb >= cbWrite)
2644 {
2645 memcpy(pvDst, pvBuf, cbWrite);
2646 goto l_End;
2647 }
2648 memcpy(pvDst, pvBuf, cb);
2649 }
2650 else if (cb >= cbWrite)
2651 goto l_End;
2652 }
2653 else
2654 {
2655 /** @todo r=bird: missing MM_RAM_FLAGS_ROM here, we shall not allow anyone to overwrite the ROM! */
2656 switch (pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)) /** @todo PAGE FLAGS */
2657 {
2658 /*
2659 * Normal memory, MMIO2 or writable shadow ROM.
2660 */
2661 case 0:
2662 case MM_RAM_FLAGS_MMIO2:
2663 case MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2: /* shadow rom */
2664 {
2665#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2666 void *pvDst = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK));
2667#else
2668 void *pvDst = PGMRAMRANGE_GETHCPTR(pRam, off)
2669#endif
2670 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2671 if (cb >= cbWrite)
2672 {
2673#if defined(IN_RING3) && defined(PGM_PHYSMEMACCESS_CACHING)
2674 if (cbWrite <= 4 && !fGrabbedLock /* i.e. EMT */)
2675 pgmPhysCacheAdd(pVM, &pVM->pgm.s.pgmphyswritecache, GCPhys, (uint8_t*)pvDst);
2676#endif /* IN_RING3 && PGM_PHYSMEMACCESS_CACHING */
2677 memcpy(pvDst, pvBuf, cbWrite);
2678 goto l_End;
2679 }
2680 memcpy(pvDst, pvBuf, cb);
2681 break;
2682 }
2683
2684 /*
2685 * All reserved, nothing there.
2686 */
2687 case MM_RAM_FLAGS_RESERVED:
2688 case MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO2:
2689 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2690 if (cb >= cbWrite)
2691 goto l_End;
2692 break;
2693
2694
2695 /*
2696 * The rest needs to be taken more carefully.
2697 */
2698 default:
2699#if 1 /** @todo r=bird: Can you do this properly please. */
2700 /** @todo Try MMIO; quick hack */
2701 if (cbWrite <= 8 && IOMMMIOWrite(pVM, GCPhys, *(uint32_t *)pvBuf, cbWrite) == VINF_SUCCESS)
2702 goto l_End;
2703#endif
2704
2705 /** @todo fix me later. */
2706 AssertReleaseMsgFailed(("Unknown write at %RGp size %u implement the complex physical writing case %RHp\n",
2707 GCPhys, cbWrite,
2708 (pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)))); /** @todo PAGE FLAGS */
2709 /* skip the write */
2710 cb = cbWrite;
2711 break;
2712 }
2713 }
2714
2715 cbWrite -= cb;
2716 off += cb;
2717 pvBuf = (const char *)pvBuf + cb;
2718 }
2719
2720 GCPhys = pRam->GCPhysLast + 1;
2721 }
2722 else
2723 {
2724 /*
2725 * Unassigned address space.
2726 */
2727 size_t cb;
2728 if ( !pRam
2729 || (cb = pRam->GCPhys - GCPhys) >= cbWrite)
2730 goto l_End;
2731
2732 cbWrite -= cb;
2733 pvBuf = (const char *)pvBuf + cb;
2734 GCPhys += cb;
2735 }
2736 }
2737l_End:
2738#ifdef IN_RING3
2739 if (fGrabbedLock)
2740 pgmUnlock(pVM);
2741#endif
2742 return;
2743}
2744
2745#endif /* Old PGMPhysWrite */
2746
2747
2748/**
2749 * Read from guest physical memory by GC physical address, bypassing
2750 * MMIO and access handlers.
2751 *
2752 * @returns VBox status.
2753 * @param pVM VM handle.
2754 * @param pvDst The destination address.
2755 * @param GCPhysSrc The source address (GC physical address).
2756 * @param cb The number of bytes to read.
2757 */
2758VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2759{
2760 /*
2761 * Treat the first page as a special case.
2762 */
2763 if (!cb)
2764 return VINF_SUCCESS;
2765
2766 /* map the 1st page */
2767 void const *pvSrc;
2768 PGMPAGEMAPLOCK Lock;
2769 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2770 if (RT_FAILURE(rc))
2771 return rc;
2772
2773 /* optimize for the case where access is completely within the first page. */
2774 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2775 if (RT_LIKELY(cb <= cbPage))
2776 {
2777 memcpy(pvDst, pvSrc, cb);
2778 PGMPhysReleasePageMappingLock(pVM, &Lock);
2779 return VINF_SUCCESS;
2780 }
2781
2782 /* copy to the end of the page. */
2783 memcpy(pvDst, pvSrc, cbPage);
2784 PGMPhysReleasePageMappingLock(pVM, &Lock);
2785 GCPhysSrc += cbPage;
2786 pvDst = (uint8_t *)pvDst + cbPage;
2787 cb -= cbPage;
2788
2789 /*
2790 * Page by page.
2791 */
2792 for (;;)
2793 {
2794 /* map the page */
2795 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2796 if (RT_FAILURE(rc))
2797 return rc;
2798
2799 /* last page? */
2800 if (cb <= PAGE_SIZE)
2801 {
2802 memcpy(pvDst, pvSrc, cb);
2803 PGMPhysReleasePageMappingLock(pVM, &Lock);
2804 return VINF_SUCCESS;
2805 }
2806
2807 /* copy the entire page and advance */
2808 memcpy(pvDst, pvSrc, PAGE_SIZE);
2809 PGMPhysReleasePageMappingLock(pVM, &Lock);
2810 GCPhysSrc += PAGE_SIZE;
2811 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2812 cb -= PAGE_SIZE;
2813 }
2814 /* won't ever get here. */
2815}
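
/*
 * Usage sketch (editor's illustration, not part of the original source; the
 * address is made up): peeking at guest physical memory without invoking
 * MMIO or access handlers, e.g. from a debugger or state-inspection path.
 *
 *      uint64_t u64Tmp;
 *      int rc = PGMPhysSimpleReadGCPhys(pVM, &u64Tmp, UINT32_C(0x00002000), sizeof(u64Tmp));
 *      if (RT_FAILURE(rc))     // e.g. the address is not backed by anything mappable.
 *          return rc;
 */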
2816
2817#ifndef IN_RC /* Ring 0 & 3 only. (Just not needed in GC.) */
2818
2819/**
2820 * Write to guest physical memory by GC physical address.
2822 *
2823 * This will bypass MMIO and access handlers.
2824 *
2825 * @returns VBox status.
2826 * @param pVM VM handle.
2827 * @param GCPhysDst The GC physical address of the destination.
2828 * @param pvSrc The source buffer.
2829 * @param cb The number of bytes to write.
2830 */
2831VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2832{
2833 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2834
2835 /*
2836 * Treat the first page as a special case.
2837 */
2838 if (!cb)
2839 return VINF_SUCCESS;
2840
2841 /* map the 1st page */
2842 void *pvDst;
2843 PGMPAGEMAPLOCK Lock;
2844 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2845 if (RT_FAILURE(rc))
2846 return rc;
2847
2848 /* optimize for the case where access is completely within the first page. */
2849 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2850 if (RT_LIKELY(cb <= cbPage))
2851 {
2852 memcpy(pvDst, pvSrc, cb);
2853 PGMPhysReleasePageMappingLock(pVM, &Lock);
2854 return VINF_SUCCESS;
2855 }
2856
2857 /* copy to the end of the page. */
2858 memcpy(pvDst, pvSrc, cbPage);
2859 PGMPhysReleasePageMappingLock(pVM, &Lock);
2860 GCPhysDst += cbPage;
2861 pvSrc = (const uint8_t *)pvSrc + cbPage;
2862 cb -= cbPage;
2863
2864 /*
2865 * Page by page.
2866 */
2867 for (;;)
2868 {
2869 /* map the page */
2870 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2871 if (RT_FAILURE(rc))
2872 return rc;
2873
2874 /* last page? */
2875 if (cb <= PAGE_SIZE)
2876 {
2877 memcpy(pvDst, pvSrc, cb);
2878 PGMPhysReleasePageMappingLock(pVM, &Lock);
2879 return VINF_SUCCESS;
2880 }
2881
2882 /* copy the entire page and advance */
2883 memcpy(pvDst, pvSrc, PAGE_SIZE);
2884 PGMPhysReleasePageMappingLock(pVM, &Lock);
2885 GCPhysDst += PAGE_SIZE;
2886 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2887 cb -= PAGE_SIZE;
2888 }
2889 /* won't ever get here. */
2890}
2891
2892
2893/**
2894 * Read from guest physical memory referenced by GC pointer.
2895 *
2896 * This function uses the current CR3/CR0/CR4 of the guest and will
2897 * bypass access handlers and not set any accessed bits.
2898 *
2899 * @returns VBox status.
2900 * @param pVM VM handle.
2901 * @param pvDst The destination address.
2902 * @param GCPtrSrc The source address (GC pointer).
2903 * @param cb The number of bytes to read.
2904 */
2905VMMDECL(int) PGMPhysSimpleReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2906{
2907 /*
2908 * Treat the first page as a special case.
2909 */
2910 if (!cb)
2911 return VINF_SUCCESS;
2912
2913 /* map the 1st page */
2914 void const *pvSrc;
2915 PGMPAGEMAPLOCK Lock;
2916 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVM, GCPtrSrc, &pvSrc, &Lock);
2917 if (RT_FAILURE(rc))
2918 return rc;
2919
2920 /* optimize for the case where access is completely within the first page. */
2921 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2922 if (RT_LIKELY(cb <= cbPage))
2923 {
2924 memcpy(pvDst, pvSrc, cb);
2925 PGMPhysReleasePageMappingLock(pVM, &Lock);
2926 return VINF_SUCCESS;
2927 }
2928
2929 /* copy to the end of the page. */
2930 memcpy(pvDst, pvSrc, cbPage);
2931 PGMPhysReleasePageMappingLock(pVM, &Lock);
2932 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
2933 pvDst = (uint8_t *)pvDst + cbPage;
2934 cb -= cbPage;
2935
2936 /*
2937 * Page by page.
2938 */
2939 for (;;)
2940 {
2941 /* map the page */
2942 rc = PGMPhysGCPtr2CCPtrReadOnly(pVM, GCPtrSrc, &pvSrc, &Lock);
2943 if (RT_FAILURE(rc))
2944 return rc;
2945
2946 /* last page? */
2947 if (cb <= PAGE_SIZE)
2948 {
2949 memcpy(pvDst, pvSrc, cb);
2950 PGMPhysReleasePageMappingLock(pVM, &Lock);
2951 return VINF_SUCCESS;
2952 }
2953
2954 /* copy the entire page and advance */
2955 memcpy(pvDst, pvSrc, PAGE_SIZE);
2956 PGMPhysReleasePageMappingLock(pVM, &Lock);
2957 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
2958 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2959 cb -= PAGE_SIZE;
2960 }
2961 /* won't ever get here. */
2962}
2963
2964
2965/**
2966 * Write to guest physical memory referenced by GC pointer.
2967 *
2968 * This function uses the current CR3/CR0/CR4 of the guest and will
2969 * bypass access handlers and not set dirty or accessed bits.
2970 *
2971 * @returns VBox status.
2972 * @param pVM VM handle.
2973 * @param GCPtrDst The destination address (GC pointer).
2974 * @param pvSrc The source address.
2975 * @param cb The number of bytes to write.
2976 */
2977VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2978{
2979 /*
2980 * Treat the first page as a special case.
2981 */
2982 if (!cb)
2983 return VINF_SUCCESS;
2984
2985 /* map the 1st page */
2986 void *pvDst;
2987 PGMPAGEMAPLOCK Lock;
2988 int rc = PGMPhysGCPtr2CCPtr(pVM, GCPtrDst, &pvDst, &Lock);
2989 if (RT_FAILURE(rc))
2990 return rc;
2991
2992 /* optimize for the case where access is completely within the first page. */
2993 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2994 if (RT_LIKELY(cb <= cbPage))
2995 {
2996 memcpy(pvDst, pvSrc, cb);
2997 PGMPhysReleasePageMappingLock(pVM, &Lock);
2998 return VINF_SUCCESS;
2999 }
3000
3001 /* copy to the end of the page. */
3002 memcpy(pvDst, pvSrc, cbPage);
3003 PGMPhysReleasePageMappingLock(pVM, &Lock);
3004 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3005 pvSrc = (const uint8_t *)pvSrc + cbPage;
3006 cb -= cbPage;
3007
3008 /*
3009 * Page by page.
3010 */
3011 for (;;)
3012 {
3013 /* map the page */
3014 rc = PGMPhysGCPtr2CCPtr(pVM, GCPtrDst, &pvDst, &Lock);
3015 if (RT_FAILURE(rc))
3016 return rc;
3017
3018 /* last page? */
3019 if (cb <= PAGE_SIZE)
3020 {
3021 memcpy(pvDst, pvSrc, cb);
3022 PGMPhysReleasePageMappingLock(pVM, &Lock);
3023 return VINF_SUCCESS;
3024 }
3025
3026 /* copy the entire page and advance */
3027 memcpy(pvDst, pvSrc, PAGE_SIZE);
3028 PGMPhysReleasePageMappingLock(pVM, &Lock);
3029 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3030 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3031 cb -= PAGE_SIZE;
3032 }
3033 /* won't ever get here. */
3034}
3035
3036
3037/**
3038 * Write to guest physical memory referenced by GC pointer and update the PTE.
3039 *
3040 * This function uses the current CR3/CR0/CR4 of the guest and will
3041 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3042 *
3043 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3044 *
3045 * @returns VBox status.
3046 * @param pVM VM handle.
3047 * @param GCPtrDst The destination address (GC pointer).
3048 * @param pvSrc The source address.
3049 * @param cb The number of bytes to write.
3050 */
3051VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3052{
3053 /*
3054 * Treat the first page as a special case.
3055 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3056 */
3057 if (!cb)
3058 return VINF_SUCCESS;
3059
3060 /* map the 1st page */
3061 void *pvDst;
3062 PGMPAGEMAPLOCK Lock;
3063 int rc = PGMPhysGCPtr2CCPtr(pVM, GCPtrDst, &pvDst, &Lock);
3064 if (RT_FAILURE(rc))
3065 return rc;
3066
3067 /* optimize for the case where access is completely within the first page. */
3068 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3069 if (RT_LIKELY(cb <= cbPage))
3070 {
3071 memcpy(pvDst, pvSrc, cb);
3072 PGMPhysReleasePageMappingLock(pVM, &Lock);
3073 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3074 return VINF_SUCCESS;
3075 }
3076
3077 /* copy to the end of the page. */
3078 memcpy(pvDst, pvSrc, cbPage);
3079 PGMPhysReleasePageMappingLock(pVM, &Lock);
3080 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3081 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3082 pvSrc = (const uint8_t *)pvSrc + cbPage;
3083 cb -= cbPage;
3084
3085 /*
3086 * Page by page.
3087 */
3088 for (;;)
3089 {
3090 /* map the page */
3091 rc = PGMPhysGCPtr2CCPtr(pVM, GCPtrDst, &pvDst, &Lock);
3092 if (RT_FAILURE(rc))
3093 return rc;
3094
3095 /* last page? */
3096 if (cb <= PAGE_SIZE)
3097 {
3098 memcpy(pvDst, pvSrc, cb);
3099 PGMPhysReleasePageMappingLock(pVM, &Lock);
3100 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3101 return VINF_SUCCESS;
3102 }
3103
3104 /* copy the entire page and advance */
3105 memcpy(pvDst, pvSrc, PAGE_SIZE);
3106 PGMPhysReleasePageMappingLock(pVM, &Lock);
3107 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3108 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3109 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3110 cb -= PAGE_SIZE;
3111 }
3112 /* won't ever get here. */
3113}
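
/*
 * Usage sketch (editor's illustration, not part of the original source;
 * GCPtrStack and the value are made up): writing through a guest virtual
 * address during emulation when the guest expects the A and D bits of the
 * PTE to end up set.  PGMPhysSimpleWriteGCPtr() is the variant to use when
 * the dirty bit must not be touched.
 *
 *      uint32_t const u32Ret = UINT32_C(0x1234);
 *      int rc = PGMPhysSimpleDirtyWriteGCPtr(pVM, GCPtrStack, &u32Ret, sizeof(u32Ret));
 *      if (RT_FAILURE(rc))     // e.g. the guest page table walk failed.
 *          return rc;
 */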
3114
3115
3116/**
3117 * Read from guest physical memory referenced by GC pointer.
3118 *
3119 * This function uses the current CR3/CR0/CR4 of the guest and will
3120 * respect access handlers and set accessed bits.
3121 *
3122 * @returns VBox status.
3123 * @param pVM VM handle.
3124 * @param pvDst The destination address.
3125 * @param GCPtrSrc The source address (GC pointer).
3126 * @param cb The number of bytes to read.
3127 * @thread The vCPU EMT.
3128 */
3129VMMDECL(int) PGMPhysReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3130{
3131 RTGCPHYS GCPhys;
3132 uint64_t fFlags;
3133 int rc;
3134
3135 /*
3136 * Anything to do?
3137 */
3138 if (!cb)
3139 return VINF_SUCCESS;
3140
3141 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3142
3143 /*
3144 * Optimize reads within a single page.
3145 */
3146 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3147 {
3148 /* Convert virtual to physical address + flags */
3149 rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3150 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3151 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3152
3153 /* mark the guest page as accessed. */
3154 if (!(fFlags & X86_PTE_A))
3155 {
3156 rc = PGMGstModifyPage(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3157 AssertRC(rc);
3158 }
3159
3160#ifdef VBOX_WITH_NEW_PHYS_CODE
3161 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
3162#else
3163 PGMPhysRead(pVM, GCPhys, pvDst, cb);
3164 return VINF_SUCCESS;
3165#endif
3166 }
3167
3168 /*
3169 * Page by page.
3170 */
3171 for (;;)
3172 {
3173 /* Convert virtual to physical address + flags */
3174 rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3175 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3176 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3177
3178 /* mark the guest page as accessed. */
3179 if (!(fFlags & X86_PTE_A))
3180 {
3181 rc = PGMGstModifyPage(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3182 AssertRC(rc);
3183 }
3184
3185 /* copy */
3186 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3187#ifdef VBOX_WITH_NEW_PHYS_CODE
3188 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
3189 if (cbRead >= cb || RT_FAILURE(rc))
3190 return rc;
3191#else
3192 if (cbRead >= cb)
3193 {
3194 PGMPhysRead(pVM, GCPhys, pvDst, cb);
3195 return VINF_SUCCESS;
3196 }
3197 PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
3198#endif
3199
3200 /* next */
3201 cb -= cbRead;
3202 pvDst = (uint8_t *)pvDst + cbRead;
3203 GCPtrSrc += cbRead;
3204 }
3205}
3206
3207
3208/**
3209 * Write to guest physical memory referenced by GC pointer.
3210 *
3211 * This function uses the current CR3/CR0/CR4 of the guest and will
3212 * respect access handlers and set dirty and accessed bits.
3213 *
3214 * @returns VBox status.
3215 * @retval VINF_SUCCESS.
3216 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
3217 *
3218 * @param pVM VM handle.
3219 * @param GCPtrDst The destination address (GC pointer).
3220 * @param pvSrc The source address.
3221 * @param cb The number of bytes to write.
3222 */
3223VMMDECL(int) PGMPhysWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3224{
3225 RTGCPHYS GCPhys;
3226 uint64_t fFlags;
3227 int rc;
3228
3229 /*
3230 * Anything to do?
3231 */
3232 if (!cb)
3233 return VINF_SUCCESS;
3234
3235 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3236
3237 /*
3238 * Optimize writes within a single page.
3239 */
3240 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3241 {
3242 /* Convert virtual to physical address + flags */
3243 rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3244 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3245 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3246
3247 /* Mention when we ignore X86_PTE_RW... */
3248 if (!(fFlags & X86_PTE_RW))
3249 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3250
3251 /* Mark the guest page as accessed and dirty if necessary. */
3252 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3253 {
3254 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3255 AssertRC(rc);
3256 }
3257
3258#ifdef VBOX_WITH_NEW_PHYS_CODE
3259 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
3260#else
3261 PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
3262 return VINF_SUCCESS;
3263#endif
3264 }
3265
3266 /*
3267 * Page by page.
3268 */
3269 for (;;)
3270 {
3271 /* Convert virtual to physical address + flags */
3272 rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3273 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3274 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3275
3276 /* Mention when we ignore X86_PTE_RW... */
3277 if (!(fFlags & X86_PTE_RW))
3278 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3279
3280 /* Mark the guest page as accessed and dirty if necessary. */
3281 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3282 {
3283 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3284 AssertRC(rc);
3285 }
3286
3287 /* copy */
3288 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3289#ifdef VBOX_WITH_NEW_PHYS_CODE
3290 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
3291 if (cbWrite >= cb || RT_FAILURE(rc))
3292 return rc;
3293#else
3294 if (cbWrite >= cb)
3295 {
3296 PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
3297 return VINF_SUCCESS;
3298 }
3299 PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
3300#endif
3301
3302 /* next */
3303 cb -= cbWrite;
3304 pvSrc = (uint8_t *)pvSrc + cbWrite;
3305 GCPtrDst += cbWrite;
3306 }
3307}
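
/*
 * Usage sketch (editor's illustration, not part of the original source; the
 * guest pointer and buffer are made up): the handler-respecting counterparts
 * of the Simple*GCPtr APIs.  These translate through the current guest CR3,
 * mark the pages accessed (and dirty for writes) and then go through
 * PGMPhysRead/PGMPhysWrite, so MMIO and access handlers see the access.
 *
 *      char achMsg[16];
 *      int rc = PGMPhysReadGCPtr(pVM, achMsg, GCPtrGuestString, sizeof(achMsg));
 *      if (RT_SUCCESS(rc))
 *          rc = PGMPhysWriteGCPtr(pVM, GCPtrGuestString, achMsg, sizeof(achMsg));
 */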
3308
3309#endif /* !IN_RC */
3310
3311/**
3312 * Performs a read of guest virtual memory for instruction emulation.
3313 *
3314 * This will check permissions, raise exceptions and update the access bits.
3315 *
3316 * The current implementation will bypass all access handlers. It may later be
3317 * changed to at least respect MMIO.
3318 *
3319 *
3320 * @returns VBox status code suitable to scheduling.
3321 * @retval VINF_SUCCESS if the read was performed successfully.
3322 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3323 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3324 *
3325 * @param pVM The VM handle.
3326 * @param pCtxCore The context core.
3327 * @param pvDst Where to put the bytes we've read.
3328 * @param GCPtrSrc The source address.
3329 * @param cb The number of bytes to read. Not more than a page.
3330 *
3331 * @remark This function will dynamically map physical pages in GC. This may unmap
3332 * mappings done by the caller. Be careful!
3333 */
3334VMMDECL(int) PGMPhysInterpretedRead(PVM pVM, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3335{
3336 Assert(cb <= PAGE_SIZE);
3337
3338/** @todo r=bird: This isn't perfect!
3339 * -# It's not checking for reserved bits being 1.
3340 * -# It's not correctly dealing with the access bit.
3341 * -# It's not respecting MMIO memory or any other access handlers.
3342 */
3343 /*
3344 * 1. Translate virtual to physical. This may fault.
3345 * 2. Map the physical address.
3346 * 3. Do the read operation.
3347 * 4. Set access bits if required.
3348 */
3349 int rc;
3350 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3351 if (cb <= cb1)
3352 {
3353 /*
3354 * Not crossing pages.
3355 */
3356 RTGCPHYS GCPhys;
3357 uint64_t fFlags;
3358 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags, &GCPhys);
3359 if (RT_SUCCESS(rc))
3360 {
3361 /** @todo we should check reserved bits ... */
3362 void *pvSrc;
3363 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
3364 switch (rc)
3365 {
3366 case VINF_SUCCESS:
3367 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3368 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3369 break;
3370 case VERR_PGM_PHYS_PAGE_RESERVED:
3371 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3372 memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
3373 break;
3374 default:
3375 return rc;
3376 }
3377
3378 /** @todo access bit emulation isn't 100% correct. */
3379 if (!(fFlags & X86_PTE_A))
3380 {
3381 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3382 AssertRC(rc);
3383 }
3384 return VINF_SUCCESS;
3385 }
3386 }
3387 else
3388 {
3389 /*
3390 * Crosses pages.
3391 */
3392 size_t cb2 = cb - cb1;
3393 uint64_t fFlags1;
3394 RTGCPHYS GCPhys1;
3395 uint64_t fFlags2;
3396 RTGCPHYS GCPhys2;
3397 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags1, &GCPhys1);
3398 if (RT_SUCCESS(rc))
3399 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3400 if (RT_SUCCESS(rc))
3401 {
3402 /** @todo we should check reserved bits ... */
3403 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3404 void *pvSrc1;
3405 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
3406 switch (rc)
3407 {
3408 case VINF_SUCCESS:
3409 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3410 break;
3411 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3412 memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
3413 break;
3414 default:
3415 return rc;
3416 }
3417
3418 void *pvSrc2;
3419 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
3420 switch (rc)
3421 {
3422 case VINF_SUCCESS:
3423 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3424 break;
3425 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3426 memset((uint8_t *)pvDst + cb1, 0, cb2); /** @todo this is wrong, it should be 0xff */
3427 break;
3428 default:
3429 return rc;
3430 }
3431
3432 if (!(fFlags1 & X86_PTE_A))
3433 {
3434 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3435 AssertRC(rc);
3436 }
3437 if (!(fFlags2 & X86_PTE_A))
3438 {
3439 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3440 AssertRC(rc);
3441 }
3442 return VINF_SUCCESS;
3443 }
3444 }
3445
3446 /*
3447 * Raise a #PF.
3448 */
3449 uint32_t uErr;
3450
3451 /* Get the current privilege level. */
3452 uint32_t cpl = CPUMGetGuestCPL(pVM, pCtxCore);
3453 switch (rc)
3454 {
3455 case VINF_SUCCESS:
3456 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3457 break;
3458
3459 case VERR_PAGE_NOT_PRESENT:
3460 case VERR_PAGE_TABLE_NOT_PRESENT:
3461 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3462 break;
3463
3464 default:
3465 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3466 return rc;
3467 }
3468 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3469 return TRPMRaiseXcptErrCR2(pVM, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3470}
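
/*
 * Usage sketch (editor's illustration, not part of the original source;
 * GCPtrOperand and the operand size come from a hypothetical decoded
 * instruction, pRegFrame being the trap register frame): an instruction
 * emulator fetching a memory operand.  On a translation failure the function
 * raises the #PF itself and returns a scheduling status, so the caller just
 * propagates whatever comes back.
 *
 *      uint64_t u64Operand;
 *      int rc = PGMPhysInterpretedRead(pVM, pRegFrame, &u64Operand, GCPtrOperand, sizeof(u64Operand));
 *      if (rc != VINF_SUCCESS)
 *          return rc;  // VINF_EM_RAW_GUEST_TRAP, VINF_TRPM_XCPT_DISPATCHED or a real error.
 */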
3471
3472
3473/**
3474 * Performs a read of guest virtual memory for instruction emulation.
3475 *
3476 * This will check permissions, raise exceptions and update the access bits.
3477 *
3478 * The current implementation will bypass all access handlers. It may later be
3479 * changed to at least respect MMIO.
3480 *
3481 *
3482 * @returns VBox status code suitable for scheduling.
3483 * @retval VINF_SUCCESS if the read was performed successfully.
3484 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3485 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3486 *
3487 * @param pVM The VM handle.
3488 * @param pCtxCore The context core.
3489 * @param pvDst Where to put the bytes we've read.
3490 * @param GCPtrSrc The source address.
3491 * @param cb The number of bytes to read. Not more than a page.
3492 * @param fRaiseTrap If set, the trap will be raised as per the spec; if clear,
3493 *                   an appropriate error status will be returned (no
3494 *                   informational status at all).
3495 *
3496 *
3497 * @remarks Takes the PGM lock.
3498 * @remarks A page fault on the 2nd page of the access will be raised without
3499 *          the first page having been read, since we're ASSUMING that the
3500 *          caller is emulating an instruction access.
3501 * @remarks This function will dynamically map physical pages in GC. This may
3502 * unmap mappings done by the caller. Be careful!
3503 */
3504VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVM pVM, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
3505{
3506 Assert(cb <= PAGE_SIZE);
3507
3508 /*
3509 * 1. Translate virtual to physical. This may fault.
3510 * 2. Map the physical address.
3511 * 3. Do the read operation.
3512 * 4. Set access bits if required.
3513 */
3514 int rc;
3515 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3516 if (cb <= cb1)
3517 {
3518 /*
3519 * Not crossing pages.
3520 */
3521 RTGCPHYS GCPhys;
3522 uint64_t fFlags;
3523 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags, &GCPhys);
3524 if (RT_SUCCESS(rc))
3525 {
3526 if (1) /** @todo we should check reserved bits ... */
3527 {
3528 const void *pvSrc;
3529 PGMPAGEMAPLOCK Lock;
3530 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3531 switch (rc)
3532 {
3533 case VINF_SUCCESS:
3534 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3535 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3536 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3537 break;
3538 case VERR_PGM_PHYS_PAGE_RESERVED:
3539 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3540 memset(pvDst, 0xff, cb);
3541 break;
3542 default:
3543 AssertMsgFailed(("%Rrc\n", rc));
3544 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3545 return rc;
3546 }
3547 PGMPhysReleasePageMappingLock(pVM, &Lock);
3548
3549 if (!(fFlags & X86_PTE_A))
3550 {
3551 /** @todo access bit emulation isn't 100% correct. */
3552 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3553 AssertRC(rc);
3554 }
3555 return VINF_SUCCESS;
3556 }
3557 }
3558 }
3559 else
3560 {
3561 /*
3562 * Crosses pages.
3563 */
3564 size_t cb2 = cb - cb1;
3565 uint64_t fFlags1;
3566 RTGCPHYS GCPhys1;
3567 uint64_t fFlags2;
3568 RTGCPHYS GCPhys2;
3569 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags1, &GCPhys1);
3570 if (RT_SUCCESS(rc))
3571 {
3572 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3573 if (RT_SUCCESS(rc))
3574 {
3575 if (1) /** @todo we should check reserved bits ... */
3576 {
3577 const void *pvSrc;
3578 PGMPAGEMAPLOCK Lock;
3579 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3580 switch (rc)
3581 {
3582 case VINF_SUCCESS:
3583 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3584 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3585 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3586 PGMPhysReleasePageMappingLock(pVM, &Lock);
3587 break;
3588 case VERR_PGM_PHYS_PAGE_RESERVED:
3589 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3590 memset(pvDst, 0xff, cb1);
3591 break;
3592 default:
3593 AssertMsgFailed(("%Rrc\n", rc));
3594 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3595 return rc;
3596 }
3597
3598 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3599 switch (rc)
3600 {
3601 case VINF_SUCCESS:
3602 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3603 PGMPhysReleasePageMappingLock(pVM, &Lock);
3604 break;
3605 case VERR_PGM_PHYS_PAGE_RESERVED:
3606 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3607 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3608 break;
3609 default:
3610 AssertMsgFailed(("%Rrc\n", rc));
3611 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3612 return rc;
3613 }
3614
3615 if (!(fFlags1 & X86_PTE_A))
3616 {
3617 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3618 AssertRC(rc);
3619 }
3620 if (!(fFlags2 & X86_PTE_A))
3621 {
3622 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3623 AssertRC(rc);
3624 }
3625 return VINF_SUCCESS;
3626 }
3627 /* sort out which page */
3628 }
3629 else
3630 GCPtrSrc += cb1; /* fault on 2nd page */
3631 }
3632 }
3633
3634 /*
3635 * Raise a #PF if we're allowed to do that.
3636 */
3637 /* Calc the error bits. */
3638 uint32_t cpl = CPUMGetGuestCPL(pVM, pCtxCore);
3639 uint32_t uErr;
3640 switch (rc)
3641 {
3642 case VINF_SUCCESS:
3643 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3644 rc = VERR_ACCESS_DENIED;
3645 break;
3646
3647 case VERR_PAGE_NOT_PRESENT:
3648 case VERR_PAGE_TABLE_NOT_PRESENT:
3649 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3650 break;
3651
3652 default:
3653 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3654 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3655 return rc;
3656 }
3657 if (fRaiseTrap)
3658 {
3659 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3660 return TRPMRaiseXcptErrCR2(pVM, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3661 }
3662 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3663 return rc;
3664}
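
/*
 * Illustrative sketch only, not part of the original sources: one way a caller
 * might use fRaiseTrap = false to probe guest memory, getting an error status
 * (e.g. VERR_PAGE_NOT_PRESENT or VERR_ACCESS_DENIED) back instead of having a
 * #PF raised on the guest. The wrapper name is hypothetical and the block is
 * disabled so it cannot affect the build.
 */
#if 0
static bool pgmSketchTryPeekByte(PVM pVM, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtr, uint8_t *pb)
{
    /* On success the byte is copied to *pb and the accessed bit is set if
       necessary; on failure no exception is injected because fRaiseTrap is clear. */
    int rc = PGMPhysInterpretedReadNoHandlers(pVM, pCtxCore, pb, GCPtr, 1, false /*fRaiseTrap*/);
    return rc == VINF_SUCCESS;
}
#endif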
3665
3666
3667/**
3668 * Performs a write to guest virtual memory for instruction emulation.
3669 *
3670 * This will check permissions, raise exceptions and update the dirty and access
3671 * bits.
3672 *
3673 * @returns VBox status code suitable for scheduling.
3674 * @retval VINF_SUCCESS if the write was performed successfully.
3675 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3676 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3677 *
3678 * @param pVM The VM handle.
3679 * @param pCtxCore The context core.
3680 * @param GCPtrDst The destination address.
3681 * @param pvSrc What to write.
3682 * @param cb The number of bytes to write. Not more than a page.
3683 * @param fRaiseTrap If set, the trap will be raised as per the spec; if clear,
3684 *                   an appropriate error status will be returned (no
3685 *                   informational status at all).
3686 *
3687 * @remarks Takes the PGM lock.
3688 * @remarks A page fault on the 2nd page of the access will be raised without
3689 * writing the bits on the first page since we're ASSUMING that the
3690 * caller is emulating an instruction access.
3691 * @remarks This function will dynamically map physical pages in GC. This may
3692 * unmap mappings done by the caller. Be careful!
3693 */
3694VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVM pVM, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
3695{
3696 Assert(cb <= PAGE_SIZE);
3697
3698 /*
3699 * 1. Translate virtual to physical. This may fault.
3700 * 2. Map the physical address.
3701 * 3. Do the write operation.
3702 * 4. Set access bits if required.
3703 */
3704 int rc;
3705 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3706 if (cb <= cb1)
3707 {
3708 /*
3709 * Not crossing pages.
3710 */
3711 RTGCPHYS GCPhys;
3712 uint64_t fFlags;
3713 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrDst, &fFlags, &GCPhys);
3714 if (RT_SUCCESS(rc))
3715 {
3716 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3717 || ( !(CPUMGetGuestCR0(pVM) & X86_CR0_WP)
3718 && CPUMGetGuestCPL(pVM, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3719 {
3720 void *pvDst;
3721 PGMPAGEMAPLOCK Lock;
3722 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3723 switch (rc)
3724 {
3725 case VINF_SUCCESS:
3726 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3727 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3728 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3729 PGMPhysReleasePageMappingLock(pVM, &Lock);
3730 break;
3731 case VERR_PGM_PHYS_PAGE_RESERVED:
3732 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3733 /* bit bucket */
3734 break;
3735 default:
3736 AssertMsgFailed(("%Rrc\n", rc));
3737 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3738 return rc;
3739 }
3740
3741 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3742 {
3743 /** @todo dirty & access bit emulation isn't 100% correct. */
3744 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3745 AssertRC(rc);
3746 }
3747 return VINF_SUCCESS;
3748 }
3749 rc = VERR_ACCESS_DENIED;
3750 }
3751 }
3752 else
3753 {
3754 /*
3755 * Crosses pages.
3756 */
3757 size_t cb2 = cb - cb1;
3758 uint64_t fFlags1;
3759 RTGCPHYS GCPhys1;
3760 uint64_t fFlags2;
3761 RTGCPHYS GCPhys2;
3762 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrDst, &fFlags1, &GCPhys1);
3763 if (RT_SUCCESS(rc))
3764 {
3765 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3766 if (RT_SUCCESS(rc))
3767 {
3768 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3769 && (fFlags2 & X86_PTE_RW))
3770 || ( !(CPUMGetGuestCR0(pVM) & X86_CR0_WP)
3771 && CPUMGetGuestCPL(pVM, pCtxCore) <= 2) )
3772 {
3773 void *pvDst;
3774 PGMPAGEMAPLOCK Lock;
3775 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3776 switch (rc)
3777 {
3778 case VINF_SUCCESS:
3779 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3780 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3781 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3782 PGMPhysReleasePageMappingLock(pVM, &Lock);
3783 break;
3784 case VERR_PGM_PHYS_PAGE_RESERVED:
3785 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3786 /* bit bucket */
3787 break;
3788 default:
3789 AssertMsgFailed(("%Rrc\n", rc));
3790 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3791 return rc;
3792 }
3793
3794 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3795 switch (rc)
3796 {
3797 case VINF_SUCCESS:
3798 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3799 PGMPhysReleasePageMappingLock(pVM, &Lock);
3800 break;
3801 case VERR_PGM_PHYS_PAGE_RESERVED:
3802 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3803 /* bit bucket */
3804 break;
3805 default:
3806 AssertMsgFailed(("%Rrc\n", rc));
3807 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3808 return rc;
3809 }
3810
3811 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3812 {
3813 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3814 AssertRC(rc);
3815 }
3816 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3817 {
3818 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3819 AssertRC(rc);
3820 }
3821 return VINF_SUCCESS;
3822 }
3823 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3824 GCPtrDst += cb1; /* fault on the 2nd page. */
3825 rc = VERR_ACCESS_DENIED;
3826 }
3827 else
3828 GCPtrDst += cb1; /* fault on the 2nd page. */
3829 }
3830 }
3831
3832 /*
3833 * Raise a #PF if we're allowed to do that.
3834 */
3835 /* Calc the error bits. */
3836 uint32_t uErr;
3837 uint32_t cpl = CPUMGetGuestCPL(pVM, pCtxCore);
3838 switch (rc)
3839 {
3840 case VINF_SUCCESS:
3841 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3842 rc = VERR_ACCESS_DENIED;
3843 break;
3844
3845 case VERR_ACCESS_DENIED:
3846 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3847 break;
3848
3849 case VERR_PAGE_NOT_PRESENT:
3850 case VERR_PAGE_TABLE_NOT_PRESENT:
3851 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3852 break;
3853
3854 default:
3855 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3856 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3857 return rc;
3858 }
3859 if (fRaiseTrap)
3860 {
3861 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3862 return TRPMRaiseXcptErrCR2(pVM, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3863 }
3864 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3865 return rc;
3866}
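
/*
 * Illustrative sketch only, not part of the original sources: how an
 * instruction emulator might combine the two NoHandlers routines above for a
 * read-modify-write memory operand, letting them raise the #PF themselves
 * (fRaiseTrap = true) and simply propagating the status. The wrapper name and
 * the NOT operation are made up for the example; the block is disabled so it
 * cannot affect the build.
 */
#if 0
static int pgmSketchEmulateNotM32(PVM pVM, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrMem)
{
    /* 1. Fetch the 32-bit operand; on success the accessed bit is set if necessary. */
    uint32_t u32;
    int rc = PGMPhysInterpretedReadNoHandlers(pVM, pCtxCore, &u32, GCPtrMem, sizeof(u32), true /*fRaiseTrap*/);
    if (rc != VINF_SUCCESS)
        return rc; /* trap dispatched/pending or a genuine failure status */

    /* 2. Do the operation on the local copy. */
    u32 = ~u32;

    /* 3. Write the result back; on success the accessed and dirty bits are updated. */
    return PGMPhysInterpretedWriteNoHandlers(pVM, pCtxCore, GCPtrMem, &u32, sizeof(u32), true /*fRaiseTrap*/);
}
#endif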
3867
3868