VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@4859

Last change on this file since 4859 was 4811, checked in by vboxsync, 17 years ago

Split VMMR0Entry into VMMR0EntryInt, VMMR0EntryFast and VMMR0EntryEx. This will prevent the SUPCallVMMR0Ex path from causing harm and messing up the paths that have to be optimized.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 78.1 KB
1/* $Id: PGMAllPhys.cpp 4811 2007-09-14 17:53:56Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @def PGM_IGNORE_RAM_FLAGS_RESERVED
19 * Don't respect the MM_RAM_FLAGS_RESERVED flag when converting to HC addresses.
20 *
21 * Since this flag is currently incorrectly kept set for ROM regions we will
22 * have to ignore it for now so we don't break stuff.
23 *
24 * @todo this has been fixed now I believe, remove this hack.
25 */
26#define PGM_IGNORE_RAM_FLAGS_RESERVED
27
28
29/*******************************************************************************
30* Header Files *
31*******************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM_PHYS
33#include <VBox/pgm.h>
34#include <VBox/trpm.h>
35#include <VBox/vmm.h>
36#include <VBox/iom.h>
37#include <VBox/rem.h>
38#include "PGMInternal.h"
39#include <VBox/vm.h>
40#include <VBox/param.h>
41#include <VBox/err.h>
42#include <iprt/assert.h>
43#include <iprt/string.h>
44#include <iprt/asm.h>
45#include <VBox/log.h>
46#ifdef IN_RING3
47# include <iprt/thread.h>
48#endif
49
50
51
52/**
53 * Checks if Address Gate 20 is enabled or not.
54 *
55 * @returns true if enabled.
56 * @returns false if disabled.
57 * @param pVM VM handle.
58 */
59PGMDECL(bool) PGMPhysIsA20Enabled(PVM pVM)
60{
61 LogFlow(("PGMPhysIsA20Enabled %d\n", pVM->pgm.s.fA20Enabled));
62 return !!pVM->pgm.s.fA20Enabled; /* stupid MS compiler doesn't trust me. */
63}
64
65
66/**
67 * Validates a GC physical address.
68 *
69 * @returns true if valid.
70 * @returns false if invalid.
71 * @param pVM The VM handle.
72 * @param GCPhys The physical address to validate.
73 */
74PGMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
75{
76 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
77 return pPage != NULL;
78}
79
80
81/**
82 * Checks if a GC physical address is a normal page,
83 * i.e. not ROM, MMIO or reserved.
84 *
85 * @returns true if normal.
86 * @returns false if invalid, ROM, MMIO or reserved page.
87 * @param pVM The VM handle.
88 * @param GCPhys The physical address to check.
89 */
90PGMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
91{
92 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
93 return pPage
94 && !(pPage->HCPhys & (MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO2));
95}
96
97
98/**
99 * Converts a GC physical address to a HC physical address.
100 *
101 * @returns VINF_SUCCESS on success.
102 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
103 * page but has no physical backing.
104 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
105 * GC physical address.
106 *
107 * @param pVM The VM handle.
108 * @param GCPhys The GC physical address to convert.
109 * @param pHCPhys Where to store the HC physical address on success.
110 */
111PGMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
112{
113 PPGMPAGE pPage;
114 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
115 if (VBOX_FAILURE(rc))
116 return rc;
117
118#ifndef PGM_IGNORE_RAM_FLAGS_RESERVED
119 if (RT_UNLIKELY(pPage->HCPhys & MM_RAM_FLAGS_RESERVED)) /** @todo PAGE FLAGS */
120 return VERR_PGM_PHYS_PAGE_RESERVED;
121#endif
122
123 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
124 return VINF_SUCCESS;
125}
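/*
 * Editor's illustrative sketch (not part of the original file): typical use of
 * PGMPhysGCPhys2HCPhys. The pgmExample* helper name and the 0x1000 address are
 * hypothetical values chosen only for the example.
 */
#if 0
static void pgmExampleGCPhys2HCPhys(PVM pVM)
{
    RTHCPHYS HCPhys;
    int rc = PGMPhysGCPhys2HCPhys(pVM, (RTGCPHYS)0x1000, &HCPhys);
    if (VBOX_SUCCESS(rc))
        Log(("example: GCPhys %RGp -> HCPhys %RHp\n", (RTGCPHYS)0x1000, HCPhys));
    else
        Log(("example: reserved or invalid address, rc=%d\n", rc));
}
#endif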
126
127
128/**
129 * Invalidates the GC page mapping TLB.
130 *
131 * @param pVM The VM handle.
132 */
133PDMDECL(void) PGMPhysInvalidatePageGCMapTLB(PVM pVM)
134{
135 /* later */
136 NOREF(pVM);
137}
138
139
140/**
141 * Invalidates the ring-0 page mapping TLB.
142 *
143 * @param pVM The VM handle.
144 */
145PDMDECL(void) PGMPhysInvalidatePageR0MapTLB(PVM pVM)
146{
147 PGMPhysInvalidatePageR3MapTLB(pVM);
148}
149
150
151/**
152 * Invalidates the ring-3 page mapping TLB.
153 *
154 * @param pVM The VM handle.
155 */
156PDMDECL(void) PGMPhysInvalidatePageR3MapTLB(PVM pVM)
157{
158 pgmLock(pVM);
159 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
160 {
161 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
162 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
163 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
164 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
165 }
166 pgmUnlock(pVM);
167}
168
169
170
171/**
172 * Makes sure that there is at least one handy page ready for use.
173 *
174 * This will also take the appropriate actions when reaching water-marks.
175 *
176 * @returns The following VBox status codes.
177 * @retval VINF_SUCCESS on success.
178 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
179 *
180 * @param pVM The VM handle.
181 *
182 * @remarks Must be called from within the PGM critical section. It may
183 * nip back to ring-3/0 in some cases.
184 */
185static int pgmPhysEnsureHandyPage(PVM pVM)
186{
187 /** @remarks
188 * low-water mark logic for R0 & GC:
189 * - 75%: Set FF.
190 * - 50%: Force return to ring-3 ASAP.
191 *
192 * For ring-3 there is a little problem wrt the recompiler, so:
193 * - 75%: Set FF.
194 * - 50%: Try to allocate pages; on failure we'll force REM to quit ASAP.
195 *
196 * The basic idea is that we should be able to get out of any situation with
197 * only 50% of handy pages remaining.
198 *
199 * At the moment we'll not adjust the number of handy pages relative to the
200 * actual VM RAM commitment, that's too much work for now.
201 */
202 Assert(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages));
203 if ( !pVM->pgm.s.cHandyPages
204#ifdef IN_RING3
205 || pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 2 /* 50% */
206#endif
207 )
208 {
209 Log(("PGM: cHandyPages=%u out of %u -> allocate more\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
210#ifdef IN_RING3
211 int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
212#elif defined(IN_RING0)
213 /** @todo call PGMR0PhysAllocateHandyPages directly - need to make sure we can call kernel code first and deal with the seeding fallback. */
214 int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
215#else
216 int rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
217#endif
218 if (RT_UNLIKELY(rc != VINF_SUCCESS))
219 {
220 Assert(rc == VINF_EM_NO_MEMORY);
221 if (!pVM->pgm.s.cHandyPages)
222 {
223 LogRel(("PGM: no more handy pages!\n"));
224 return VERR_EM_NO_MEMORY;
225 }
226 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
227#ifdef IN_RING3
228 REMR3NotifyFF(pVM);
229#else
230 VM_FF_SET(pVM, VM_FF_TO_R3);
231#endif
232 }
233 Assert(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages));
234 }
235 else if (pVM->pgm.s.cHandyPages - 1 <= (RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 4) * 3) /* 75% */
236 {
237 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
238#ifndef IN_RING3
239 if (pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 2)
240 {
241 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
242 VM_FF_SET(pVM, VM_FF_TO_R3);
243 }
244#endif
245 }
246
247 return VINF_SUCCESS;
248}
249
250
251/**
252 * Replace a zero or shared page with a new page that we can write to.
253 *
254 * @returns The following VBox status codes.
255 * @retval VINF_SUCCESS on success, pPage is modified.
256 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
257 *
258 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
259 *
260 * @param pVM The VM address.
261 * @param pPage The physical page tracking structure. This will
262 * be modified on success.
263 * @param GCPhys The address of the page.
264 *
265 * @remarks Must be called from within the PGM critical section. It may
266 * nip back to ring-3/0 in some cases.
267 *
268 * @remarks This function shouldn't really fail, however if it does
269 * it probably means we've screwed up the amount and/or the
270 * low-water mark of handy pages. Or that some device I/O is
271 * causing a lot of pages to be allocated while the host is
272 * in a low-memory condition.
273 */
274int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
275{
276 /*
277 * Ensure that we've got a page handy, take it and use it.
278 */
279 int rc = pgmPhysEnsureHandyPage(pVM);
280 if (VBOX_FAILURE(rc))
281 {
282 Assert(rc == VERR_EM_NO_MEMORY);
283 return rc;
284 }
285 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%d %RGp\n", PGM_PAGE_GET_STATE(pPage), GCPhys));
286 Assert(!PGM_PAGE_IS_RESERVED(pPage));
287 Assert(!PGM_PAGE_IS_MMIO(pPage));
288
289 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
290 Assert(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages));
291 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
292 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
293 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
294 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
295
296 /*
297 * There are one or two actions to be taken the next time we allocate handy pages:
298 * - Tell the GMM (global memory manager) what the page is being used for.
299 * (Speeds up replacement operations - sharing and defragmenting.)
300 * - If the current backing is shared, it must be freed.
301 */
302 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
303 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys;
304
305 if (PGM_PAGE_IS_SHARED(pPage))
306 {
307 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
308 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
309 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
310
311 Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
312 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
313 STAM_COUNTER_INC(&pVM->pgm.s.StatPageReplaceShared);
314 pVM->pgm.s.cSharedPages--;
315 }
316 else
317 {
318 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
319 STAM_COUNTER_INC(&pVM->pgm.s.StatPageReplaceZero);
320 pVM->pgm.s.cZeroPages--;
321 }
322
323 /*
324 * Do the PGMPAGE modifications.
325 */
326 pVM->pgm.s.cPrivatePages++;
327 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
328 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
329 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
330
331 return VINF_SUCCESS;
332}
333
334
335/**
336 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
337 *
338 * @returns VBox status code.
339 * @retval VINF_SUCCESS on success.
340 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
341 *
342 * @param pVM The VM address.
343 * @param pPage The physical page tracking structure.
344 * @param GCPhys The address of the page.
345 *
346 * @remarks Called from within the PGM critical section.
347 */
348int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
349{
350 switch (pPage->u2State)
351 {
352 case PGM_PAGE_STATE_WRITE_MONITORED:
353 pPage->fWrittenTo = true;
354 pPage->u2State = PGM_PAGE_STATE_ALLOCATED;
355 /* fall thru */
356 default: /* to shut up GCC */
357 case PGM_PAGE_STATE_ALLOCATED:
358 return VINF_SUCCESS;
359
360 /*
361 * Zero pages can be dummy pages for MMIO or reserved memory,
362 * so we need to check the flags before joining cause with
363 * shared page replacement.
364 */
365 case PGM_PAGE_STATE_ZERO:
366 if ( PGM_PAGE_IS_MMIO(pPage)
367 || PGM_PAGE_IS_RESERVED(pPage))
368 return VERR_PGM_PHYS_PAGE_RESERVED;
369 /* fall thru */
370 case PGM_PAGE_STATE_SHARED:
371 return pgmPhysAllocPage(pVM, pPage, GCPhys);
372 }
373}
374
375
376/**
377 * Maps a page into the current virtual address space so it can be accessed.
378 *
379 * @returns VBox status code.
380 * @retval VINF_SUCCESS on success.
381 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
382 *
383 * @param pVM The VM address.
384 * @param pPage The physical page tracking structure.
385 * @param GCPhys The address of the page.
386 * @param ppMap Where to store the address of the mapping tracking structure.
387 * @param ppv Where to store the mapping address of the page. The page
388 * offset is masked off!
389 *
390 * @remarks Called from within the PGM critical section.
391 */
392int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
393{
394#ifdef IN_GC
395 /*
396 * Just some sketchy GC code.
397 */
398 *ppMap = NULL;
399 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
400 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
401 return PGMGCDynMapHCPage(pVM, HCPhys, ppv);
402
403#else /* IN_RING3 || IN_RING0 */
404
405 /*
406 * Find/make Chunk TLB entry for the mapping chunk.
407 */
408 PPGMCHUNKR3MAP pMap;
409 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
410 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
411 if (pTlbe->idChunk == idChunk)
412 {
413 STAM_COUNTER_INC(&pVM->pgm.s.StatChunkR3MapTlbHits);
414 pMap = pTlbe->pChunk;
415 }
416 else if (idChunk != NIL_GMM_CHUNKID)
417 {
418 STAM_COUNTER_INC(&pVM->pgm.s.StatChunkR3MapTlbMisses);
419
420 /*
421 * Find the chunk, map it if necessary.
422 */
423 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
424 if (!pMap)
425 {
426#ifdef IN_RING0
427 int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_MAP_CHUNK, idChunk);
428 AssertRCReturn(rc, rc);
429 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
430 Assert(pMap);
431#else
432 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
433 if (VBOX_FAILURE(rc))
434 return rc;
435#endif
436 }
437
438 /*
439 * Enter it into the Chunk TLB.
440 */
441 pTlbe->idChunk = idChunk;
442 pTlbe->pChunk = pMap;
443 pMap->iAge = 0;
444 }
445 else
446 {
447 Assert(PGM_PAGE_IS_ZERO(pPage));
448 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
449 *ppMap = NULL;
450 return VINF_SUCCESS;
451 }
452
453 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
454 *ppMap = pMap;
455 return VINF_SUCCESS;
456#endif /* IN_RING3 */
457}
458
459
460#ifndef IN_GC
461/**
462 * Load a guest page into the ring-3 physical TLB.
463 *
464 * @returns VBox status code.
465 * @retval VINF_SUCCESS on success
466 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
467 * @param pPGM The PGM instance pointer.
468 * @param GCPhys The guest physical address in question.
469 */
470int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
471{
472 STAM_COUNTER_INC(&pPGM->CTXMID(StatPage,MapTlbMisses));
473
474 /*
475 * Find the ram range.
476 * 99.8% of requests are expected to be in the first range.
477 */
478 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
479 RTGCPHYS off = GCPhys - pRam->GCPhys;
480 if (RT_UNLIKELY(off >= pRam->cb))
481 {
482 do
483 {
484 pRam = CTXSUFF(pRam->pNext);
485 if (!pRam)
486 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
487 off = GCPhys - pRam->GCPhys;
488 } while (off >= pRam->cb);
489 }
490
491 /*
492 * Map the page.
493 * Make a special case for the zero page as it is kind of special.
494 */
495 PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
496 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
497 if (!PGM_PAGE_IS_ZERO(pPage))
498 {
499 void *pv;
500 PPGMPAGEMAP pMap;
501 int rc = pgmPhysPageMap(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
502 if (VBOX_FAILURE(rc))
503 return rc;
504 pTlbe->pMap = pMap;
505 pTlbe->pv = pv;
506 }
507 else
508 {
509 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
510 pTlbe->pMap = NULL;
511 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
512 }
513 pTlbe->pPage = pPage;
514 return VINF_SUCCESS;
515}
516#endif /* !IN_GC */
517
518
519/**
520 * Requests the mapping of a guest page into the current context.
521 *
522 * This API should only be used for very short term, as it will consume
523 * scarce resources (R0 and GC) in the mapping cache. When you're done
524 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
525 *
526 * This API will assume your intention is to write to the page, and will
527 * therefore replace shared and zero pages. If you do not intend to modify
528 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
529 *
530 * @returns VBox status code.
531 * @retval VINF_SUCCESS on success.
532 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
533 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
534 *
535 * @param pVM The VM handle.
536 * @param GCPhys The guest physical address of the page that should be mapped.
537 * @param ppv Where to store the address corresponding to GCPhys.
538 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
539 *
540 * @remark Avoid calling this API from within critical sections (other than
541 * the PGM one) because of the deadlock risk.
542 * @thread Any thread.
543 */
544PGMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
545{
546#ifdef NEW_PHYS_CODE
547#ifdef IN_GC
548 /* Until a physical TLB is implemented for GC, let PGMGCDynMapGCPageEx handle it. */
549 return PGMGCDynMapGCPageEx(pVM, GCPhys, ppv);
550#else
551 int rc = pgmLock(pVM);
552 AssertRCReturn(rc, rc);
553
554 /*
555 * Query the Physical TLB entry for the page (may fail).
556 */
557 PPGMPAGEMAPTLBE pTlbe;
558 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
559 if (RT_SUCCESS(rc))
560 {
561 /*
562 * If the page is shared, the zero page, or being write monitored
563 * it must be converted to a page that's writable if possible.
564 */
565 PPGMPAGE pPage = pTlbe->pPage;
566 if (RT_UNLIKELY(pPage->u2State != PGM_PAGE_STATE_ALLOCATED))
567 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
568 if (RT_SUCCESS(rc))
569 {
570 /*
571 * Now, just perform the locking and calculate the return address.
572 */
573 PPGMPAGEMAP pMap = pTlbe->pMap;
574 pMap->cRefs++;
575 if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
576 if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
577 {
578 AssertMsgFailed(("%VGp is entering permanent locked state!\n", GCPhys));
579 pMap->cRefs++; /* Extra ref to prevent it from going away. */
580 }
581
582 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
583 pLock->pvPage = pPage;
584 pLock->pvMap = pMap;
585 }
586 }
587
588 pgmUnlock(pVM);
589 return rc;
590
591#endif /* IN_RING3 || IN_RING0 */
592
593#else
594 /*
595 * Temporary fallback code.
596 */
597# ifdef IN_GC
598 return PGMGCDynMapGCPageEx(pVM, GCPhys, ppv);
599# else
600 return PGMPhysGCPhys2HCPtr(pVM, GCPhys, 1, ppv);
601# endif
602#endif
603}
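/*
 * Editor's illustrative sketch (not part of the original file): the intended
 * map/use/release pattern for the short-term mapping API. The pgmExample*
 * helper and the byte value written are hypothetical.
 */
#if 0
static int pgmExampleMapWriteRelease(PVM pVM, RTGCPHYS GCPhys)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (VBOX_SUCCESS(rc))
    {
        *(uint8_t *)pv = 0x42;                      /* use the mapping briefly... */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* ...and release it ASAP. */
    }
    return rc;
}
#endif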
604
605
606/**
607 * Requests the read-only mapping of a guest page into the current context.
608 *
609 * This API should only be used for very short term, as it will consume
610 * scarce resources (R0 and GC) in the mapping cache. When you're done
611 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
612 *
613 * @returns VBox status code.
614 * @retval VINF_SUCCESS on success.
615 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
616 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
617 *
618 * @param pVM The VM handle.
619 * @param GCPhys The guest physical address of the page that should be mapped.
620 * @param ppv Where to store the address corresponding to GCPhys.
621 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
622 *
623 * @remark Avoid calling this API from within critical sections (other than
624 * the PGM one) because of the deadlock risk.
625 * @thread Any thread.
626 */
627PGMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void * const *ppv, PPGMPAGEMAPLOCK pLock)
628{
629 /** @todo implement this */
630 return PGMPhysGCPhys2CCPtr(pVM, GCPhys, (void **)ppv, pLock);
631}
632
633
634/**
635 * Requests the mapping of a guest page given by virtual address into the current context.
636 *
637 * This API should only be used for very short term, as it will consume
638 * scarce resources (R0 and GC) in the mapping cache. When you're done
639 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
640 *
641 * This API will assume your intention is to write to the page, and will
642 * therefore replace shared and zero pages. If you do not intend to modify
643 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
644 *
645 * @returns VBox status code.
646 * @retval VINF_SUCCESS on success.
647 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
648 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
649 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
650 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
651 *
652 * @param pVM The VM handle.
653 * @param GCPtr The guest virtual address of the page that should be mapped.
654 * @param ppv Where to store the address corresponding to GCPtr.
655 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
656 *
657 * @remark Avoid calling this API from within critical sections (other than
658 * the PGM one) because of the deadlock risk.
659 * @thread EMT
660 */
661PGMDECL(int) PGMPhysGCPtr2CCPtr(PVM pVM, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
662{
663 RTGCPHYS GCPhys;
664 int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
665 if (VBOX_SUCCESS(rc))
666 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, pLock);
667 return rc;
668}
669
670
671/**
672 * Requests the mapping of a guest page given by virtual address into the current context.
673 *
674 * This API should only be used for very short term, as it will consume
675 * scarce resources (R0 and GC) in the mapping cache. When you're done
676 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
677 *
678 * @returns VBox status code.
679 * @retval VINF_SUCCESS on success.
680 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
681 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
682 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
683 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
684 *
685 * @param pVM The VM handle.
686 * @param GCPtr The guest virtual address of the page that should be mapped.
687 * @param ppv Where to store the address corresponding to GCPtr.
688 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
689 *
690 * @remark Avoid calling this API from within critical sections (other than
691 * the PGM one) because of the deadlock risk.
692 * @thread EMT
693 */
694PGMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVM pVM, RTGCPTR GCPtr, void * const *ppv, PPGMPAGEMAPLOCK pLock)
695{
696 RTGCPHYS GCPhys;
697 int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
698 if (VBOX_SUCCESS(rc))
699 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, pLock);
700 return rc;
701}
702
703
704/**
705 * Release the mapping of a guest page.
706 *
707 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
708 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
709 *
710 * @param pVM The VM handle.
711 * @param pLock The lock structure initialized by the mapping function.
712 */
713PGMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
714{
715#ifdef NEW_PHYS_CODE
716#ifdef IN_GC
717 /* currently nothing to do here. */
718/* --- postponed
719#elif defined(IN_RING0)
720*/
721
722#else /* IN_RING3 */
723 pgmLock(pVM);
724
725 PPGMPAGE pPage = (PPGMPAGE)pLock->pvPage;
726 Assert(pPage->cLocks >= 1);
727 if (pPage->cLocks != PGM_PAGE_MAX_LOCKS)
728 pPage->cLocks--;
729
730 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pLock->pvChunk;
731 Assert(pChunk->cRefs >= 1);
732 pChunk->cRefs--;
733 pChunk->iAge = 0;
734
735 pgmUnlock(pVM);
736#endif /* IN_RING3 */
737#else
738 NOREF(pVM);
739 NOREF(pLock);
740#endif
741}
742
743
744/**
745 * Converts a GC physical address to a HC pointer.
746 *
747 * @returns VINF_SUCCESS on success.
748 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
749 * page but has no physical backing.
750 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
751 * GC physical address.
752 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
753 * a dynamic RAM chunk boundary.
754 * @param pVM The VM handle.
755 * @param GCPhys The GC physical address to convert.
756 * @param cbRange Size of the physical range in bytes.
757 * @param pHCPtr Where to store the HC pointer on success.
758 */
759PGMDECL(int) PGMPhysGCPhys2HCPtr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTHCPTR pHCPtr)
760{
761#ifdef PGM_DYNAMIC_RAM_ALLOC
762 if ((GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK) != ((GCPhys+cbRange-1) & PGM_DYNAMIC_CHUNK_BASE_MASK))
763 {
764 AssertMsgFailed(("%VGp - %VGp crosses a chunk boundary!!\n", GCPhys, GCPhys+cbRange));
765 LogRel(("PGMPhysGCPhys2HCPtr %VGp - %VGp crosses a chunk boundary!!\n", GCPhys, GCPhys+cbRange));
766 return VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY;
767 }
768#endif
769
770 PPGMRAMRANGE pRam;
771 PPGMPAGE pPage;
772 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
773 if (VBOX_FAILURE(rc))
774 return rc;
775
776#ifndef PGM_IGNORE_RAM_FLAGS_RESERVED
777 if (RT_UNLIKELY(PGM_PAGE_IS_RESERVED(pPage)))
778 return VERR_PGM_PHYS_PAGE_RESERVED;
779#endif
780
781 RTGCPHYS off = GCPhys - pRam->GCPhys;
782 if (RT_UNLIKELY(off + cbRange > pRam->cb))
783 {
784 AssertMsgFailed(("%VGp - %VGp crosses a chunk boundary!!\n", GCPhys, GCPhys + cbRange));
785 return VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY;
786 }
787
788 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
789 {
790 unsigned iChunk = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
791 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pavHCChunk)[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
792 }
793 else if (RT_LIKELY(pRam->pvHC))
794 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + off);
795 else
796 return VERR_PGM_PHYS_PAGE_RESERVED;
797 return VINF_SUCCESS;
798}
799
800
801/**
802 * Converts a guest pointer to a GC physical address.
803 *
804 * This uses the current CR3/CR0/CR4 of the guest.
805 *
806 * @returns VBox status code.
807 * @param pVM The VM Handle
808 * @param GCPtr The guest pointer to convert.
809 * @param pGCPhys Where to store the GC physical address.
810 */
811PGMDECL(int) PGMPhysGCPtr2GCPhys(PVM pVM, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
812{
813 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
814 if (pGCPhys && VBOX_SUCCESS(rc))
815 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
816 return rc;
817}
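/*
 * Editor's illustrative sketch (not part of the original file): resolving a
 * guest virtual address to a guest physical one; note that the page offset
 * bits of GCPtr are carried over into the result. The helper is hypothetical.
 */
#if 0
static void pgmExampleGCPtr2GCPhys(PVM pVM, RTGCPTR GCPtr)
{
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
    if (VBOX_SUCCESS(rc))
        Log(("example: %VGv -> %VGp\n", GCPtr, GCPhys));
}
#endif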
818
819
820/**
821 * Converts a guest pointer to a HC physical address.
822 *
823 * This uses the current CR3/CR0/CR4 of the guest.
824 *
825 * @returns VBox status code.
826 * @param pVM The VM Handle
827 * @param GCPtr The guest pointer to convert.
828 * @param pHCPhys Where to store the HC physical address.
829 */
830PGMDECL(int) PGMPhysGCPtr2HCPhys(PVM pVM, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
831{
832 RTGCPHYS GCPhys;
833 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
834 if (VBOX_SUCCESS(rc))
835 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
836 return rc;
837}
838
839
840/**
841 * Converts a guest pointer to a HC pointer.
842 *
843 * This uses the current CR3/CR0/CR4 of the guest.
844 *
845 * @returns VBox status code.
846 * @param pVM The VM Handle
847 * @param GCPtr The guest pointer to convert.
848 * @param pHCPtr Where to store the HC virtual address.
849 */
850PGMDECL(int) PGMPhysGCPtr2HCPtr(PVM pVM, RTGCPTR GCPtr, PRTHCPTR pHCPtr)
851{
852 RTGCPHYS GCPhys;
853 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
854 if (VBOX_SUCCESS(rc))
855 rc = PGMPhysGCPhys2HCPtr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
856 return rc;
857}
858
859
860/**
861 * Converts a guest virtual address to a HC pointer using the specified CR3 and flags.
862 *
863 * @returns VBox status code.
864 * @param pVM The VM Handle
865 * @param GCPtr The guest pointer to convert.
866 * @param cr3 The guest CR3.
867 * @param fFlags Flags used for interpreting the PD correctly: X86_CR4_PSE and X86_CR4_PAE
868 * @param pHCPtr Where to store the HC pointer.
869 *
870 * @remark This function is used by the REM at a time when PGM could
871 * potentially not be in sync. It could also be used by a
872 * future DBGF API for CPU-state independent conversions.
873 */
874PGMDECL(int) PGMPhysGCPtr2HCPtrByGstCR3(PVM pVM, RTGCPTR GCPtr, uint32_t cr3, unsigned fFlags, PRTHCPTR pHCPtr)
875{
876 /*
877 * PAE or 32-bit?
878 */
879 int rc;
880 if (!(fFlags & X86_CR4_PAE))
881 {
882 PX86PD pPD;
883 rc = PGM_GCPHYS_2_PTR(pVM, cr3 & X86_CR3_PAGE_MASK, &pPD);
884 if (VBOX_SUCCESS(rc))
885 {
886 VBOXPDE Pde = pPD->a[(RTGCUINTPTR)GCPtr >> X86_PD_SHIFT];
887 if (Pde.n.u1Present)
888 {
889 if ((fFlags & X86_CR4_PSE) && Pde.b.u1Size)
890 { /* (big page) */
891 rc = PGMPhysGCPhys2HCPtr(pVM, (Pde.u & X86_PDE4M_PG_MASK) | ((RTGCUINTPTR)GCPtr & X86_PAGE_4M_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
892 }
893 else
894 { /* (normal page) */
895 PVBOXPT pPT;
896 rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & X86_PDE_PG_MASK, &pPT);
897 if (VBOX_SUCCESS(rc))
898 {
899 VBOXPTE Pte = pPT->a[((RTGCUINTPTR)GCPtr >> X86_PT_SHIFT) & X86_PT_MASK];
900 if (Pte.n.u1Present)
901 return PGMPhysGCPhys2HCPtr(pVM, (Pte.u & X86_PTE_PG_MASK) | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
902 rc = VERR_PAGE_NOT_PRESENT;
903 }
904 }
905 }
906 else
907 rc = VERR_PAGE_TABLE_NOT_PRESENT;
908 }
909 }
910 else
911 {
912 /** @todo long mode! */
913 PX86PDPTR pPdptr;
914 rc = PGM_GCPHYS_2_PTR(pVM, cr3 & X86_CR3_PAE_PAGE_MASK, &pPdptr);
915 if (VBOX_SUCCESS(rc))
916 {
917 X86PDPE Pdpe = pPdptr->a[((RTGCUINTPTR)GCPtr >> X86_PDPTR_SHIFT) & X86_PDPTR_MASK];
918 if (Pdpe.n.u1Present)
919 {
920 PX86PDPAE pPD;
921 rc = PGM_GCPHYS_2_PTR(pVM, Pdpe.u & X86_PDPE_PG_MASK, &pPD);
922 if (VBOX_SUCCESS(rc))
923 {
924 X86PDEPAE Pde = pPD->a[((RTGCUINTPTR)GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK];
925 if (Pde.n.u1Present)
926 {
927 if ((fFlags & X86_CR4_PSE) && Pde.b.u1Size)
928 { /* (big page) */
929 rc = PGMPhysGCPhys2HCPtr(pVM, (Pde.u & X86_PDE4M_PAE_PG_MASK) | ((RTGCUINTPTR)GCPtr & X86_PAGE_4M_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
930 }
931 else
932 { /* (normal page) */
933 PX86PTPAE pPT;
934 rc = PGM_GCPHYS_2_PTR(pVM, (Pde.u & X86_PDE_PAE_PG_MASK), &pPT);
935 if (VBOX_SUCCESS(rc))
936 {
937 X86PTEPAE Pte = pPT->a[((RTGCUINTPTR)GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK];
938 if (Pte.n.u1Present)
939 return PGMPhysGCPhys2HCPtr(pVM, (Pte.u & X86_PTE_PAE_PG_MASK) | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
940 rc = VERR_PAGE_NOT_PRESENT;
941 }
942 }
943 }
944 else
945 rc = VERR_PAGE_TABLE_NOT_PRESENT;
946 }
947 }
948 else
949 rc = VERR_PAGE_TABLE_NOT_PRESENT;
950 }
951 }
952 return rc;
953}
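/*
 * Editor's illustrative sketch (not part of the original file): how a caller
 * like REM could translate with an explicitly supplied CR3 and paging flags
 * instead of the current CPU state. The helper and its parameters are hypothetical.
 */
#if 0
static int pgmExampleByGstCR3(PVM pVM, RTGCPTR GCPtr, uint32_t cr3, bool fPae, bool fPse)
{
    RTHCPTR  HCPtr;
    unsigned fFlags = (fPae ? X86_CR4_PAE : 0) | (fPse ? X86_CR4_PSE : 0);
    return PGMPhysGCPtr2HCPtrByGstCR3(pVM, GCPtr, cr3, fFlags, &HCPtr);
}
#endif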
954
955
956#undef LOG_GROUP
957#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
958
959
960#ifdef IN_RING3
961/**
962 * Cache PGMPhys memory access
963 *
964 * @param pVM VM Handle.
965 * @param pCache Cache structure pointer
966 * @param GCPhys GC physical address
967 * @param pbHC HC pointer corresponding to physical page
968 *
969 * @thread EMT.
970 */
971static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbHC)
972{
973 uint32_t iCacheIndex;
974
975 GCPhys = PAGE_ADDRESS(GCPhys);
976 pbHC = (uint8_t *)PAGE_ADDRESS(pbHC);
977
978 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
979
980 ASMBitSet(&pCache->aEntries, iCacheIndex);
981
982 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
983 pCache->Entry[iCacheIndex].pbHC = pbHC;
984}
985#endif
986
987/**
988 * Read physical memory.
989 *
990 * This API respects access handlers and MMIO. Use PGMPhysReadGCPhys() if you
991 * want to ignore those.
992 *
993 * @param pVM VM Handle.
994 * @param GCPhys Physical address start reading from.
995 * @param pvBuf Where to put the read bits.
996 * @param cbRead How many bytes to read.
997 */
998PGMDECL(void) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
999{
1000#ifdef IN_RING3
1001 bool fGrabbedLock = false;
1002#endif
1003
1004 AssertMsg(cbRead > 0, ("don't even think about reading zero bytes!\n"));
1005 if (cbRead == 0)
1006 return;
1007
1008 LogFlow(("PGMPhysRead: %VGp %d\n", GCPhys, cbRead));
1009
1010#ifdef IN_RING3
1011 if (!VM_IS_EMT(pVM))
1012 {
1013 pgmLock(pVM);
1014 fGrabbedLock = true;
1015 }
1016#endif
1017
1018 /*
1019 * Copy loop on ram ranges.
1020 */
1021 PPGMRAMRANGE pCur = CTXSUFF(pVM->pgm.s.pRamRanges);
1022 for (;;)
1023 {
1024 /* Find range. */
1025 while (pCur && GCPhys > pCur->GCPhysLast)
1026 pCur = CTXSUFF(pCur->pNext);
1027 /* Inside range or not? */
1028 if (pCur && GCPhys >= pCur->GCPhys)
1029 {
1030 /*
1031 * Must work our way thru this page by page.
1032 */
1033 RTGCPHYS off = GCPhys - pCur->GCPhys;
1034 while (off < pCur->cb)
1035 {
1036 unsigned iPage = off >> PAGE_SHIFT;
1037 PPGMPAGE pPage = &pCur->aPages[iPage];
1038 size_t cb;
1039
1040 /* Physical chunk in dynamically allocated range not present? */
1041 if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(pPage)))
1042 {
1043 /* Treat it as reserved; return zeros */
1044 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1045 if (cb >= cbRead)
1046 {
1047 memset(pvBuf, 0, cbRead);
1048 goto end;
1049 }
1050 memset(pvBuf, 0, cb);
1051 }
1052 else
1053 {
1054 switch (pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_VIRTUAL_ALL | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_ROM)) /** @todo PAGE FLAGS */
1055 {
1056 /*
1057 * Normal memory or ROM.
1058 */
1059 case 0:
1060 case MM_RAM_FLAGS_ROM:
1061 case MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_RESERVED:
1062 //case MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2: /* = shadow */ - //MMIO2 isn't in the mask.
1063 case MM_RAM_FLAGS_PHYSICAL_WRITE:
1064 case MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_PHYSICAL_WRITE: // MMIO2 isn't in the mask.
1065 case MM_RAM_FLAGS_VIRTUAL_WRITE:
1066 {
1067#ifdef IN_GC
1068 void *pvSrc = NULL;
1069 PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvSrc);
1070 pvSrc = (char *)pvSrc + (off & PAGE_OFFSET_MASK);
1071#else
1072 void *pvSrc = PGMRAMRANGE_GETHCPTR(pCur, off)
1073#endif
1074 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1075 if (cb >= cbRead)
1076 {
1077#if defined(IN_RING3) && defined(PGM_PHYSMEMACCESS_CACHING)
1078 if (cbRead <= 4 && !fGrabbedLock /* i.e. EMT */)
1079 pgmPhysCacheAdd(pVM, &pVM->pgm.s.pgmphysreadcache, GCPhys, (uint8_t*)pvSrc);
1080#endif /* IN_RING3 && PGM_PHYSMEMACCESS_CACHING */
1081 memcpy(pvBuf, pvSrc, cbRead);
1082 goto end;
1083 }
1084 memcpy(pvBuf, pvSrc, cb);
1085 break;
1086 }
1087
1088 /*
1089 * All reserved, nothing there.
1090 */
1091 case MM_RAM_FLAGS_RESERVED:
1092 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1093 if (cb >= cbRead)
1094 {
1095 memset(pvBuf, 0, cbRead);
1096 goto end;
1097 }
1098 memset(pvBuf, 0, cb);
1099 break;
1100
1101 /*
1102 * Physical handler.
1103 */
1104 case MM_RAM_FLAGS_PHYSICAL_ALL:
1105 case MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_PHYSICAL_ALL: /** r=bird: MMIO2 isn't in the mask! */
1106 {
1107 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
1108 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1109#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
1110
1111 /* find and call the handler */
1112 PPGMPHYSHANDLER pNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesHC->PhysHandlers, GCPhys);
1113 if (pNode && pNode->pfnHandlerR3)
1114 {
1115 size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
1116 if (cbRange < cb)
1117 cb = cbRange;
1118 if (cb > cbRead)
1119 cb = cbRead;
1120
1121 void *pvSrc = PGMRAMRANGE_GETHCPTR(pCur, off)
1122
1123 /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
1124 rc = pNode->pfnHandlerR3(pVM, GCPhys, pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pNode->pvUserR3);
1125 }
1126#endif /* IN_RING3 */
1127 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1128 {
1129#ifdef IN_GC
1130 void *pvSrc = NULL;
1131 PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvSrc);
1132 pvSrc = (char *)pvSrc + (off & PAGE_OFFSET_MASK);
1133#else
1134 void *pvSrc = PGMRAMRANGE_GETHCPTR(pCur, off)
1135#endif
1136
1137 if (cb >= cbRead)
1138 {
1139 memcpy(pvBuf, pvSrc, cbRead);
1140 goto end;
1141 }
1142 memcpy(pvBuf, pvSrc, cb);
1143 }
1144 else if (cb >= cbRead)
1145 goto end;
1146 break;
1147 }
1148
1149 case MM_RAM_FLAGS_VIRTUAL_ALL:
1150 {
1151 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
1152 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1153#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
1154 /* Search the whole tree for matching physical addresses (rather expensive!) */
1155 PPGMVIRTHANDLER pNode;
1156 unsigned iPage;
1157 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pNode, &iPage);
1158 if (VBOX_SUCCESS(rc2) && pNode->pfnHandlerHC)
1159 {
1160 size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
1161 if (cbRange < cb)
1162 cb = cbRange;
1163 if (cb > cbRead)
1164 cb = cbRead;
1165 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pNode->GCPtr & PAGE_BASE_GC_MASK)
1166 + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);
1167
1168 void *pvSrc = PGMRAMRANGE_GETHCPTR(pCur, off)
1169
1170 /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
1171 rc = pNode->pfnHandlerHC(pVM, (RTGCPTR)GCPtr, pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, 0);
1172 }
1173#endif /* IN_RING3 */
1174 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1175 {
1176#ifdef IN_GC
1177 void *pvSrc = NULL;
1178 PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvSrc);
1179 pvSrc = (char *)pvSrc + (off & PAGE_OFFSET_MASK);
1180#else
1181 void *pvSrc = PGMRAMRANGE_GETHCPTR(pCur, off)
1182#endif
1183 if (cb >= cbRead)
1184 {
1185 memcpy(pvBuf, pvSrc, cbRead);
1186 goto end;
1187 }
1188 memcpy(pvBuf, pvSrc, cb);
1189 }
1190 else if (cb >= cbRead)
1191 goto end;
1192 break;
1193 }
1194
1195 /*
1196 * The rest needs to be taken more carefully.
1197 */
1198 default:
1199#if 1 /** @todo r=bird: Can you do this properly please. */
1200 /** @todo Try MMIO; quick hack */
1201 if (cbRead <= 4 && IOMMMIORead(pVM, GCPhys, (uint32_t *)pvBuf, cbRead) == VINF_SUCCESS)
1202 goto end;
1203#endif
1204
1205 /** @todo fix me later. */
1206 AssertReleaseMsgFailed(("Unknown read at %VGp size %d implement the complex physical reading case %x\n",
1207 GCPhys, cbRead,
1208 pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_VIRTUAL_ALL | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_ROM))); /** @todo PAGE FLAGS */
1209 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1210 break;
1211 }
1212 }
1213 cbRead -= cb;
1214 off += cb;
1215 pvBuf = (char *)pvBuf + cb;
1216 }
1217
1218 GCPhys = pCur->GCPhysLast + 1;
1219 }
1220 else
1221 {
1222 LogFlow(("PGMPhysRead: Unassigned %VGp size=%d\n", GCPhys, cbRead));
1223
1224 /*
1225 * Unassigned address space.
1226 */
1227 size_t cb;
1228 if ( !pCur
1229 || (cb = pCur->GCPhys - GCPhys) >= cbRead)
1230 {
1231 memset(pvBuf, 0, cbRead);
1232 goto end;
1233 }
1234
1235 memset(pvBuf, 0, cb);
1236 cbRead -= cb;
1237 pvBuf = (char *)pvBuf + cb;
1238 GCPhys += cb;
1239 }
1240 }
1241end:
1242#ifdef IN_RING3
1243 if (fGrabbedLock)
1244 pgmUnlock(pVM);
1245#endif
1246 return;
1247}
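/*
 * Editor's illustrative sketch (not part of the original file): a small
 * handler-aware read; PGMPhysRead takes care of MMIO and access handlers,
 * so this is the safe way to inspect guest RAM. The helper is hypothetical.
 */
#if 0
static uint32_t pgmExampleReadU32(PVM pVM, RTGCPHYS GCPhys)
{
    uint32_t u32 = 0;
    PGMPhysRead(pVM, GCPhys, &u32, sizeof(u32));
    return u32;
}
#endif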
1248
1249/**
1250 * Write to physical memory.
1251 *
1252 * This API respects access handlers and MMIO. Use PGMPhysWriteGCPhys() if you
1253 * want to ignore those.
1254 *
1255 * @param pVM VM Handle.
1256 * @param GCPhys Physical address to write to.
1257 * @param pvBuf What to write.
1258 * @param cbWrite How many bytes to write.
1259 */
1260PGMDECL(void) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
1261{
1262#ifdef IN_RING3
1263 bool fGrabbedLock = false;
1264#endif
1265
1266 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
1267 AssertMsg(cbWrite > 0, ("don't even think about writing zero bytes!\n"));
1268 if (cbWrite == 0)
1269 return;
1270
1271 LogFlow(("PGMPhysWrite: %VGp %d\n", GCPhys, cbWrite));
1272
1273#ifdef IN_RING3
1274 if (!VM_IS_EMT(pVM))
1275 {
1276 pgmLock(pVM);
1277 fGrabbedLock = true;
1278 }
1279#endif
1280 /*
1281 * Copy loop on ram ranges.
1282 */
1283 PPGMRAMRANGE pCur = CTXSUFF(pVM->pgm.s.pRamRanges);
1284 for (;;)
1285 {
1286 /* Find range. */
1287 while (pCur && GCPhys > pCur->GCPhysLast)
1288 pCur = CTXSUFF(pCur->pNext);
1289 /* Inside range or not? */
1290 if (pCur && GCPhys >= pCur->GCPhys)
1291 {
1292 /*
1293 * Must work our way thru this page by page.
1294 */
1295 unsigned off = GCPhys - pCur->GCPhys;
1296 while (off < pCur->cb)
1297 {
1298 unsigned iPage = off >> PAGE_SHIFT;
1299 PPGMPAGE pPage = &pCur->aPages[iPage];
1300
1301 /* Physical chunk in dynamically allocated range not present? */
1302 if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(pPage)))
1303 {
1304 int rc;
1305#ifdef IN_RING3
1306 if (fGrabbedLock)
1307 {
1308 pgmUnlock(pVM);
1309 rc = pgmr3PhysGrowRange(pVM, GCPhys);
1310 if (rc == VINF_SUCCESS)
1311 PGMPhysWrite(pVM, GCPhys, pvBuf, cbWrite); /* try again; can't assume pCur is still valid (paranoia) */
1312 return;
1313 }
1314 rc = pgmr3PhysGrowRange(pVM, GCPhys);
1315#else
1316 rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
1317#endif
1318 if (rc != VINF_SUCCESS)
1319 goto end;
1320 }
1321
1322 size_t cb;
1323 /** @todo r=bird: missing MM_RAM_FLAGS_ROM here, we shall not allow anyone to overwrite the ROM! */
1324 switch (pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_VIRTUAL_ALL | MM_RAM_FLAGS_VIRTUAL_WRITE | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_PHYSICAL_WRITE)) /** @todo PAGE FLAGS */
1325 {
1326 /*
1327 * Normal memory, MMIO2 or writable shadow ROM.
1328 */
1329 case 0:
1330 case MM_RAM_FLAGS_MMIO2:
1331 case MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2: /* shadow rom */
1332 {
1333#ifdef IN_GC
1334 void *pvDst = NULL;
1335 PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvDst);
1336 pvDst = (char *)pvDst + (off & PAGE_OFFSET_MASK);
1337#else
1338 void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
1339#endif
1340 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1341 if (cb >= cbWrite)
1342 {
1343#if defined(IN_RING3) && defined(PGM_PHYSMEMACCESS_CACHING)
1344 if (cbWrite <= 4 && !fGrabbedLock /* i.e. EMT */)
1345 pgmPhysCacheAdd(pVM, &pVM->pgm.s.pgmphyswritecache, GCPhys, (uint8_t*)pvDst);
1346#endif /* IN_RING3 && PGM_PHYSMEMACCESS_CACHING */
1347 memcpy(pvDst, pvBuf, cbWrite);
1348 goto end;
1349 }
1350 memcpy(pvDst, pvBuf, cb);
1351 break;
1352 }
1353
1354 /*
1355 * All reserved, nothing there.
1356 */
1357 case MM_RAM_FLAGS_RESERVED:
1358 case MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO2:
1359 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1360 if (cb >= cbWrite)
1361 goto end;
1362 break;
1363
1364 /*
1365 * Physical handler.
1366 */
1367 case MM_RAM_FLAGS_PHYSICAL_ALL:
1368 case MM_RAM_FLAGS_PHYSICAL_WRITE:
1369 case MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_PHYSICAL_ALL:
1370 case MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_PHYSICAL_WRITE:
1371 {
1372 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
1373 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1374#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
1375 /* find and call the handler */
1376 PPGMPHYSHANDLER pNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesHC->PhysHandlers, GCPhys);
1377 if (pNode && pNode->pfnHandlerR3)
1378 {
1379 size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
1380 if (cbRange < cb)
1381 cb = cbRange;
1382 if (cb > cbWrite)
1383 cb = cbWrite;
1384
1385 void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
1386
1387 /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
1388 rc = pNode->pfnHandlerR3(pVM, GCPhys, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, pNode->pvUserR3);
1389 }
1390#endif /* IN_RING3 */
1391 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1392 {
1393#ifdef IN_GC
1394 void *pvDst = NULL;
1395 PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvDst);
1396 pvDst = (char *)pvDst + (off & PAGE_OFFSET_MASK);
1397#else
1398 void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
1399#endif
1400 if (cb >= cbWrite)
1401 {
1402 memcpy(pvDst, pvBuf, cbWrite);
1403 goto end;
1404 }
1405 memcpy(pvDst, pvBuf, cb);
1406 }
1407 else if (cb >= cbWrite)
1408 goto end;
1409 break;
1410 }
1411
1412 case MM_RAM_FLAGS_VIRTUAL_ALL:
1413 case MM_RAM_FLAGS_VIRTUAL_WRITE:
1414 {
1415 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
1416 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1417#ifdef IN_RING3
1418/** @todo deal with this in GC and R0! */
1419 /* Search the whole tree for matching physical addresses (rather expensive!) */
1420 PPGMVIRTHANDLER pNode;
1421 unsigned iPage;
1422 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pNode, &iPage);
1423 if (VBOX_SUCCESS(rc2) && pNode->pfnHandlerHC)
1424 {
1425 size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
1426 if (cbRange < cb)
1427 cb = cbRange;
1428 if (cb > cbWrite)
1429 cb = cbWrite;
1430 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pNode->GCPtr & PAGE_BASE_GC_MASK)
1431 + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);
1432
1433 void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
1434
1435 /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
1436 rc = pNode->pfnHandlerHC(pVM, (RTGCPTR)GCPtr, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, 0);
1437 }
1438#endif /* IN_RING3 */
1439 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1440 {
1441#ifdef IN_GC
1442 void *pvDst = NULL;
1443 PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvDst);
1444 pvDst = (char *)pvDst + (off & PAGE_OFFSET_MASK);
1445#else
1446 void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
1447#endif
1448 if (cb >= cbWrite)
1449 {
1450 memcpy(pvDst, pvBuf, cbWrite);
1451 goto end;
1452 }
1453 memcpy(pvDst, pvBuf, cb);
1454 }
1455 else if (cb >= cbWrite)
1456 goto end;
1457 break;
1458 }
1459
1460 /*
1461 * Physical write handler + virtual write handler.
1462 * Consider this a quick workaround for the CSAM + shadow caching problem.
1463 *
1464 * We hand it to the shadow caching first since it requires the unchanged
1465 * data. CSAM will have to put up with it already being changed.
1466 */
1467 case MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_VIRTUAL_WRITE:
1468 {
1469 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
1470 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1471#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
1472 /* 1. The physical handler */
1473 PPGMPHYSHANDLER pPhysNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesHC->PhysHandlers, GCPhys);
1474 if (pPhysNode && pPhysNode->pfnHandlerR3)
1475 {
1476 size_t cbRange = pPhysNode->Core.KeyLast - GCPhys + 1;
1477 if (cbRange < cb)
1478 cb = cbRange;
1479 if (cb > cbWrite)
1480 cb = cbWrite;
1481
1482 void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
1483
1484 /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
1485 rc = pPhysNode->pfnHandlerR3(pVM, GCPhys, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, pPhysNode->pvUserR3);
1486 }
1487
1488 /* 2. The virtual handler (will see incorrect data) */
1489 PPGMVIRTHANDLER pVirtNode;
1490 unsigned iPage;
1491 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirtNode, &iPage);
1492 if (VBOX_SUCCESS(rc2) && pVirtNode->pfnHandlerHC)
1493 {
1494 size_t cbRange = pVirtNode->Core.KeyLast - GCPhys + 1;
1495 if (cbRange < cb)
1496 cb = cbRange;
1497 if (cb > cbWrite)
1498 cb = cbWrite;
1499 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirtNode->GCPtr & PAGE_BASE_GC_MASK)
1500 + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);
1501
1502 void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
1503
1504 /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
1505 rc2 = pVirtNode->pfnHandlerHC(pVM, (RTGCPTR)GCPtr, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, 0);
1506 if ( ( rc2 != VINF_PGM_HANDLER_DO_DEFAULT
1507 && rc == VINF_PGM_HANDLER_DO_DEFAULT)
1508 || ( VBOX_FAILURE(rc2)
1509 && VBOX_SUCCESS(rc)))
1510 rc = rc2;
1511 }
1512#endif /* IN_RING3 */
1513 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1514 {
1515#ifdef IN_GC
1516 void *pvDst = NULL;
1517 PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvDst);
1518 pvDst = (char *)pvDst + (off & PAGE_OFFSET_MASK);
1519#else
1520 void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
1521#endif
1522 if (cb >= cbWrite)
1523 {
1524 memcpy(pvDst, pvBuf, cbWrite);
1525 goto end;
1526 }
1527 memcpy(pvDst, pvBuf, cb);
1528 }
1529 else if (cb >= cbWrite)
1530 goto end;
1531 break;
1532 }
1533
1534
1535 /*
1536 * The rest needs to be taken more carefully.
1537 */
1538 default:
1539#if 1 /** @todo r=bird: Can you do this properly please. */
1540 /** @todo Try MMIO; quick hack */
1541 if (cbWrite <= 4 && IOMMMIOWrite(pVM, GCPhys, *(uint32_t *)pvBuf, cbWrite) == VINF_SUCCESS)
1542 goto end;
1543#endif
1544
1545 /** @todo fix me later. */
1546 AssertReleaseMsgFailed(("Unknown write at %VGp size %d implement the complex physical writing case %x\n",
1547 GCPhys, cbWrite,
1548 (pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_VIRTUAL_ALL | MM_RAM_FLAGS_VIRTUAL_WRITE | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_PHYSICAL_WRITE)))); /** @todo PAGE FLAGS */
1549 /* skip the write */
1550 cb = cbWrite;
1551 break;
1552 }
1553
1554 cbWrite -= cb;
1555 off += cb;
1556 pvBuf = (const char *)pvBuf + cb;
1557 }
1558
1559 GCPhys = pCur->GCPhysLast + 1;
1560 }
1561 else
1562 {
1563 /*
1564 * Unassigned address space.
1565 */
1566 size_t cb;
1567 if ( !pCur
1568 || (cb = pCur->GCPhys - GCPhys) >= cbWrite)
1569 goto end;
1570
1571 cbWrite -= cb;
1572 pvBuf = (const char *)pvBuf + cb;
1573 GCPhys += cb;
1574 }
1575 }
1576end:
1577#ifdef IN_RING3
1578 if (fGrabbedLock)
1579 pgmUnlock(pVM);
1580#endif
1581 return;
1582}
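/*
 * Editor's illustrative sketch (not part of the original file): a handler-aware
 * read-modify-write using PGMPhysRead and PGMPhysWrite; a write hitting a
 * monitored page will invoke the registered handler. The helper is hypothetical.
 */
#if 0
static void pgmExampleSetBit(PVM pVM, RTGCPHYS GCPhys, uint32_t fBit)
{
    uint32_t u32 = 0;
    PGMPhysRead(pVM, GCPhys, &u32, sizeof(u32));
    u32 |= fBit;
    PGMPhysWrite(pVM, GCPhys, &u32, sizeof(u32));
}
#endif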
1583
1584#ifndef IN_GC /* Ring 0 & 3 only */
1585
1586/**
1587 * Read from guest physical memory by GC physical address, bypassing
1588 * MMIO and access handlers.
1589 *
1590 * @returns VBox status.
1591 * @param pVM VM handle.
1592 * @param pvDst The destination address.
1593 * @param GCPhysSrc The source address (GC physical address).
1594 * @param cb The number of bytes to read.
1595 */
1596PGMDECL(int) PGMPhysReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
1597{
1598 /*
1599 * Anything to be done?
1600 */
1601 if (!cb)
1602 return VINF_SUCCESS;
1603
1604 /*
1605 * Loop ram ranges.
1606 */
1607 for (PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
1608 pRam;
1609 pRam = pRam->CTXSUFF(pNext))
1610 {
1611 RTGCPHYS off = GCPhysSrc - pRam->GCPhys;
1612 if (off < pRam->cb)
1613 {
1614 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1615 {
1616 /* Copy page by page as we're not dealing with a linear HC range. */
1617 for (;;)
1618 {
1619 /* convert */
1620 void *pvSrc;
1621 int rc = pgmRamGCPhys2HCPtrWithRange(pVM, pRam, GCPhysSrc, &pvSrc);
1622 if (VBOX_FAILURE(rc))
1623 return rc;
1624
1625 /* copy */
1626 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPhysSrc & PAGE_OFFSET_MASK);
1627 if (cbRead >= cb)
1628 {
1629 memcpy(pvDst, pvSrc, cb);
1630 return VINF_SUCCESS;
1631 }
1632 memcpy(pvDst, pvSrc, cbRead);
1633
1634 /* next */
1635 cb -= cbRead;
1636 pvDst = (uint8_t *)pvDst + cbRead;
1637 GCPhysSrc += cbRead;
1638 }
1639 }
1640 else if (pRam->pvHC)
1641 {
1642 /* read */
1643 size_t cbRead = pRam->cb - off;
1644 if (cbRead >= cb)
1645 {
1646 memcpy(pvDst, (uint8_t *)pRam->pvHC + off, cb);
1647 return VINF_SUCCESS;
1648 }
1649 memcpy(pvDst, (uint8_t *)pRam->pvHC + off, cbRead);
1650
1651 /* next */
1652 cb -= cbRead;
1653 pvDst = (uint8_t *)pvDst + cbRead;
1654 GCPhysSrc += cbRead;
1655 }
1656 else
1657 return VERR_PGM_PHYS_PAGE_RESERVED;
1658 }
1659 else if (GCPhysSrc < pRam->GCPhysLast)
1660 break;
1661 }
1662 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1663}
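/*
 * Editor's illustrative sketch (not part of the original file): the bypassing
 * variant is for callers that must not trigger handlers, e.g. saved-state or
 * debugger code. Note that the destination pointer comes first. Hypothetical helper.
 */
#if 0
static int pgmExampleRawRead(PVM pVM, RTGCPHYS GCPhys, void *pvDst, size_t cb)
{
    return PGMPhysReadGCPhys(pVM, pvDst, GCPhys, cb);
}
#endif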
1664
1665
1666/**
1667 * Write to guest physical memory by GC physical address.
1669 *
1670 * This will bypass MMIO and access handlers.
1671 *
1672 * @returns VBox status.
1673 * @param pVM VM handle.
1674 * @param GCPhysDst The GC physical address of the destination.
1675 * @param pvSrc The source buffer.
1676 * @param cb The number of bytes to write.
1677 */
1678PGMDECL(int) PGMPhysWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
1679{
1680 /*
1681 * Anything to be done?
1682 */
1683 if (!cb)
1684 return VINF_SUCCESS;
1685
1686 LogFlow(("PGMPhysWriteGCPhys: %VGp %d\n", GCPhysDst, cb));
1687
1688 /*
1689 * Loop ram ranges.
1690 */
1691 for (PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
1692 pRam;
1693 pRam = pRam->CTXSUFF(pNext))
1694 {
1695 RTGCPHYS off = GCPhysDst - pRam->GCPhys;
1696 if (off < pRam->cb)
1697 {
1698#ifdef NEW_PHYS_CODE
1699/** @todo PGMRamGCPhys2HCPtrWithRange. */
1700#endif
1701 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1702 {
1703 /* Copy page by page as we're not dealing with a linear HC range. */
1704 for (;;)
1705 {
1706 /* convert */
1707 void *pvDst;
1708 int rc = pgmRamGCPhys2HCPtrWithRange(pVM, pRam, GCPhysDst, &pvDst);
1709 if (VBOX_FAILURE(rc))
1710 return rc;
1711
1712 /* copy */
1713 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPhysDst & PAGE_OFFSET_MASK);
1714 if (cbWrite >= cb)
1715 {
1716 memcpy(pvDst, pvSrc, cb);
1717 return VINF_SUCCESS;
1718 }
1719 memcpy(pvDst, pvSrc, cbWrite);
1720
1721 /* next */
1722 cb -= cbWrite;
1723 pvSrc = (uint8_t *)pvSrc + cbWrite;
1724 GCPhysDst += cbWrite;
1725 }
1726 }
1727 else if (pRam->pvHC)
1728 {
1729 /* write */
1730 size_t cbWrite = pRam->cb - off;
1731 if (cbWrite >= cb)
1732 {
1733 memcpy((uint8_t *)pRam->pvHC + off, pvSrc, cb);
1734 return VINF_SUCCESS;
1735 }
1736 memcpy((uint8_t *)pRam->pvHC + off, pvSrc, cbWrite);
1737
1738 /* next */
1739 cb -= cbWrite;
1740 GCPhysDst += cbWrite;
1741 pvSrc = (uint8_t *)pvSrc + cbWrite;
1742 }
1743 else
1744 return VERR_PGM_PHYS_PAGE_RESERVED;
1745 }
1746 else if (GCPhysDst < pRam->GCPhysLast)
1747 break;
1748 }
1749 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1750}
1751
1752
1753/**
1754 * Read from guest physical memory referenced by GC pointer.
1755 *
1756 * This function uses the current CR3/CR0/CR4 of the guest and will
1757 * bypass access handlers and not set any accessed bits.
1758 *
1759 * @returns VBox status.
1760 * @param pVM VM handle.
1761 * @param pvDst The destination address.
1762 * @param GCPtrSrc The source address (GC pointer).
1763 * @param cb The number of bytes to read.
1764 */
1765PGMDECL(int) PGMPhysReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
1766{
1767 /*
1768 * Anything to do?
1769 */
1770 if (!cb)
1771 return VINF_SUCCESS;
1772
1773 /*
1774 * Optimize reads within a single page.
1775 */
1776 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
1777 {
1778 void *pvSrc;
1779 int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrSrc, &pvSrc);
1780 if (VBOX_FAILURE(rc))
1781 return rc;
1782 memcpy(pvDst, pvSrc, cb);
1783 return VINF_SUCCESS;
1784 }
1785
1786 /*
1787 * Page by page.
1788 */
1789 for (;;)
1790 {
1791 /* convert */
1792 void *pvSrc;
1793 int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrSrc, &pvSrc);
1794 if (VBOX_FAILURE(rc))
1795 return rc;
1796
1797 /* copy */
1798 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
1799 if (cbRead >= cb)
1800 {
1801 memcpy(pvDst, pvSrc, cb);
1802 return VINF_SUCCESS;
1803 }
1804 memcpy(pvDst, pvSrc, cbRead);
1805
1806 /* next */
1807 cb -= cbRead;
1808 pvDst = (uint8_t *)pvDst + cbRead;
1809 GCPtrSrc += cbRead;
1810 }
1811}
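
/*
 * Minimal usage sketch (GCPtrStack and abFrame are placeholders, valid pVM
 * assumed): peeking at guest memory through a GC virtual address using the
 * current CR3, without tripping handlers or setting the accessed bit.
 *
 *      uint8_t abFrame[16];
 *      int rc = PGMPhysReadGCPtr(pVM, abFrame, GCPtrStack, sizeof(abFrame));
 *      if (VBOX_FAILURE(rc))
 *          Log(("stack peek failed: GCPtr=%VGv rc=%Vrc\n", GCPtrStack, rc));
 */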
1812
1813
1814/**
1815 * Write to guest physical memory referenced by GC pointer.
1816 *
1817 * This function uses the current CR3/CR0/CR4 of the guest and will
1818 * bypass access handlers and not set dirty or accessed bits.
1819 *
1820 * @returns VBox status.
1821 * @param pVM VM handle.
1822 * @param GCPtrDst The destination address (GC pointer).
1823 * @param pvSrc The source address.
1824 * @param cb The number of bytes to write.
1825 */
1826PGMDECL(int) PGMPhysWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
1827{
1828 /*
1829 * Anything to do?
1830 */
1831 if (!cb)
1832 return VINF_SUCCESS;
1833
1834 LogFlow(("PGMPhysWriteGCPtr: %VGv %d\n", GCPtrDst, cb));
1835
1836 /*
1837 * Optimize writes within a single page.
1838 */
1839 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
1840 {
1841 void *pvDst;
1842 int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrDst, &pvDst);
1843 if (VBOX_FAILURE(rc))
1844 return rc;
1845 memcpy(pvDst, pvSrc, cb);
1846 return VINF_SUCCESS;
1847 }
1848
1849 /*
1850 * Page by page.
1851 */
1852 for (;;)
1853 {
1854 /* convert */
1855 void *pvDst;
1856 int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrDst, &pvDst);
1857 if (VBOX_FAILURE(rc))
1858 return rc;
1859
1860 /* copy */
1861 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
1862 if (cbWrite >= cb)
1863 {
1864 memcpy(pvDst, pvSrc, cb);
1865 return VINF_SUCCESS;
1866 }
1867 memcpy(pvDst, pvSrc, cbWrite);
1868
1869 /* next */
1870 cb -= cbWrite;
1871 pvSrc = (uint8_t *)pvSrc + cbWrite;
1872 GCPtrDst += cbWrite;
1873 }
1874}
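
/*
 * Minimal usage sketch (GCPtrPatch is a placeholder): poking a single byte
 * through a GC virtual address, bypassing handlers and leaving the accessed
 * and dirty bits untouched.
 *
 *      const uint8_t bInt3 = 0xcc;
 *      int rc = PGMPhysWriteGCPtr(pVM, GCPtrPatch, &bInt3, sizeof(bInt3));
 *      AssertRC(rc);
 */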
1875
1876/**
1877 * Read from guest physical memory referenced by GC pointer.
1878 *
1879 * This function uses the current CR3/CR0/CR4 of the guest and will
1880 * respect access handlers and set accessed bits.
1881 *
1882 * @returns VBox status.
1883 * @param pVM VM handle.
1884 * @param pvDst The destination address.
1885 * @param GCPtrSrc The source address (GC pointer).
1886 * @param cb The number of bytes to read.
1887 */
1888/** @todo use the PGMPhysReadGCPtr name and rename the unsafe one to something appropriate */
1889PGMDECL(int) PGMPhysReadGCPtrSafe(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
1890{
1891 RTGCPHYS GCPhys;
1892 int rc;
1893
1894 /*
1895 * Anything to do?
1896 */
1897 if (!cb)
1898 return VINF_SUCCESS;
1899
1900 LogFlow(("PGMPhysReadGCPtrSafe: %VGv %d\n", GCPtrSrc, cb));
1901
1902 /*
1903 * Optimize reads within a single page.
1904 */
1905 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
1906 {
1907 /* Convert virtual to physical address */
1908 rc = PGMPhysGCPtr2GCPhys(pVM, GCPtrSrc, &GCPhys);
1909 AssertRCReturn(rc, rc);
1910
1911 /* mark the guest page as accessed. */
1912 rc = PGMGstModifyPage(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
1913 AssertRC(rc);
1914
1915 PGMPhysRead(pVM, GCPhys, pvDst, cb);
1916 return VINF_SUCCESS;
1917 }
1918
1919 /*
1920 * Page by page.
1921 */
1922 for (;;)
1923 {
1924 /* Convert virtual to physical address */
1925 rc = PGMPhysGCPtr2GCPhys(pVM, GCPtrSrc, &GCPhys);
1926 AssertRCReturn(rc, rc);
1927
1928 /* mark the guest page as accessed. */
1929        rc = PGMGstModifyPage(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
1930 AssertRC(rc);
1931
1932 /* copy */
1933 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
1934 if (cbRead >= cb)
1935 {
1936 PGMPhysRead(pVM, GCPhys, pvDst, cb);
1937 return VINF_SUCCESS;
1938 }
1939 PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
1940
1941 /* next */
1942 cb -= cbRead;
1943 pvDst = (uint8_t *)pvDst + cbRead;
1944 GCPtrSrc += cbRead;
1945 }
1946}
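
/*
 * Minimal usage sketch (GCPtrDesc is a placeholder): a handler-respecting
 * read of a descriptor-sized chunk through a GC virtual address; the guest
 * PTE accessed bit is set as a side effect.
 *
 *      uint8_t abDesc[8];
 *      int rc = PGMPhysReadGCPtrSafe(pVM, abDesc, GCPtrDesc, sizeof(abDesc));
 *      AssertRCReturn(rc, rc);
 */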
1947
1948
1949/**
1950 * Write to guest physical memory referenced by GC pointer.
1951 *
1952 * This function uses the current CR3/CR0/CR4 of the guest and will
1953 * respect access handlers and set dirty and accessed bits.
1954 *
1955 * @returns VBox status.
1956 * @param pVM VM handle.
1957 * @param GCPtrDst The destination address (GC pointer).
1958 * @param pvSrc The source address.
1959 * @param cb The number of bytes to write.
1960 */
1961/** @todo use the PGMPhysWriteGCPtr name and rename the unsafe one to something appropriate */
1962PGMDECL(int) PGMPhysWriteGCPtrSafe(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
1963{
1964 RTGCPHYS GCPhys;
1965 int rc;
1966
1967 /*
1968 * Anything to do?
1969 */
1970 if (!cb)
1971 return VINF_SUCCESS;
1972
1973 LogFlow(("PGMPhysWriteGCPtrSafe: %VGv %d\n", GCPtrDst, cb));
1974
1975 /*
1976 * Optimize writes within a single page.
1977 */
1978 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
1979 {
1980 /* Convert virtual to physical address */
1981 rc = PGMPhysGCPtr2GCPhys(pVM, GCPtrDst, &GCPhys);
1982 AssertRCReturn(rc, rc);
1983
1984 /* mark the guest page as accessed and dirty. */
1985 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
1986 AssertRC(rc);
1987
1988 PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
1989 return VINF_SUCCESS;
1990 }
1991
1992 /*
1993 * Page by page.
1994 */
1995 for (;;)
1996 {
1997 /* Convert virtual to physical address */
1998 rc = PGMPhysGCPtr2GCPhys(pVM, GCPtrDst, &GCPhys);
1999 AssertRCReturn(rc, rc);
2000
2001 /* mark the guest page as accessed and dirty. */
2002 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2003 AssertRC(rc);
2004
2005 /* copy */
2006 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2007 if (cbWrite >= cb)
2008 {
2009 PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
2010 return VINF_SUCCESS;
2011 }
2012 PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
2013
2014 /* next */
2015 cb -= cbWrite;
2016 pvSrc = (uint8_t *)pvSrc + cbWrite;
2017 GCPtrDst += cbWrite;
2018 }
2019}
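
/*
 * Minimal usage sketch (GCPtrCounter and u32Ticks are placeholders): a
 * handler-respecting write which also marks the page accessed and dirty.
 *
 *      uint32_t u32Ticks = 0;
 *      int rc = PGMPhysWriteGCPtrSafe(pVM, GCPtrCounter, &u32Ticks, sizeof(u32Ticks));
 *      if (VBOX_FAILURE(rc))
 *          return rc;
 */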
2020
2021/**
2022 * Write to guest physical memory referenced by GC pointer and update the PTE.
2023 *
2024 * This function uses the current CR3/CR0/CR4 of the guest and will
2025 * bypass access handlers and set any dirty and accessed bits in the PTE.
2026 *
2027 * If you don't want to set the dirty bit, use PGMPhysWriteGCPtr().
2028 *
2029 * @returns VBox status.
2030 * @param pVM VM handle.
2031 * @param GCPtrDst The destination address (GC pointer).
2032 * @param pvSrc The source address.
2033 * @param cb The number of bytes to write.
2034 */
2035PGMDECL(int) PGMPhysWriteGCPtrDirty(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2036{
2037 /*
2038 * Anything to do?
2039 */
2040 if (!cb)
2041 return VINF_SUCCESS;
2042
2043 /*
2044 * Optimize writes within a single page.
2045 */
2046 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2047 {
2048 void *pvDst;
2049 int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrDst, &pvDst);
2050 if (VBOX_FAILURE(rc))
2051 return rc;
2052 memcpy(pvDst, pvSrc, cb);
2053 rc = PGMGstModifyPage(pVM, GCPtrDst, cb, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2054 AssertRC(rc);
2055 return VINF_SUCCESS;
2056 }
2057
2058 /*
2059 * Page by page.
2060 */
2061 for (;;)
2062 {
2063 /* convert */
2064 void *pvDst;
2065 int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrDst, &pvDst);
2066 if (VBOX_FAILURE(rc))
2067 return rc;
2068
2069 /* mark the guest page as accessed and dirty. */
2070 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2071 AssertRC(rc);
2072
2073 /* copy */
2074 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2075 if (cbWrite >= cb)
2076 {
2077 memcpy(pvDst, pvSrc, cb);
2078 return VINF_SUCCESS;
2079 }
2080 memcpy(pvDst, pvSrc, cbWrite);
2081
2082 /* next */
2083 cb -= cbWrite;
2084 GCPtrDst += cbWrite;
2085 pvSrc = (char *)pvSrc + cbWrite;
2086 }
2087}
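
/*
 * Minimal usage sketch (GCPtrResult and u16Result are placeholders): storing
 * an emulation result through a GC virtual address, bypassing handlers while
 * still dirtying the PTE so the guest sees a consistent dirty bit.
 *
 *      uint16_t u16Result = 0x1234;
 *      int rc = PGMPhysWriteGCPtrDirty(pVM, GCPtrResult, &u16Result, sizeof(u16Result));
 *      AssertRC(rc);
 */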
2088
2089#endif /* !IN_GC */
2090
2091
2092
2093/**
2094 * Performs a read of guest virtual memory for instruction emulation.
2095 *
2096 * This will check permissions, raise exceptions and update the access bits.
2097 *
2098 * The current implementation will bypass all access handlers. It may later be
2099 * changed to at least respect MMIO.
2100 *
2101 *
2102 * @returns VBox status code suitable to scheduling.
2103 * @retval VINF_SUCCESS if the read was performed successfully.
2104 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2105 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2106 *
2107 * @param pVM The VM handle.
2108 * @param pCtxCore The context core.
2109 * @param pvDst Where to put the bytes we've read.
2110 * @param GCPtrSrc The source address.
2111 * @param cb The number of bytes to read. Not more than a page.
2112 *
2113 * @remark This function will dynamically map physical pages in GC. This may unmap
2114 * mappings done by the caller. Be careful!
2115 */
2116PGMDECL(int) PGMPhysInterpretedRead(PVM pVM, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
2117{
2118 Assert(cb <= PAGE_SIZE);
2119
2120/** @todo r=bird: This isn't perfect!
2121 * -# It's not checking for reserved bits being 1.
2122 * -# It's not correctly dealing with the access bit.
2123 * -# It's not respecting MMIO memory or any other access handlers.
2124 */
2125 /*
2126 * 1. Translate virtual to physical. This may fault.
2127 * 2. Map the physical address.
2128 * 3. Do the read operation.
2129 * 4. Set access bits if required.
2130 */
2131 int rc;
2132 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
2133 if (cb <= cb1)
2134 {
2135 /*
2136 * Not crossing pages.
2137 */
2138 RTGCPHYS GCPhys;
2139 uint64_t fFlags;
2140 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags, &GCPhys);
2141 if (VBOX_SUCCESS(rc))
2142 {
2143 /** @todo we should check reserved bits ... */
2144 void *pvSrc;
2145 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
2146 switch (rc)
2147 {
2148 case VINF_SUCCESS:
2149Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
2150 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
2151 break;
2152 case VERR_PGM_PHYS_PAGE_RESERVED:
2153 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2154 memset(pvDst, 0, cb);
2155 break;
2156 default:
2157 return rc;
2158 }
2159
2160 /** @todo access bit emulation isn't 100% correct. */
2161 if (!(fFlags & X86_PTE_A))
2162 {
2163 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2164 AssertRC(rc);
2165 }
2166 return VINF_SUCCESS;
2167 }
2168 }
2169 else
2170 {
2171 /*
2172 * Crosses pages.
2173 */
2174 unsigned cb2 = cb - cb1;
2175 uint64_t fFlags1;
2176 RTGCPHYS GCPhys1;
2177 uint64_t fFlags2;
2178 RTGCPHYS GCPhys2;
2179 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags1, &GCPhys1);
2180 if (VBOX_SUCCESS(rc))
2181 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
2182 if (VBOX_SUCCESS(rc))
2183 {
2184 /** @todo we should check reserved bits ... */
2185AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%VGv\n", cb, cb1, cb2, GCPtrSrc));
2186 void *pvSrc1;
2187 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
2188 switch (rc)
2189 {
2190 case VINF_SUCCESS:
2191 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
2192 break;
2193 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2194 memset(pvDst, 0, cb1);
2195 break;
2196 default:
2197 return rc;
2198 }
2199
2200 void *pvSrc2;
2201 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
2202 switch (rc)
2203 {
2204 case VINF_SUCCESS:
2205                memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
2206 break;
2207 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2208                memset((uint8_t *)pvDst + cb1, 0, cb2);
2209 break;
2210 default:
2211 return rc;
2212 }
2213
2214 if (!(fFlags1 & X86_PTE_A))
2215 {
2216 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2217 AssertRC(rc);
2218 }
2219 if (!(fFlags2 & X86_PTE_A))
2220 {
2221 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2222 AssertRC(rc);
2223 }
2224 return VINF_SUCCESS;
2225 }
2226 }
2227
2228 /*
2229 * Raise a #PF.
2230 */
2231 uint32_t uErr;
2232
2233 /* Get the current privilege level. */
2234 uint32_t cpl = CPUMGetGuestCPL(pVM, pCtxCore);
2235 switch (rc)
2236 {
2237 case VINF_SUCCESS:
2238 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
2239 break;
2240
2241 case VERR_PAGE_NOT_PRESENT:
2242 case VERR_PAGE_TABLE_NOT_PRESENT:
2243 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
2244 break;
2245
2246 default:
2247 AssertMsgFailed(("rc=%Vrc GCPtrSrc=%VGv cb=%#x\n", rc, GCPtrSrc, cb));
2248 return rc;
2249 }
2250 Log(("PGMPhysInterpretedRead: GCPtrSrc=%VGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
2251 return TRPMRaiseXcptErrCR2(pVM, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
2252}
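
/*
 * Minimal usage sketch (pRegFrame and GCPtrOperand are placeholders): fetching
 * an instruction operand during emulation and handing any raised #PF status
 * back to the caller for rescheduling.
 *
 *      uint32_t u32Operand;
 *      int rc = PGMPhysInterpretedRead(pVM, pRegFrame, &u32Operand, GCPtrOperand, sizeof(u32Operand));
 *      if (rc != VINF_SUCCESS)
 *          return rc;      (covers VINF_EM_RAW_GUEST_TRAP and VINF_TRPM_XCPT_DISPATCHED)
 */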
2253
2254/// @todo PGMDECL(int) PGMPhysInterpretedWrite(PVM pVM, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2255