VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp @ 2860

Last change on this file since 2860 was 2679, checked in by vboxsync, 18 years ago

fixed alignment issue, moved the code to the right place and fixed the style.

/* $Id: PGMAllPhys.cpp 2679 2007-05-16 19:11:24Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, Physical Memory Addressing.
 */

/*
 * Copyright (C) 2006 InnoTek Systemberatung GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */

/** @def PGM_IGNORE_RAM_FLAGS_RESERVED
 * Don't respect the MM_RAM_FLAGS_RESERVED flag when converting to HC addresses.
 *
 * Since this flag is currently incorrectly kept set for ROM regions we will
 * have to ignore it for now so we don't break stuff.
 */
#define PGM_IGNORE_RAM_FLAGS_RESERVED


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM_PHYS
#include <VBox/pgm.h>
#include <VBox/trpm.h>
#include <VBox/vmm.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/asm.h>
#include <VBox/log.h>
#ifdef IN_RING3
# include <iprt/thread.h>
#endif



/**
 * Checks if Address Gate 20 is enabled or not.
 *
 * @returns true if enabled.
 * @returns false if disabled.
 * @param   pVM     VM handle.
 */
PGMDECL(bool) PGMPhysIsA20Enabled(PVM pVM)
{
    LogFlow(("PGMPhysIsA20Enabled %d\n", pVM->pgm.s.fA20Enabled));
    return !!pVM->pgm.s.fA20Enabled; /* stupid MS compiler doesn't trust me. */
}


/**
 * Validates a GC physical address.
 *
 * @returns true if valid.
 * @returns false if invalid.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The physical address to validate.
 */
PGMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
{
    for (PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
         pRam;
         pRam = CTXSUFF(pRam->pNext))
    {
        RTGCPHYS off = GCPhys - pRam->GCPhys;
        if (off < pRam->cb)
            return true;
    }
    return false;
}


/**
 * Checks if a GC physical address is a normal page,
 * i.e. not ROM, MMIO or reserved.
 *
 * @returns true if normal.
 * @returns false if invalid, ROM, MMIO or reserved page.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The physical address to check.
 */
PGMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
{
    for (PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
         pRam;
         pRam = CTXSUFF(pRam->pNext))
    {
        RTGCPHYS off = GCPhys - pRam->GCPhys;
        if (off < pRam->cb)
            return !(pRam->aHCPhys[off >> PAGE_SHIFT] & (MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO2));
    }
    return false;
}


/**
 * Converts a GC physical address to a HC physical address.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
 *          page but has no physical backing.
 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
 *          GC physical address.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The GC physical address to convert.
 * @param   pHCPhys Where to store the HC physical address on success.
 */
PGMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
{
    for (PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
         pRam;
         pRam = CTXSUFF(pRam->pNext))
    {
        RTGCPHYS off = GCPhys - pRam->GCPhys;
        if (off < pRam->cb)
        {
            if (    pRam->pvHC
                ||  (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
            {
                unsigned iPage = off >> PAGE_SHIFT;
                if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
                {
#ifdef IN_RING3
                    int rc = pgmr3PhysGrowRange(pVM, GCPhys);
#else
                    int rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
                    if (rc != VINF_SUCCESS)
                        return rc;
                }

                RTHCPHYS HCPhys = pRam->aHCPhys[off >> PAGE_SHIFT];
#ifndef PGM_IGNORE_RAM_FLAGS_RESERVED
                if (!(HCPhys & MM_RAM_FLAGS_RESERVED))
#endif
                {
                    *pHCPhys = (HCPhys & X86_PTE_PAE_PG_MASK)
                             | (off & PAGE_OFFSET_MASK);
                    return VINF_SUCCESS;
                }
            }
            return VERR_PGM_PHYS_PAGE_RESERVED;
        }
    }
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}
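
/*
 * A minimal usage sketch, not part of the original file: resolve the host
 * backing of a guest-physical address and branch on the three documented
 * status codes. pVM and GCPhys are assumed to come from the caller.
 */
#if 0 /* illustrative only */
    RTHCPHYS HCPhys;
    int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
    if (rc == VINF_SUCCESS)
        Log(("GCPhys %VGp is backed by host physical %VHp\n", GCPhys, HCPhys));
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        ; /* valid guest page, but no physical backing (ROM/MMIO/reserved) */
    else /* VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS */
        ; /* not covered by any RAM range */
#endif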


/**
 * Converts a GC physical address to a HC pointer.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
 *          page but has no physical backing.
 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
 *          GC physical address.
 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
 *          a dynamic ram chunk boundary.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The GC physical address to convert.
 * @param   cbRange The size of the physical range in bytes.
 * @param   pHCPtr  Where to store the HC pointer on success.
 */
PGMDECL(int) PGMPhysGCPhys2HCPtr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTHCPTR pHCPtr)
{
#ifdef PGM_DYNAMIC_RAM_ALLOC
    if ((GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK) != ((GCPhys + cbRange - 1) & PGM_DYNAMIC_CHUNK_BASE_MASK))
    {
        AssertMsgFailed(("PGMPhysGCPhys2HCPtr %VGp - %VGp crosses a chunk boundary!!\n", GCPhys, GCPhys + cbRange));
        return VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY;
    }
#endif

    for (PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
         pRam;
         pRam = CTXSUFF(pRam->pNext))
    {
        RTGCPHYS off = GCPhys - pRam->GCPhys;
        if (off < pRam->cb)
        {
            if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
            {
                unsigned iPage = off >> PAGE_SHIFT;
                if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
                {
#ifdef IN_RING3
                    int rc = pgmr3PhysGrowRange(pVM, GCPhys);
#else
                    int rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
                    if (rc != VINF_SUCCESS)
                        return rc;
                }
                unsigned idx = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
                *pHCPtr = (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pavHCChunk)[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
                return VINF_SUCCESS;
            }
            if (pRam->pvHC)
            {
#ifndef PGM_IGNORE_RAM_FLAGS_RESERVED
                if (!(pRam->aHCPhys[off >> PAGE_SHIFT] & MM_RAM_FLAGS_RESERVED))
#endif
                {
                    *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + off);
                    return VINF_SUCCESS;
                }
            }
            return VERR_PGM_PHYS_PAGE_RESERVED;
        }
    }
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}
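
/*
 * A minimal sketch, not part of the original file, of how a caller can honor
 * the chunk restriction above: clamp each request so it never crosses a
 * PGM_DYNAMIC_CHUNK_SIZE boundary and re-translate per chunk. pVM, GCPhys
 * and cb are assumed caller-supplied.
 */
#if 0 /* illustrative only */
    while (cb > 0)
    {
        RTUINT cbChunk = PGM_DYNAMIC_CHUNK_SIZE - (GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK);
        RTUINT cbThis  = cb < cbChunk ? (RTUINT)cb : cbChunk;
        RTHCPTR pvHC;
        int rc = PGMPhysGCPhys2HCPtr(pVM, GCPhys, cbThis, &pvHC);
        if (rc != VINF_SUCCESS)
            return rc;
        /* ... access cbThis bytes at pvHC ... */
        GCPhys += cbThis;
        cb     -= cbThis;
    }
#endif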


/**
 * Validates a HC pointer.
 *
 * @returns true if valid.
 * @returns false if invalid.
 * @param   pVM     The VM handle.
 * @param   HCPtr   The pointer to validate.
 */
PGMDECL(bool) PGMPhysIsHCPtrValid(PVM pVM, RTHCPTR HCPtr)
{
    for (PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
         pRam;
         pRam = CTXSUFF(pRam->pNext))
    {
        if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
        {
            /** @note this is quite slow */
            for (unsigned iChunk = 0; iChunk < (pRam->cb >> PGM_DYNAMIC_CHUNK_SHIFT); iChunk++)
            {
                if (CTXSUFF(pRam->pavHCChunk)[iChunk])
                {
                    RTHCUINTPTR off = (RTHCUINTPTR)HCPtr - (RTHCUINTPTR)CTXSUFF(pRam->pavHCChunk)[iChunk];
                    if (off < PGM_DYNAMIC_CHUNK_SIZE)
                        return true;
                }
            }
        }
        else if (pRam->pvHC)
        {
            RTHCUINTPTR off = (RTHCUINTPTR)HCPtr - (RTHCUINTPTR)pRam->pvHC;

            if (off < pRam->cb)
                return true;
        }
    }
    return false;
}


/**
 * Converts a HC pointer to a GC physical address.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_INVALID_POINTER if the pointer is not within the
 *          GC physical memory.
 * @param   pVM     The VM handle.
 * @param   HCPtr   The HC pointer to convert.
 * @param   pGCPhys Where to store the GC physical address on success.
 */
PGMDECL(int) PGMPhysHCPtr2GCPhys(PVM pVM, RTHCPTR HCPtr, PRTGCPHYS pGCPhys)
{
    for (PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
         pRam;
         pRam = CTXSUFF(pRam->pNext))
    {
        if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
        {
            /** @note this is quite slow */
            for (unsigned iChunk = 0; iChunk < (pRam->cb >> PGM_DYNAMIC_CHUNK_SHIFT); iChunk++)
            {
                if (CTXSUFF(pRam->pavHCChunk)[iChunk])
                {
                    RTHCUINTPTR off = (RTHCUINTPTR)HCPtr - (RTHCUINTPTR)CTXSUFF(pRam->pavHCChunk)[iChunk];
                    if (off < PGM_DYNAMIC_CHUNK_SIZE)
                    {
                        *pGCPhys = pRam->GCPhys + iChunk*PGM_DYNAMIC_CHUNK_SIZE + off;
                        return VINF_SUCCESS;
                    }
                }
            }
        }
        else if (pRam->pvHC)
        {
            RTHCUINTPTR off = (RTHCUINTPTR)HCPtr - (RTHCUINTPTR)pRam->pvHC;
            if (off < pRam->cb)
            {
                *pGCPhys = pRam->GCPhys + off;
                return VINF_SUCCESS;
            }
        }
    }
    return VERR_INVALID_POINTER;
}


/**
 * Converts a HC pointer to a HC physical address.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
 *          page but has no physical backing.
 * @returns VERR_INVALID_POINTER if the pointer is not within the
 *          GC physical memory.
 * @param   pVM     The VM handle.
 * @param   HCPtr   The HC pointer to convert.
 * @param   pHCPhys Where to store the HC physical address on success.
 */
PGMDECL(int) PGMPhysHCPtr2HCPhys(PVM pVM, RTHCPTR HCPtr, PRTHCPHYS pHCPhys)
{
    for (PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
         pRam;
         pRam = CTXSUFF(pRam->pNext))
    {
        if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
        {
            /** @note this is quite slow */
            for (unsigned iChunk = 0; iChunk < (pRam->cb >> PGM_DYNAMIC_CHUNK_SHIFT); iChunk++)
            {
                if (CTXSUFF(pRam->pavHCChunk)[iChunk])
                {
                    RTHCUINTPTR off = (RTHCUINTPTR)HCPtr - (RTHCUINTPTR)CTXSUFF(pRam->pavHCChunk)[iChunk];
                    if (off < PGM_DYNAMIC_CHUNK_SIZE)
                    {
                        RTHCPHYS HCPhys = pRam->aHCPhys[off >> PAGE_SHIFT];
#ifndef PGM_IGNORE_RAM_FLAGS_RESERVED
                        if (!(HCPhys & MM_RAM_FLAGS_RESERVED))
#endif
                        {
                            *pHCPhys = (HCPhys & X86_PTE_PAE_PG_MASK)
                                     | (off & PAGE_OFFSET_MASK);
                            return VINF_SUCCESS;
                        }
                        return VERR_PGM_PHYS_PAGE_RESERVED;
                    }
                }
            }
        }
        else if (pRam->pvHC)
        {
            RTHCUINTPTR off = (RTHCUINTPTR)HCPtr - (RTHCUINTPTR)pRam->pvHC;
            if (off < pRam->cb)
            {
                RTHCPHYS HCPhys = pRam->aHCPhys[off >> PAGE_SHIFT];
#ifndef PGM_IGNORE_RAM_FLAGS_RESERVED
                if (!(HCPhys & MM_RAM_FLAGS_RESERVED))
#endif
                {
                    *pHCPhys = (HCPhys & X86_PTE_PAE_PG_MASK)
                             | (off & PAGE_OFFSET_MASK);
                    return VINF_SUCCESS;
                }
                return VERR_PGM_PHYS_PAGE_RESERVED;
            }
        }
    }
    return VERR_INVALID_POINTER;
}


/**
 * Validates a HC Physical address.
 *
 * This is an extremely slow API, don't use it!
 *
 * @returns true if valid.
 * @returns false if invalid.
 * @param   pVM     The VM handle.
 * @param   HCPhys  The physical address to validate.
 */
PGMDECL(bool) PGMPhysIsHCPhysValid(PVM pVM, RTHCPHYS HCPhys)
{
    RTGCPHYS GCPhys;
    int rc = PGMPhysHCPhys2GCPhys(pVM, HCPhys, &GCPhys);
    return VBOX_SUCCESS(rc);
}


/**
 * Converts a HC physical address to a GC physical address.
 *
 * This is an extremely slow API, don't use it!
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_INVALID_POINTER if the HC physical address is
 *          not within the GC physical memory.
 * @param   pVM     The VM handle.
 * @param   HCPhys  The HC physical address to convert.
 * @param   pGCPhys Where to store the GC physical address on success.
 */
PGMDECL(int) PGMPhysHCPhys2GCPhys(PVM pVM, RTHCPHYS HCPhys, PRTGCPHYS pGCPhys)
{
    unsigned off = HCPhys & PAGE_OFFSET_MASK;
    HCPhys &= X86_PTE_PAE_PG_MASK;
    for (PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
         pRam;
         pRam = CTXSUFF(pRam->pNext))
    {
        if (    pRam->pvHC
            ||  (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
        {
            unsigned iPage = pRam->cb >> PAGE_SHIFT;
            while (iPage-- > 0)
#ifndef PGM_IGNORE_RAM_FLAGS_RESERVED
                if ((pRam->aHCPhys[iPage] & (X86_PTE_PAE_PG_MASK | MM_RAM_FLAGS_RESERVED)) == HCPhys)
#else
                if ((pRam->aHCPhys[iPage] & (X86_PTE_PAE_PG_MASK)) == HCPhys)
#endif
                {
                    *pGCPhys = pRam->GCPhys + (iPage << PAGE_SHIFT) + off;
                    return VINF_SUCCESS;
                }
        }
    }
    return VERR_INVALID_POINTER;
}


/**
 * Converts a HC physical address to a HC pointer.
 *
 * This is an extremely slow API, don't use it!
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_INVALID_POINTER if the HC physical address is
 *          not within the GC physical memory.
 * @param   pVM     The VM handle.
 * @param   HCPhys  The HC physical address to convert.
 * @param   pHCPtr  Where to store the HC pointer on success.
 */
PGMDECL(int) PGMPhysHCPhys2HCPtr(PVM pVM, RTHCPHYS HCPhys, PRTHCPTR pHCPtr)
{
    unsigned off = HCPhys & PAGE_OFFSET_MASK;
    HCPhys &= X86_PTE_PAE_PG_MASK;
    for (PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
         pRam;
         pRam = CTXSUFF(pRam->pNext))
    {
        if (    pRam->pvHC
            ||  (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
        {
            unsigned iPage = pRam->cb >> PAGE_SHIFT;
            while (iPage-- > 0)
#ifndef PGM_IGNORE_RAM_FLAGS_RESERVED
                if ((pRam->aHCPhys[iPage] & (X86_PTE_PAE_PG_MASK | MM_RAM_FLAGS_RESERVED)) == HCPhys)
#else
                if ((pRam->aHCPhys[iPage] & (X86_PTE_PAE_PG_MASK)) == HCPhys)
#endif
                {
                    if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
                    {
                        unsigned idx = (iPage >> (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT));

                        *pHCPtr = (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pavHCChunk)[idx] + ((iPage << PAGE_SHIFT) & PGM_DYNAMIC_CHUNK_OFFSET_MASK) + off);
                    }
                    else
                        *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + (iPage << PAGE_SHIFT) + off);

                    return VINF_SUCCESS;
                }
        }
    }
    return VERR_INVALID_POINTER;
}


/**
 * Converts a guest pointer to a GC physical address.
 *
 * This uses the current CR3/CR0/CR4 of the guest.
 *
 * @returns VBox status code.
 * @param   pVM     The VM Handle.
 * @param   GCPtr   The guest pointer to convert.
 * @param   pGCPhys Where to store the GC physical address.
 */
PGMDECL(int) PGMPhysGCPtr2GCPhys(PVM pVM, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
{
    return PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
}


/**
 * Converts a guest pointer to a HC physical address.
 *
 * This uses the current CR3/CR0/CR4 of the guest.
 *
 * @returns VBox status code.
 * @param   pVM     The VM Handle.
 * @param   GCPtr   The guest pointer to convert.
 * @param   pHCPhys Where to store the HC physical address.
 */
PGMDECL(int) PGMPhysGCPtr2HCPhys(PVM pVM, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
{
    RTGCPHYS GCPhys;
    int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
    if (VBOX_SUCCESS(rc))
        rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
    return rc;
}


/**
 * Converts a guest pointer to a HC pointer.
 *
 * This uses the current CR3/CR0/CR4 of the guest.
 *
 * @returns VBox status code.
 * @param   pVM     The VM Handle.
 * @param   GCPtr   The guest pointer to convert.
 * @param   pHCPtr  Where to store the HC virtual address.
 */
PGMDECL(int) PGMPhysGCPtr2HCPtr(PVM pVM, RTGCPTR GCPtr, PRTHCPTR pHCPtr)
{
    RTGCPHYS GCPhys;
    int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
    if (VBOX_SUCCESS(rc))
        rc = PGMPhysGCPhys2HCPtr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
    return rc;
}


/**
 * Converts a guest virtual address to a HC pointer using the specified CR3 and flags.
 *
 * @returns VBox status code.
 * @param   pVM     The VM Handle.
 * @param   GCPtr   The guest pointer to convert.
 * @param   cr3     The guest CR3.
 * @param   fFlags  Flags used for interpreting the PD correctly: X86_CR4_PSE and X86_CR4_PAE.
 * @param   pHCPtr  Where to store the HC pointer.
 *
 * @remark  This function is used by the REM at a time when PGM could
 *          potentially not be in sync. It could also be used by a
 *          future DBGF API for CPU-state independent conversions.
 */
PGMDECL(int) PGMPhysGCPtr2HCPtrByGstCR3(PVM pVM, RTGCPTR GCPtr, uint32_t cr3, unsigned fFlags, PRTHCPTR pHCPtr)
{
    /*
     * PAE or 32-bit?
     */
    int rc;
    if (!(fFlags & X86_CR4_PAE))
    {
        PX86PD pPD;
        rc = PGM_GCPHYS_2_PTR(pVM, cr3 & X86_CR3_PAGE_MASK, &pPD);
        if (VBOX_SUCCESS(rc))
        {
            VBOXPDE Pde = pPD->a[(RTGCUINTPTR)GCPtr >> X86_PD_SHIFT];
            if (Pde.n.u1Present)
            {
                if ((fFlags & X86_CR4_PSE) && Pde.b.u1Size)
                {   /* (big page) */
                    rc = PGMPhysGCPhys2HCPtr(pVM, (Pde.u & X86_PDE4M_PG_MASK) | ((RTGCUINTPTR)GCPtr & X86_PAGE_4M_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
                }
                else
                {   /* (normal page) */
                    PVBOXPT pPT;
                    rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & X86_PDE_PG_MASK, &pPT);
                    if (VBOX_SUCCESS(rc))
                    {
                        VBOXPTE Pte = pPT->a[((RTGCUINTPTR)GCPtr >> X86_PT_SHIFT) & X86_PT_MASK];
                        if (Pte.n.u1Present)
                            return PGMPhysGCPhys2HCPtr(pVM, (Pte.u & X86_PTE_PG_MASK) | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
                        rc = VERR_PAGE_NOT_PRESENT;
                    }
                }
            }
            else
                rc = VERR_PAGE_TABLE_NOT_PRESENT;
        }
    }
    else
    {
        /** @todo long mode! */
        PX86PDPTR pPdptr;
        rc = PGM_GCPHYS_2_PTR(pVM, cr3 & X86_CR3_PAE_PAGE_MASK, &pPdptr);
        if (VBOX_SUCCESS(rc))
        {
            X86PDPE Pdpe = pPdptr->a[((RTGCUINTPTR)GCPtr >> X86_PDPTR_SHIFT) & X86_PDPTR_MASK];
            if (Pdpe.n.u1Present)
            {
                PX86PDPAE pPD;
                rc = PGM_GCPHYS_2_PTR(pVM, Pdpe.u & X86_PDPE_PG_MASK, &pPD);
                if (VBOX_SUCCESS(rc))
                {
                    X86PDEPAE Pde = pPD->a[((RTGCUINTPTR)GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK];
                    if (Pde.n.u1Present)
                    {
                        if ((fFlags & X86_CR4_PSE) && Pde.b.u1Size)
                        {   /* (big page) */
                            rc = PGMPhysGCPhys2HCPtr(pVM, (Pde.u & X86_PDE4M_PAE_PG_MASK) | ((RTGCUINTPTR)GCPtr & X86_PAGE_4M_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
                        }
                        else
                        {   /* (normal page) */
                            PX86PTPAE pPT;
                            rc = PGM_GCPHYS_2_PTR(pVM, (Pde.u & X86_PDE_PAE_PG_MASK), &pPT);
                            if (VBOX_SUCCESS(rc))
                            {
                                X86PTEPAE Pte = pPT->a[((RTGCUINTPTR)GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK];
                                if (Pte.n.u1Present)
                                    return PGMPhysGCPhys2HCPtr(pVM, (Pte.u & X86_PTE_PAE_PG_MASK) | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
                                rc = VERR_PAGE_NOT_PRESENT;
                            }
                        }
                    }
                    else
                        rc = VERR_PAGE_TABLE_NOT_PRESENT;
                }
            }
            else
                rc = VERR_PAGE_TABLE_NOT_PRESENT;
        }
    }
    return rc;
}
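
/*
 * A minimal sketch, not part of the original file: the fFlags argument
 * mirrors the guest CR4 paging bits, so a caller holding raw control
 * registers (cr3 and cr4 assumed caller-supplied) would invoke it like this.
 */
#if 0 /* illustrative only */
    RTHCPTR pvHC;
    int rc = PGMPhysGCPtr2HCPtrByGstCR3(pVM, GCPtr, cr3,
                                        cr4 & (X86_CR4_PSE | X86_CR4_PAE), &pvHC);
#endif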


#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS


#ifdef IN_RING3
/**
 * Caches a PGMPhys memory access.
 *
 * @param   pVM     VM Handle.
 * @param   pCache  Cache structure pointer.
 * @param   GCPhys  GC physical address.
 * @param   pbHC    HC pointer corresponding to the physical page.
 */
static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbHC)
{
    uint32_t iCacheIndex;

    GCPhys = PAGE_ADDRESS(GCPhys);
    pbHC   = (uint8_t *)PAGE_ADDRESS(pbHC);

    iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);

    ASMBitSet(&pCache->aEntries, iCacheIndex);

    pCache->Entry[iCacheIndex].GCPhys = GCPhys;
    pCache->Entry[iCacheIndex].pbHC   = pbHC;
}
#endif
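
/*
 * A short worked example, not part of the original file: the cache is
 * direct-mapped on the page frame number. Assuming a hypothetical
 * PGM_MAX_PHYSCACHE_ENTRIES_MASK of 0x7f (128 entries), GCPhys 0x00203000
 * has frame number 0x203, so iCacheIndex = 0x203 & 0x7f = 0x03; any page
 * whose frame number is congruent modulo 128 will evict this entry.
 */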

/**
 * Read physical memory.
 *
 * This API respects access handlers and MMIO. Use PGMPhysReadGCPhys() if you
 * want to ignore those.
 *
 * @param   pVM     VM Handle.
 * @param   GCPhys  Physical address to start reading from.
 * @param   pvBuf   Where to put the read bits.
 * @param   cbRead  How many bytes to read.
 */
PGMDECL(void) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
{
#ifdef IN_RING3
    bool fGrabbedLock = false;
#endif

    AssertMsg(cbRead > 0, ("don't even think about reading zero bytes!\n"));
    if (cbRead == 0)
        return;

    LogFlow(("PGMPhysRead: %VGp %d\n", GCPhys, cbRead));

#ifdef IN_RING3
    if (!VM_IS_EMT(pVM))
    {
        pgmLock(pVM);
        fGrabbedLock = true;
    }
#endif

    /*
     * Copy loop on ram ranges.
     */
    PPGMRAMRANGE pCur = CTXSUFF(pVM->pgm.s.pRamRanges);
    for (;;)
    {
        /* Find range. */
        while (pCur && GCPhys > pCur->GCPhysLast)
            pCur = CTXSUFF(pCur->pNext);
        /* Inside range or not? */
        if (pCur && GCPhys >= pCur->GCPhys)
        {
            /*
             * Must work our way through this range page by page.
             */
            RTGCPHYS off = GCPhys - pCur->GCPhys;
            while (off < pCur->cb)
            {
                unsigned iPage = off >> PAGE_SHIFT;
                size_t   cb;

                /* Physical chunk in dynamically allocated range not present? */
                if (RT_UNLIKELY(!(pCur->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
                {
                    /* Treat it as reserved; return zeros */
                    cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
                    if (cb >= cbRead)
                    {
                        memset(pvBuf, 0, cbRead);
                        goto end;
                    }
                    memset(pvBuf, 0, cb);
                }
                else
                {
                    RTHCPHYS HCPhys = pCur->aHCPhys[iPage];
                    switch (HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_VIRTUAL_ALL | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_ROM))
                    {
                        /*
                         * Normal memory or ROM.
                         */
                        case 0:
                        case MM_RAM_FLAGS_ROM:
                        case MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_RESERVED:
                        case MM_RAM_FLAGS_PHYSICAL_WRITE:
                        case MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_PHYSICAL_WRITE:
                        case MM_RAM_FLAGS_VIRTUAL_WRITE:
                        {
#ifdef IN_GC
                            void *pvSrc = NULL;
                            PGMGCDynMapHCPage(pVM, HCPhys & X86_PTE_PAE_PG_MASK, &pvSrc);
                            pvSrc = (char *)pvSrc + (off & PAGE_OFFSET_MASK);
#else
                            void *pvSrc = PGMRAMRANGE_GETHCPTR(pCur, off)
#endif
                            cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
                            if (cb >= cbRead)
                            {
#if defined(IN_RING3) && defined(PGM_PHYSMEMACCESS_CACHING)
                                if (cbRead <= 4)
                                    pgmPhysCacheAdd(pVM, &pVM->pgm.s.pgmphysreadcache, GCPhys, (uint8_t *)pvSrc);
#endif /* IN_RING3 && PGM_PHYSMEMACCESS_CACHING */
                                memcpy(pvBuf, pvSrc, cbRead);
                                goto end;
                            }
                            memcpy(pvBuf, pvSrc, cb);
                            break;
                        }

                        /*
                         * All reserved, nothing there.
                         */
                        case MM_RAM_FLAGS_RESERVED:
                            cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
                            if (cb >= cbRead)
                            {
                                memset(pvBuf, 0, cbRead);
                                goto end;
                            }
                            memset(pvBuf, 0, cb);
                            break;

                        /*
                         * Physical handler.
                         */
                        case MM_RAM_FLAGS_PHYSICAL_ALL:
                        case MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_PHYSICAL_ALL: /** r=bird: MMIO2 isn't in the mask! */
                        {
                            int rc = VINF_PGM_HANDLER_DO_DEFAULT;
                            cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
#ifdef IN_RING3 /** @todo deal with this in GC and R0! */

                            /* find and call the handler */
                            PPGMPHYSHANDLER pNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesHC->PhysHandlers, GCPhys);
                            if (pNode && pNode->pfnHandlerR3)
                            {
                                size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
                                if (cbRange < cb)
                                    cb = cbRange;
                                if (cb > cbRead)
                                    cb = cbRead;

                                void *pvSrc = PGMRAMRANGE_GETHCPTR(pCur, off)

                                /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
                                rc = pNode->pfnHandlerR3(pVM, GCPhys, pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pNode->pvUserR3);
                            }
#endif /* IN_RING3 */
                            if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
                            {
#ifdef IN_GC
                                void *pvSrc = NULL;
                                PGMGCDynMapHCPage(pVM, HCPhys & X86_PTE_PAE_PG_MASK, &pvSrc);
                                pvSrc = (char *)pvSrc + (off & PAGE_OFFSET_MASK);
#else
                                void *pvSrc = PGMRAMRANGE_GETHCPTR(pCur, off)
#endif

                                if (cb >= cbRead)
                                {
                                    memcpy(pvBuf, pvSrc, cbRead);
                                    goto end;
                                }
                                memcpy(pvBuf, pvSrc, cb);
                            }
                            else if (cb >= cbRead)
                                goto end;
                            break;
                        }

                        case MM_RAM_FLAGS_VIRTUAL_ALL:
                        {
                            int rc = VINF_PGM_HANDLER_DO_DEFAULT;
                            cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
                            /* Search the whole tree for matching physical addresses (rather expensive!) */
                            PPGMVIRTHANDLER pNode;
                            unsigned iPage;
                            int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pNode, &iPage);
                            if (VBOX_SUCCESS(rc2) && pNode->pfnHandlerHC)
                            {
                                size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
                                if (cbRange < cb)
                                    cb = cbRange;
                                if (cb > cbRead)
                                    cb = cbRead;
                                RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pNode->GCPtr & PAGE_BASE_GC_MASK)
                                                  + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);

                                void *pvSrc = PGMRAMRANGE_GETHCPTR(pCur, off)

                                /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
                                rc = pNode->pfnHandlerHC(pVM, (RTGCPTR)GCPtr, pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, 0);
                            }
#endif /* IN_RING3 */
                            if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
                            {
#ifdef IN_GC
                                void *pvSrc = NULL;
                                PGMGCDynMapHCPage(pVM, HCPhys & X86_PTE_PAE_PG_MASK, &pvSrc);
                                pvSrc = (char *)pvSrc + (off & PAGE_OFFSET_MASK);
#else
                                void *pvSrc = PGMRAMRANGE_GETHCPTR(pCur, off)
#endif
                                if (cb >= cbRead)
                                {
                                    memcpy(pvBuf, pvSrc, cbRead);
                                    goto end;
                                }
                                memcpy(pvBuf, pvSrc, cb);
                            }
                            else if (cb >= cbRead)
                                goto end;
                            break;
                        }

                        /*
                         * The rest needs to be taken more carefully.
                         */
                        default:
#if 1 /** @todo r=bird: Can you do this properly please. */
                            /** @todo Try MMIO; quick hack */
                            if (cbRead <= 4 && IOMMMIORead(pVM, GCPhys, (uint32_t *)pvBuf, cbRead) == VINF_SUCCESS)
                                goto end;
#endif

                            /** @todo fix me later. */
                            AssertReleaseMsgFailed(("Unknown read at %VGp size %d implement the complex physical reading case %x\n",
                                                    GCPhys, cbRead,
                                                    HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_VIRTUAL_ALL | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_ROM)));
                            cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
                            break;
                    }
                }
                cbRead -= cb;
                off    += cb;
                pvBuf   = (char *)pvBuf + cb;
            }

            GCPhys = pCur->GCPhysLast + 1;
        }
        else
        {
            LogFlow(("PGMPhysRead: Unassigned %VGp size=%d\n", GCPhys, cbRead));

            /*
             * Unassigned address space.
             */
            size_t cb;
            if (    !pCur
                ||  (cb = pCur->GCPhys - GCPhys) >= cbRead)
            {
                memset(pvBuf, 0, cbRead);
                goto end;
            }

            memset(pvBuf, 0, cb);
            cbRead -= cb;
            pvBuf   = (char *)pvBuf + cb;
            GCPhys += cb;
        }
    }
end:
#ifdef IN_RING3
    if (fGrabbedLock)
        pgmUnlock(pVM);
#endif
    return;
}
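
/*
 * A minimal usage sketch, not part of the original file: read a guest dword
 * through the handler-aware path. The API returns void; reads from unbacked
 * or unassigned space simply come back zero-filled.
 */
#if 0 /* illustrative only */
    uint32_t u32;
    PGMPhysRead(pVM, GCPhys, &u32, sizeof(u32));
#endif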

/**
 * Write to physical memory.
 *
 * This API respects access handlers and MMIO. Use PGMPhysWriteGCPhys() if you
 * want to ignore those.
 *
 * @param   pVM     VM Handle.
 * @param   GCPhys  Physical address to write to.
 * @param   pvBuf   What to write.
 * @param   cbWrite How many bytes to write.
 */
PGMDECL(void) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
{
#ifdef IN_RING3
    bool fGrabbedLock = false;
#endif

    AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
    AssertMsg(cbWrite > 0, ("don't even think about writing zero bytes!\n"));
    if (cbWrite == 0)
        return;

    LogFlow(("PGMPhysWrite: %VGp %d\n", GCPhys, cbWrite));

#ifdef IN_RING3
    if (!VM_IS_EMT(pVM))
    {
        pgmLock(pVM);
        fGrabbedLock = true;
    }
#endif
    /*
     * Copy loop on ram ranges.
     */
    PPGMRAMRANGE pCur = CTXSUFF(pVM->pgm.s.pRamRanges);
    for (;;)
    {
        /* Find range. */
        while (pCur && GCPhys > pCur->GCPhysLast)
            pCur = CTXSUFF(pCur->pNext);
        /* Inside range or not? */
        if (pCur && GCPhys >= pCur->GCPhys)
        {
            /*
             * Must work our way through this range page by page.
             */
            unsigned off = GCPhys - pCur->GCPhys;
            while (off < pCur->cb)
            {
                unsigned iPage = off >> PAGE_SHIFT;

                /* Physical chunk in dynamically allocated range not present? */
                if (RT_UNLIKELY(!(pCur->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
                {
                    int rc;
#ifdef IN_RING3
                    if (fGrabbedLock)
                    {
                        pgmUnlock(pVM);
                        rc = pgmr3PhysGrowRange(pVM, GCPhys);
                        if (rc == VINF_SUCCESS)
                            PGMPhysWrite(pVM, GCPhys, pvBuf, cbWrite); /* try again; can't assume pCur is still valid (paranoia) */
                        return;
                    }
                    rc = pgmr3PhysGrowRange(pVM, GCPhys);
#else
                    rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
                    if (rc != VINF_SUCCESS)
                        goto end;
                }

                size_t   cb;
                RTHCPHYS HCPhys = pCur->aHCPhys[iPage];
                /** @todo r=bird: missing MM_RAM_FLAGS_ROM here, we shall not allow anyone to overwrite the ROM! */
                switch (HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_VIRTUAL_ALL | MM_RAM_FLAGS_VIRTUAL_WRITE | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_PHYSICAL_WRITE))
                {
                    /*
                     * Normal memory.
                     */
                    case 0:
                    case MM_RAM_FLAGS_MMIO2:
                    {
#ifdef IN_GC
                        void *pvDst = NULL;
                        PGMGCDynMapHCPage(pVM, HCPhys & X86_PTE_PAE_PG_MASK, &pvDst);
                        pvDst = (char *)pvDst + (off & PAGE_OFFSET_MASK);
#else
                        void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
#endif
                        cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
                        if (cb >= cbWrite)
                        {
#if defined(IN_RING3) && defined(PGM_PHYSMEMACCESS_CACHING)
                            if (cbWrite <= 4)
                                pgmPhysCacheAdd(pVM, &pVM->pgm.s.pgmphyswritecache, GCPhys, (uint8_t *)pvDst);
#endif /* IN_RING3 && PGM_PHYSMEMACCESS_CACHING */
                            memcpy(pvDst, pvBuf, cbWrite);
                            goto end;
                        }
                        memcpy(pvDst, pvBuf, cb);
                        break;
                    }

                    /*
                     * All reserved, nothing there.
                     */
                    case MM_RAM_FLAGS_RESERVED:
                    case MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO2:
                        cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
                        if (cb >= cbWrite)
                            goto end;
                        break;

                    /*
                     * Physical handler.
                     */
                    case MM_RAM_FLAGS_PHYSICAL_ALL:
                    case MM_RAM_FLAGS_PHYSICAL_WRITE:
                    case MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_PHYSICAL_ALL:
                    case MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_PHYSICAL_WRITE:
                    {
                        int rc = VINF_PGM_HANDLER_DO_DEFAULT;
                        cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
                        /* find and call the handler */
                        PPGMPHYSHANDLER pNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesHC->PhysHandlers, GCPhys);
                        if (pNode && pNode->pfnHandlerR3)
                        {
                            size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
                            if (cbRange < cb)
                                cb = cbRange;
                            if (cb > cbWrite)
                                cb = cbWrite;

                            void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)

                            /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
                            rc = pNode->pfnHandlerR3(pVM, GCPhys, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, pNode->pvUserR3);
                        }
#endif /* IN_RING3 */
                        if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
                        {
#ifdef IN_GC
                            void *pvDst = NULL;
                            PGMGCDynMapHCPage(pVM, HCPhys & X86_PTE_PAE_PG_MASK, &pvDst);
                            pvDst = (char *)pvDst + (off & PAGE_OFFSET_MASK);
#else
                            void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
#endif
                            if (cb >= cbWrite)
                            {
                                memcpy(pvDst, pvBuf, cbWrite);
                                goto end;
                            }
                            memcpy(pvDst, pvBuf, cb);
                        }
                        else if (cb >= cbWrite)
                            goto end;
                        break;
                    }

                    case MM_RAM_FLAGS_VIRTUAL_ALL:
                    case MM_RAM_FLAGS_VIRTUAL_WRITE:
                    {
                        int rc = VINF_PGM_HANDLER_DO_DEFAULT;
                        cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
#ifdef IN_RING3
/** @todo deal with this in GC and R0! */
                        /* Search the whole tree for matching physical addresses (rather expensive!) */
                        PPGMVIRTHANDLER pNode;
                        unsigned iPage;
                        int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pNode, &iPage);
                        if (VBOX_SUCCESS(rc2) && pNode->pfnHandlerHC)
                        {
                            size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
                            if (cbRange < cb)
                                cb = cbRange;
                            if (cb > cbWrite)
                                cb = cbWrite;
                            RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pNode->GCPtr & PAGE_BASE_GC_MASK)
                                              + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);

                            void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)

                            /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
                            rc = pNode->pfnHandlerHC(pVM, (RTGCPTR)GCPtr, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, 0);
                        }
#endif /* IN_RING3 */
                        if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
                        {
#ifdef IN_GC
                            void *pvDst = NULL;
                            PGMGCDynMapHCPage(pVM, HCPhys & X86_PTE_PAE_PG_MASK, &pvDst);
                            pvDst = (char *)pvDst + (off & PAGE_OFFSET_MASK);
#else
                            void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
#endif
                            if (cb >= cbWrite)
                            {
                                memcpy(pvDst, pvBuf, cbWrite);
                                goto end;
                            }
                            memcpy(pvDst, pvBuf, cb);
                        }
                        else if (cb >= cbWrite)
                            goto end;
                        break;
                    }

                    /*
                     * Physical write handler + virtual write handler.
                     * Consider this a quick workaround for the CSAM + shadow caching problem.
                     *
                     * We hand it to the shadow caching first since it requires the unchanged
                     * data. CSAM will have to put up with it already being changed.
                     */
                    case MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_VIRTUAL_WRITE:
                    {
                        int rc = VINF_PGM_HANDLER_DO_DEFAULT;
                        cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
                        /* 1. The physical handler */
                        PPGMPHYSHANDLER pPhysNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesHC->PhysHandlers, GCPhys);
                        if (pPhysNode && pPhysNode->pfnHandlerR3)
                        {
                            size_t cbRange = pPhysNode->Core.KeyLast - GCPhys + 1;
                            if (cbRange < cb)
                                cb = cbRange;
                            if (cb > cbWrite)
                                cb = cbWrite;

                            void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)

                            /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
                            rc = pPhysNode->pfnHandlerR3(pVM, GCPhys, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, pPhysNode->pvUserR3);
                        }

                        /* 2. The virtual handler (will see incorrect data) */
                        PPGMVIRTHANDLER pVirtNode;
                        unsigned iPage;
                        int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirtNode, &iPage);
                        if (VBOX_SUCCESS(rc2) && pVirtNode->pfnHandlerHC)
                        {
                            size_t cbRange = pVirtNode->Core.KeyLast - GCPhys + 1;
                            if (cbRange < cb)
                                cb = cbRange;
                            if (cb > cbWrite)
                                cb = cbWrite;
                            RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirtNode->GCPtr & PAGE_BASE_GC_MASK)
                                              + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);

                            void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)

                            /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
                            rc2 = pVirtNode->pfnHandlerHC(pVM, (RTGCPTR)GCPtr, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, 0);
                            if (    (   rc2 != VINF_PGM_HANDLER_DO_DEFAULT
                                     && rc == VINF_PGM_HANDLER_DO_DEFAULT)
                                ||  (   VBOX_FAILURE(rc2)
                                     && VBOX_SUCCESS(rc)))
                                rc = rc2;
                        }
#endif /* IN_RING3 */
                        if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
                        {
#ifdef IN_GC
                            void *pvDst = NULL;
                            PGMGCDynMapHCPage(pVM, HCPhys & X86_PTE_PAE_PG_MASK, &pvDst);
                            pvDst = (char *)pvDst + (off & PAGE_OFFSET_MASK);
#else
                            void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
#endif
                            if (cb >= cbWrite)
                            {
                                memcpy(pvDst, pvBuf, cbWrite);
                                goto end;
                            }
                            memcpy(pvDst, pvBuf, cb);
                        }
                        else if (cb >= cbWrite)
                            goto end;
                        break;
                    }


                    /*
                     * The rest needs to be taken more carefully.
                     */
                    default:
#if 1 /** @todo r=bird: Can you do this properly please. */
                        /** @todo Try MMIO; quick hack */
                        if (cbWrite <= 4 && IOMMMIOWrite(pVM, GCPhys, *(uint32_t *)pvBuf, cbWrite) == VINF_SUCCESS)
                            goto end;
#endif

                        /** @todo fix me later. */
                        AssertReleaseMsgFailed(("Unknown write at %VGp size %d implement the complex physical writing case %x\n",
                                                GCPhys, cbWrite,
                                                (HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_VIRTUAL_ALL | MM_RAM_FLAGS_VIRTUAL_WRITE | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_PHYSICAL_WRITE))));
                        cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
                        break;
                }

                cbWrite -= cb;
                off     += cb;
                pvBuf    = (const char *)pvBuf + cb;
            }

            GCPhys = pCur->GCPhysLast + 1;
        }
        else
        {
            /*
             * Unassigned address space.
             */
            size_t cb;
            if (    !pCur
                ||  (cb = pCur->GCPhys - GCPhys) >= cbWrite)
                goto end;

            cbWrite -= cb;
            pvBuf    = (const char *)pvBuf + cb;
            GCPhys  += cb;
        }
    }
end:
#ifdef IN_RING3
    if (fGrabbedLock)
        pgmUnlock(pVM);
#endif
    return;
}

#ifndef IN_GC /* Ring 0 & 3 only */

/**
 * Read from guest physical memory by GC physical address, bypassing
 * MMIO and access handlers.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   pvDst       The destination address.
 * @param   GCPhysSrc   The source address (GC physical address).
 * @param   cb          The number of bytes to read.
 */
PGMDECL(int) PGMPhysReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
{
    /*
     * Anything to be done?
     */
    if (!cb)
        return VINF_SUCCESS;

    /*
     * Loop ram ranges.
     */
    for (PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
         pRam;
         pRam = pRam->CTXSUFF(pNext))
    {
        RTGCPHYS off = GCPhysSrc - pRam->GCPhys;
        if (off < pRam->cb)
        {
            if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
            {
                /* Copy page by page as we're not dealing with a linear HC range. */
                for (;;)
                {
                    /* convert */
                    void *pvSrc;
                    int rc = PGMRamGCPhys2HCPtr(pVM, pRam, GCPhysSrc, &pvSrc);
                    if (VBOX_FAILURE(rc))
                        return rc;

                    /* copy */
                    size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPhysSrc & PAGE_OFFSET_MASK);
                    if (cbRead >= cb)
                    {
                        memcpy(pvDst, pvSrc, cb);
                        return VINF_SUCCESS;
                    }
                    memcpy(pvDst, pvSrc, cbRead);

                    /* next */
                    cb        -= cbRead;
                    pvDst      = (uint8_t *)pvDst + cbRead;
                    GCPhysSrc += cbRead;
                }
            }
            else if (pRam->pvHC)
            {
                /* read */
                size_t cbRead = pRam->cb - off;
                if (cbRead >= cb)
                {
                    memcpy(pvDst, (uint8_t *)pRam->pvHC + off, cb);
                    return VINF_SUCCESS;
                }
                memcpy(pvDst, (uint8_t *)pRam->pvHC + off, cbRead);

                /* next */
                cb        -= cbRead;
                pvDst      = (uint8_t *)pvDst + cbRead;
                GCPhysSrc += cbRead;
            }
            else
                return VERR_PGM_PHYS_PAGE_RESERVED;
        }
        else if (GCPhysSrc < pRam->GCPhysLast)
            break;
    }
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}


/**
 * Write to guest physical memory by GC physical address.
 *
 * This will bypass MMIO and access handlers.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPhysDst   The GC physical address of the destination.
 * @param   pvSrc       The source buffer.
 * @param   cb          The number of bytes to write.
 */
PGMDECL(int) PGMPhysWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
{
    /*
     * Anything to be done?
     */
    if (!cb)
        return VINF_SUCCESS;

    LogFlow(("PGMPhysWriteGCPhys: %VGp %d\n", GCPhysDst, cb));

    /*
     * Loop ram ranges.
     */
    for (PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
         pRam;
         pRam = pRam->CTXSUFF(pNext))
    {
        RTGCPHYS off = GCPhysDst - pRam->GCPhys;
        if (off < pRam->cb)
        {
            if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
            {
                /* Copy page by page as we're not dealing with a linear HC range. */
                for (;;)
                {
                    /* convert */
                    void *pvDst;
                    int rc = PGMRamGCPhys2HCPtr(pVM, pRam, GCPhysDst, &pvDst);
                    if (VBOX_FAILURE(rc))
                        return rc;

                    /* copy */
                    size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPhysDst & PAGE_OFFSET_MASK);
                    if (cbWrite >= cb)
                    {
                        memcpy(pvDst, pvSrc, cb);
                        return VINF_SUCCESS;
                    }
                    memcpy(pvDst, pvSrc, cbWrite);

                    /* next */
                    cb        -= cbWrite;
                    pvSrc      = (uint8_t *)pvSrc + cbWrite;
                    GCPhysDst += cbWrite;
                }
            }
            else if (pRam->pvHC)
            {
                /* write */
                size_t cbWrite = pRam->cb - off;
                if (cbWrite >= cb)
                {
                    memcpy((uint8_t *)pRam->pvHC + off, pvSrc, cb);
                    return VINF_SUCCESS;
                }
                memcpy((uint8_t *)pRam->pvHC + off, pvSrc, cbWrite);

                /* next */
                cb        -= cbWrite;
                GCPhysDst += cbWrite;
                pvSrc      = (uint8_t *)pvSrc + cbWrite;
            }
            else
                return VERR_PGM_PHYS_PAGE_RESERVED;
        }
        else if (GCPhysDst < pRam->GCPhysLast)
            break;
    }
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}
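
/*
 * A minimal sketch, not part of the original file, contrasting the write
 * paths: PGMPhysWriteGCPhys() bypasses MMIO and access handlers, while
 * PGMPhysWrite() above respects them. GCPhysDst is assumed caller-supplied.
 */
#if 0 /* illustrative only */
    uint32_t u32 = UINT32_C(0xdeadbeef);
    int rc = PGMPhysWriteGCPhys(pVM, GCPhysDst, &u32, sizeof(u32)); /* no handlers run */
    if (VBOX_SUCCESS(rc))
        PGMPhysWrite(pVM, GCPhysDst, &u32, sizeof(u32));            /* handlers may run */
#endif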


/**
 * Read from guest physical memory referenced by GC pointer.
 *
 * This function uses the current CR3/CR0/CR4 of the guest and will
 * bypass access handlers and not set any accessed bits.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   pvDst       The destination address.
 * @param   GCPtrSrc    The source address (GC pointer).
 * @param   cb          The number of bytes to read.
 */
PGMDECL(int) PGMPhysReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
{
    /*
     * Anything to do?
     */
    if (!cb)
        return VINF_SUCCESS;

    /*
     * Optimize reads within a single page.
     */
    if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
    {
        void *pvSrc;
        int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrSrc, &pvSrc);
        if (VBOX_FAILURE(rc))
            return rc;
        memcpy(pvDst, pvSrc, cb);
        return VINF_SUCCESS;
    }

    /*
     * Page by page.
     */
    for (;;)
    {
        /* convert */
        void *pvSrc;
        int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrSrc, &pvSrc);
        if (VBOX_FAILURE(rc))
            return rc;

        /* copy */
        size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
        if (cbRead >= cb)
        {
            memcpy(pvDst, pvSrc, cb);
            return VINF_SUCCESS;
        }
        memcpy(pvDst, pvSrc, cbRead);

        /* next */
        cb       -= cbRead;
        pvDst     = (uint8_t *)pvDst + cbRead;
        GCPtrSrc += cbRead;
    }
}


/**
 * Write to guest physical memory referenced by GC pointer.
 *
 * This function uses the current CR3/CR0/CR4 of the guest and will
 * bypass access handlers and not set dirty or accessed bits.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtrDst    The destination address (GC pointer).
 * @param   pvSrc       The source address.
 * @param   cb          The number of bytes to write.
 */
PGMDECL(int) PGMPhysWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
{
    /*
     * Anything to do?
     */
    if (!cb)
        return VINF_SUCCESS;

    LogFlow(("PGMPhysWriteGCPtr: %VGv %d\n", GCPtrDst, cb));

    /*
     * Optimize writes within a single page.
     */
    if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
    {
        void *pvDst;
        int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrDst, &pvDst);
        if (VBOX_FAILURE(rc))
            return rc;
        memcpy(pvDst, pvSrc, cb);
        return VINF_SUCCESS;
    }

    /*
     * Page by page.
     */
    for (;;)
    {
        /* convert */
        void *pvDst;
        int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrDst, &pvDst);
        if (VBOX_FAILURE(rc))
            return rc;

        /* copy */
        size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
        if (cbWrite >= cb)
        {
            memcpy(pvDst, pvSrc, cb);
            return VINF_SUCCESS;
        }
        memcpy(pvDst, pvSrc, cbWrite);

        /* next */
        cb       -= cbWrite;
        pvSrc     = (uint8_t *)pvSrc + cbWrite;
        GCPtrDst += cbWrite;
    }
}


/**
 * Write to guest physical memory referenced by GC pointer and update the PTE.
 *
 * This function uses the current CR3/CR0/CR4 of the guest and will
 * bypass access handlers and set any dirty and accessed bits in the PTE.
 *
 * If you don't want to set the dirty bit, use PGMPhysWriteGCPtr().
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtrDst    The destination address (GC pointer).
 * @param   pvSrc       The source address.
 * @param   cb          The number of bytes to write.
 */
PGMDECL(int) PGMPhysWriteGCPtrDirty(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
{
    /*
     * Anything to do?
     */
    if (!cb)
        return VINF_SUCCESS;

    /*
     * Optimize writes within a single page.
     */
    if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
    {
        void *pvDst;
        int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrDst, &pvDst);
        if (VBOX_FAILURE(rc))
            return rc;
        memcpy(pvDst, pvSrc, cb);
        rc = PGMGstModifyPage(pVM, GCPtrDst, cb, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
        AssertRC(rc);
        return VINF_SUCCESS;
    }

    /*
     * Page by page.
     */
    for (;;)
    {
        /* convert */
        void *pvDst;
        int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrDst, &pvDst);
        if (VBOX_FAILURE(rc))
            return rc;

        /* mark the guest page as accessed and dirty. */
        rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
        AssertRC(rc);

        /* copy */
        size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
        if (cbWrite >= cb)
        {
            memcpy(pvDst, pvSrc, cb);
            return VINF_SUCCESS;
        }
        memcpy(pvDst, pvSrc, cbWrite);

        /* next */
        cb       -= cbWrite;
        GCPtrDst += cbWrite;
        pvSrc     = (char *)pvSrc + cbWrite;
    }
}

#endif /* !IN_GC */


/**
 * Performs a read of guest virtual memory for instruction emulation.
 *
 * This will check permissions, raise exceptions and update the access bits.
 *
 * The current implementation will bypass all access handlers. It may later be
 * changed to at least respect MMIO.
 *
 * @returns VBox status code suitable to scheduling.
 * @retval  VINF_SUCCESS if the read was performed successfully.
 * @retval  VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
 * @retval  VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
 *
 * @param   pVM         The VM handle.
 * @param   pCtxCore    The context core.
 * @param   pvDst       Where to put the bytes we've read.
 * @param   GCPtrSrc    The source address.
 * @param   cb          The number of bytes to read. Not more than a page.
 *
 * @remark  This function will dynamically map physical pages in GC. This may unmap
 *          mappings done by the caller. Be careful!
 */
PGMDECL(int) PGMPhysInterpretedRead(PVM pVM, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
{
    Assert(cb <= PAGE_SIZE);

/** @todo r=bird: This isn't perfect!
 *  -# It's not checking for reserved bits being 1.
 *  -# It's not correctly dealing with the access bit.
 *  -# It's not respecting MMIO memory or any other access handlers.
 */
    /*
     * 1. Translate virtual to physical. This may fault.
     * 2. Map the physical address.
     * 3. Do the read operation.
     * 4. Set access bits if required.
     */
    int rc;
    unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
    if (cb <= cb1)
    {
        /*
         * Not crossing pages.
         */
        RTGCPHYS GCPhys;
        uint64_t fFlags;
        rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags, &GCPhys);
        if (VBOX_SUCCESS(rc))
        {
            /** @todo we should check reserved bits ... */
            void *pvSrc;
            rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
            switch (rc)
            {
                case VINF_SUCCESS:
                    Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
                    memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
                    break;
                case VERR_PGM_PHYS_PAGE_RESERVED:
                case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
                    memset(pvDst, 0, cb);
                    break;
                default:
                    return rc;
            }

            /** @todo access bit emulation isn't 100% correct. */
            if (!(fFlags & X86_PTE_A))
            {
                rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
                AssertRC(rc);
            }
            return VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * Crosses pages.
         */
        unsigned cb2 = cb - cb1;
        uint64_t fFlags1;
        RTGCPHYS GCPhys1;
        uint64_t fFlags2;
        RTGCPHYS GCPhys2;
        rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags1, &GCPhys1);
        if (VBOX_SUCCESS(rc))
            rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
        if (VBOX_SUCCESS(rc))
        {
            /** @todo we should check reserved bits ... */
            AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%VGv\n", cb, cb1, cb2, GCPtrSrc));
            void *pvSrc1;
            rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
            switch (rc)
            {
                case VINF_SUCCESS:
                    memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
                    break;
                case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
                    memset(pvDst, 0, cb1);
                    break;
                default:
                    return rc;
            }

            void *pvSrc2;
            rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
            switch (rc)
            {
                case VINF_SUCCESS:
                    memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2); /* the second page lands cb1 bytes into the buffer */
                    break;
                case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
                    memset((uint8_t *)pvDst + cb1, 0, cb2);
                    break;
                default:
                    return rc;
            }

            if (!(fFlags1 & X86_PTE_A))
            {
                rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
                AssertRC(rc);
            }
            if (!(fFlags2 & X86_PTE_A))
            {
                rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
                AssertRC(rc);
            }
            return VINF_SUCCESS;
        }
    }

    /*
     * Raise a #PF.
     */
    uint32_t uErr;

    /* Get the current privilege level. */
    uint32_t cpl = CPUMGetGuestCPL(pVM, pCtxCore);
    switch (rc)
    {
        case VINF_SUCCESS:
            uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
            break;

        case VERR_PAGE_NOT_PRESENT:
        case VERR_PAGE_TABLE_NOT_PRESENT:
            uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
            break;

        default:
            AssertMsgFailed(("rc=%Vrc GCPtrSrc=%VGv cb=%#x\n", rc, GCPtrSrc, cb));
            return rc;
    }
    Log(("PGMPhysInterpretedRead: GCPtrSrc=%VGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
    return TRPMRaiseXcptErrCR2(pVM, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
}
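
/*
 * A minimal usage sketch, not part of the original file: an instruction
 * emulator fetching an operand. On VINF_EM_RAW_GUEST_TRAP or
 * VINF_TRPM_XCPT_DISPATCHED a guest #PF was raised instead of data being
 * returned, so the caller must reschedule rather than use the buffer.
 */
#if 0 /* illustrative only */
    uint16_t u16Operand;
    int rc = PGMPhysInterpretedRead(pVM, pCtxCore, &u16Operand, GCPtrSrc, sizeof(u16Operand));
    if (rc != VINF_SUCCESS)
        return rc;
#endif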

/// @todo PGMDECL(int) PGMPhysInterpretedWrite(PVM pVM, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)