Changeset 13933 in vbox
- Timestamp: Nov 6, 2008, 6:55:03 PM (16 years ago)
- Location: trunk/src/VBox/VMM
- Files: 7 edited
trunk/src/VBox/VMM/PGM.cpp
r13919 → r13933

@@ -1142 +1142 @@
     pVM->pgm.s.fA20Enabled = true;
     pVM->pgm.s.GCPhys4MBPSEMask = RT_BIT_64(32) - 1; /* default; checked later */
-    pVM->pgm.s.pGstPaePDPTHC = NULL;
-    pVM->pgm.s.pGstPaePDPTGC = 0;
-    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.apGstPaePDsHC); i++)
-    {
-        pVM->pgm.s.apGstPaePDsHC[i] = NULL;
-        pVM->pgm.s.apGstPaePDsGC[i] = 0;
+    pVM->pgm.s.pGstPaePDPTR3 = NULL;
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+    pVM->pgm.s.pGstPaePDPTR0 = NIL_RTR0PTR;
+#endif
+    pVM->pgm.s.pGstPaePDPTRC = NIL_RTRCPTR;
+    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.apGstPaePDsR3); i++)
+    {
+        pVM->pgm.s.apGstPaePDsR3[i] = NULL;
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+        pVM->pgm.s.apGstPaePDsR0[i] = NIL_RTR0PTR;
+#endif
+        pVM->pgm.s.apGstPaePDsRC[i] = NIL_RTRCPTR;
         pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
         pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;

@@ -1885 +1891 @@
     pVM->pgm.s.pGC32BitPD += offDelta;
     pVM->pgm.s.pGuestPDRC += offDelta;
-    AssertCompile(RT_ELEMENTS(pVM->pgm.s.apGCPaePDs) == RT_ELEMENTS(pVM->pgm.s.apGstPaePDsGC));
+    AssertCompile(RT_ELEMENTS(pVM->pgm.s.apGCPaePDs) == RT_ELEMENTS(pVM->pgm.s.apGstPaePDsRC));
     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.apGCPaePDs); i++)
     {
         pVM->pgm.s.apGCPaePDs[i] += offDelta;
-        pVM->pgm.s.apGstPaePDsGC[i] += offDelta;
+        pVM->pgm.s.apGstPaePDsRC[i] += offDelta;
     }
-    pVM->pgm.s.pGstPaePDPTGC += offDelta;
+    pVM->pgm.s.pGstPaePDPTRC += offDelta;
     pVM->pgm.s.pGCPaePDPT += offDelta;
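The rename from the combined HC/GC fields to separate R3/R0/RC fields above follows VirtualBox's scheme of keeping one pointer per execution context and letting a context-suffix macro (CTX_SUFF/CTXSUFF in the real code) pick the right one at compile time. The standalone C sketch below only illustrates that idea; the type, struct and macro names (MYR3PTR, EXAMPLEPDPTMAP, EX_CTX_SUFF, EX_IN_RING0, EX_IN_RC) are invented for the example and are not VirtualBox definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the three contexts: ring-3 (R3), ring-0 (R0)
     * and the raw-mode guest context (RC).  In the real headers these are
     * distinct pointer types; here they are simple typedefs. */
    typedef void    *MYR3PTR;
    typedef void    *MYR0PTR;
    typedef uint32_t MYRCPTR;           /* RC addresses are 32-bit values */

    /* One pointer per context, mirroring the pGstPaePDPTR3/R0/RC triple this
     * changeset introduces (all names in this sketch are invented). */
    typedef struct EXAMPLEPDPTMAP
    {
        MYR3PTR pPdptR3;
        MYR0PTR pPdptR0;
        MYRCPTR pPdptRC;
    } EXAMPLEPDPTMAP;

    /* A CTX_SUFF-style selector: the compilation context decides which field an
     * access expands to.  Build with -DEX_IN_RING0 or -DEX_IN_RC to switch. */
    #if defined(EX_IN_RC)
    # define EX_CTX_SUFF(pThis, name)   ((pThis)->name##RC)
    #elif defined(EX_IN_RING0)
    # define EX_CTX_SUFF(pThis, name)   ((pThis)->name##R0)
    #else
    # define EX_CTX_SUFF(pThis, name)   ((pThis)->name##R3)
    #endif

    int main(void)
    {
        static uint8_t s_abPdptPage[4096];          /* pretend this is the mapped PDPT page */
        EXAMPLEPDPTMAP Map;
        Map.pPdptR3 = s_abPdptPage;                 /* ring-3 mapping */
        Map.pPdptR0 = s_abPdptPage;                 /* ring-0 mapping (same address only in this toy) */
        Map.pPdptRC = UINT32_C(0xa0000000);         /* fixed RC mapping address, made up */

        /* The same source line compiles in every context; the macro picks the field. */
        printf("PDPT in current context: %p\n", (void *)(uintptr_t)EX_CTX_SUFF(&Map, pPdpt));
        return 0;
    }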
trunk/src/VBox/VMM/PGMGst.h
r13919 → r13933

@@ -352 +352 @@
     for (unsigned i = 0; i < 4; i++)
     {
-        if (    pVM->pgm.s.pGstPaePDPTHC->a[i].n.u1Present
-            &&  (pVM->pgm.s.pGstPaePDPTHC->a[i].u & X86_PDPE_PG_MASK) != pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
+        if (    pVM->pgm.s.pGstPaePDPTR3->a[i].n.u1Present
+            &&  (pVM->pgm.s.pGstPaePDPTR3->a[i].u & X86_PDPE_PG_MASK) != pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
         {
             Log(("pgmR3GstPAEWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%RGp\n",
-                 i, pVM->pgm.s.pGstPaePDPTHC->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
+                 i, pVM->pgm.s.pGstPaePDPTR3->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
             /*
              * The PD has changed.
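The condition being renamed here is the CR3 write handler's test for a present PDPE that has been repointed at a different page directory than the one currently monitored: the entry is masked with X86_PDPE_PG_MASK and compared against the cached physical address. A minimal, self-contained sketch of that check follows; EX_PDPE_PG_MASK, EX_PDPE_P and exPdpeWasRelocated are invented names that only mirror the shape of the real test.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Bits 12..51 of a PAE page-directory-pointer entry hold the physical
     * address of the page directory; this mask mirrors X86_PDPE_PG_MASK. */
    #define EX_PDPE_PG_MASK   UINT64_C(0x000ffffffffff000)
    #define EX_PDPE_P         UINT64_C(0x0000000000000001)   /* present bit */

    /* True when a present PDPE no longer points at the physical page directory
     * we last hooked a monitor on (simplified version of the handler's test). */
    static bool exPdpeWasRelocated(uint64_t uPdpe, uint64_t GCPhysMonitored)
    {
        return (uPdpe & EX_PDPE_P)
            && (uPdpe & EX_PDPE_PG_MASK) != GCPhysMonitored;
    }

    int main(void)
    {
        uint64_t aPdpes[4]     = { 0x0000000001234001ULL, 0, 0x0000000005678001ULL, 0 };
        uint64_t aMonitored[4] = { 0x0000000001234000ULL, 0, 0x0000000009abc000ULL, 0 };

        for (unsigned i = 0; i < 4; i++)
            if (exPdpeWasRelocated(aPdpes[i], aMonitored[i]))
                printf("PDPE %u moved: entry=%#llx old=%#llx\n",
                       i, (unsigned long long)aPdpes[i], (unsigned long long)aMonitored[i]);
        return 0;
    }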
trunk/src/VBox/VMM/PGMInternal.h
r13919 → r13933

@@ -2003 +2003 @@
  * @{ */
     /** The guest's page directory pointer table, static GC mapping. */
-    RCPTRTYPE(PX86PDPT)     pGstPaePDPTGC;
-    /** The guest's page directory pointer table, HC pointer. */
-#if 0///@todo def VBOX_WITH_2X_4GB_ADDR_SPACE
-    R3PTRTYPE(PX86PDPT)     pGstPaePDPTHC;
-#else
-    R3R0PTRTYPE(PX86PDPT)   pGstPaePDPTHC;
-#endif
-    /** The guest's page directories, HC pointers.
+    RCPTRTYPE(PX86PDPT)     pGstPaePDPTRC;
+    /** The guest's page directory pointer table, R3 pointer. */
+    R3PTRTYPE(PX86PDPT)     pGstPaePDPTR3;
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+    /** The guest's page directory pointer table, R0 pointer. */
+    R0PTRTYPE(PX86PDPT)     pGstPaePDPTR0;
+#endif
+
+    /** The guest's page directories, R3 pointers.
      * These are individual pointers and don't have to be adjecent.
      * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
-#if 0///@todo def VBOX_WITH_2X_4GB_ADDR_SPACE
-    R3PTRTYPE(PX86PDPAE)    apGstPaePDsHC[4];
-#else
-    R3R0PTRTYPE(PX86PDPAE)  apGstPaePDsHC[4];
+    R3PTRTYPE(PX86PDPAE)    apGstPaePDsR3[4];
+    /** The guest's page directories, R0 pointers.
+     * Same restrictions as apGstPaePDsR3. */
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+    R0PTRTYPE(PX86PDPAE)    apGstPaePDsR0[4];
 #endif
     /** The guest's page directories, static GC mapping.
-     * Unlike the HC array the first entry can be accessed as a 2048 entry PD.
+     * Unlike the R3/R0 array the first entry can be accessed as a 2048 entry PD.
      * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
-    RCPTRTYPE(PX86PDPAE)    apGstPaePDsGC[4];
+    RCPTRTYPE(PX86PDPAE)    apGstPaePDsRC[4];
     /** The physical addresses of the guest page directories (PAE) pointed to by apGstPagePDsHC/GC. */
     RTGCPHYS                aGCPhysGstPaePDs[4];

@@ -3347 +3349 @@

 /**
- * Gets the page directory for the specified address.
+ * Gets the guest page directory pointer table.
+ *
+ * @returns Pointer to the page directory in question.
+ * @returns NULL if the page directory is not present or on an invalid page.
+ * @param   pPGM        Pointer to the PGM instance data.
+ */
+DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PPGM pPGM)
+{
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+    PX86PDPT pGuestPDPT = 0;
+    int rc = PGMDynMapGCPage(PGM2VM(pPGM), pPGM->GCPhysCR3, (void **)pGuestPDPT);
+    AssertRCReturn(rc, 0);
+    return pGuestPDPT;
+#else
+    return pPGM->CTX_SUFF(pGstPaePDPT);
+#endif
+}
+
+
+/**
+ * Gets the guest page directory pointer table entry for the specified address.
  *
  * @returns Pointer to the page directory in question.
  * @returns NULL if the page directory is not present or on an invalid page.
  * @param   pPGM        Pointer to the PGM instance data.
  * @param   GCPtr       The address.
  */
+DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PPGM pPGM, RTGCPTR GCPtr)
+{
+    AssertGCPtr32(GCPtr);
+
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+    PX86PDPT pGuestPDPT = 0;
+    int rc = PGMDynMapGCPage(PGM2VM(pPGM), pPGM->GCPhysCR3, (void **)pGuestPDPT);
+    AssertRCReturn(rc, 0);
+    return &pGuestPDPT->a[(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
+#else
+    return &pPGM->CTX_SUFF(pGstPaePDPT)->a[(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
+#endif
+}
+
+
+/**
+ * Gets the page directory for the specified address.
+ *
+ * @returns Pointer to the page directory in question.
+ * @returns NULL if the page directory is not present or on an invalid page.
+ * @param   pPGM        Pointer to the PGM instance data.
+ * @param   GCPtr       The address.
+ */
 DECLINLINE(PX86PDPAE) pgmGstGetPaePD(PPGM pPGM, RTGCUINTPTR GCPtr)
 {
-    const unsigned iPdPt = GCPtr >> X86_PDPT_SHIFT;
-    if (CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].n.u1Present)
+    AssertGCPtr32(GCPtr);
+
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
+    AssertReturn(pGuestPDPT, 0);
+#else
+    PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePDPT);
+#endif
+    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
+    if (pGuestPDPT->a[iPdPt].n.u1Present)
     {
-        if ((CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
-            return CTXSUFF(pPGM->apGstPaePDs)[iPdPt];
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+        if ((pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
+            return pPGM->CTX_SUFF(apGstPaePDs)[iPdPt];
+#endif

         /* cache is out-of-sync. */
         PX86PDPAE pPD;
-        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
+        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
         if (RT_SUCCESS(rc))
             return pPD;
-        AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u));
-        /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page of some kind emulated as all 0s. */
+        AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, pGuestPDPT->a[iPdPt].u));
+        /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. (not quite true) */
     }
     return NULL;

@@ -3384 +3439 @@
 DECLINLINE(PX86PDEPAE) pgmGstGetPaePDEPtr(PPGM pPGM, RTGCUINTPTR GCPtr)
 {
-    const unsigned iPdPt = GCPtr >> X86_PDPT_SHIFT;
-    if (CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].n.u1Present)
+    AssertGCPtr32(GCPtr);
+
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
+    AssertReturn(pGuestPDPT, 0);
+#else
+    PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePDPT);
+#endif
+    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
+    if (pGuestPDPT->a[iPdPt].n.u1Present)
     {
         const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
-        if ((CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
-            return &CTXSUFF(pPGM->apGstPaePDs)[iPdPt]->a[iPD];
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+        if ((pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
+            return &pPGM->CTX_SUFF(apGstPaePDs)[iPdPt]->a[iPD];
+#endif

         /* The cache is out-of-sync. */
         PX86PDPAE pPD;
-        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
+        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
         if (RT_SUCCESS(rc))
             return &pPD->a[iPD];
-        AssertMsgFailed(("Impossible! rc=%Rrc PDPE=%RX64\n", rc, CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u));
-        /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page or something which we'll emulate as all 0s. */
+        AssertMsgFailed(("Impossible! rc=%Rrc PDPE=%RX64\n", rc, pGuestPDPT->a[iPdPt].u));
+        /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page or something which we'll emulate as all 0s. (not quite true) */
     }
     return NULL;

@@ -3413 +3478 @@
 DECLINLINE(uint64_t) pgmGstGetPaePDE(PPGM pPGM, RTGCUINTPTR GCPtr)
 {
-    const unsigned iPdPt = GCPtr >> X86_PDPT_SHIFT;
-    if (CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].n.u1Present)
+    AssertGCPtr32(GCPtr);
+
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
+    AssertReturn(pGuestPDPT, 0);
+#else
+    PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePDPT);
+#endif
+    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
+    if (pGuestPDPT->a[iPdPt].n.u1Present)
     {
         const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
-        if ((CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
-            return CTXSUFF(pPGM->apGstPaePDs)[iPdPt]->a[iPD].u;
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+        if ((pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
+            return pPGM->CTX_SUFF(apGstPaePDs)[iPdPt]->a[iPD].u;
+#endif

         /* cache is out-of-sync. */
         PX86PDPAE pPD;
-        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
+        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
         if (RT_SUCCESS(rc))
             return pPD->a[iPD].u;
-        AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u));
+        AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, pGuestPDPT->a[iPdPt].u));
     }
-    return 0ULL;
+    return 0;
 }

@@ -3440 +3515 @@
  * @param   GCPtr       The address.
  * @param   piPD        Receives the index into the returned page directory
+ * @param   pPdpe       Receives the page directory pointer entry. Optional.
  */
-DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PPGM pPGM, RTGCUINTPTR GCPtr, unsigned *piPD)
+DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PPGM pPGM, RTGCUINTPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
 {
-    const unsigned iPdPt = GCPtr >> X86_PDPT_SHIFT;
-    if (CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].n.u1Present)
+    AssertGCPtr32(GCPtr);
+
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
+    AssertReturn(pGuestPDPT, 0);
+#else
+    PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePDPT);
+#endif
+    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
+    if (pPdpe)
+        *pPdpe = pGuestPDPT->a[iPdPt];
+    if (pGuestPDPT->a[iPdPt].n.u1Present)
     {
         const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
-        if ((CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+        if ((pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
         {
             *piPD = iPD;
-            return CTXSUFF(pPGM->apGstPaePDs)[iPdPt];
+            return pPGM->CTX_SUFF(apGstPaePDs)[iPdPt];
         }
+#endif

         /* cache is out-of-sync. */
         PX86PDPAE pPD;
-        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
+        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
         if (RT_SUCCESS(rc))
         {
 …
             return pPD;
         }
-        AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u));
+        AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, pGuestPDPT->a[iPdPt].u));
         /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page of some kind emulated as all 0s. */
     }

The same change repeats in the AMD64 variants at old/new lines 3525/3613, 3538/3626, 3544/3632, 3568/3656, 3580/3668, 3586/3674, 3656/3744, 3669/3757, 3675/3763, 3703/3791, 3716/3804 and 3722/3810:
-        return 0ULL;
+        return 0;
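The new accessors all compute the PDPT index as (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE and the page-directory index as (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK. The standalone sketch below shows how a 32-bit linear address decomposes under PAE paging; the EX_* constants are local stand-ins for the X86_* definitions (values taken from the IA-32 manuals), not the VirtualBox headers.

    #include <stdint.h>
    #include <stdio.h>

    /* PAE paging splits a 32-bit linear address into three indexes plus a
     * page offset.  These constants mirror X86_PDPT_SHIFT/X86_PDPT_MASK_PAE,
     * X86_PD_PAE_SHIFT/X86_PD_PAE_MASK and the page-table equivalents. */
    #define EX_PDPT_SHIFT    30
    #define EX_PDPT_MASK_PAE 0x3          /* 4 PDPT entries   */
    #define EX_PD_PAE_SHIFT  21
    #define EX_PD_PAE_MASK   0x1ff        /* 512 PD entries   */
    #define EX_PT_PAE_SHIFT  12
    #define EX_PT_PAE_MASK   0x1ff        /* 512 PT entries   */

    static void exDumpPaeIndexes(uint32_t GCPtr)
    {
        const unsigned iPdpt = (GCPtr >> EX_PDPT_SHIFT) & EX_PDPT_MASK_PAE;
        const unsigned iPd   = (GCPtr >> EX_PD_PAE_SHIFT) & EX_PD_PAE_MASK;
        const unsigned iPt   = (GCPtr >> EX_PT_PAE_SHIFT) & EX_PT_PAE_MASK;
        printf("%#010x -> PDPT[%u] PD[%u] PT[%u] offset %#x\n",
               GCPtr, iPdpt, iPd, iPt, GCPtr & 0xfff);
    }

    int main(void)
    {
        exDumpPaeIndexes(0x00000000u);   /* PDPT[0] PD[0]   PT[0]   */
        exDumpPaeIndexes(0xc0801234u);   /* PDPT[3] PD[4]   PT[1]   */
        exDumpPaeIndexes(0x7fffffffu);   /* PDPT[1] PD[511] PT[511] */
        return 0;
    }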
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r13832 → r13933

@@ -826 +826 @@
         && !(pPdpe->u & X86_PDPE_PG_MASK))
     {
-        PX86PDPE pPdptGst = &CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt];
+        PX86PDPE pPdptGst = pgmGstGetPaePDPEPtr(pPGM, GCPtr);

         Assert(!(pPdpe->u & X86_PDPE_PG_MASK));

@@ -1230 +1230 @@
 {
     Assert(iPdpt <= 3);
-    return pVM->pgm.s.CTXSUFF(pGstPaePDPT)->a[iPdpt & 3];
+    return pgmGstGetPaePDPTPtr(&pVM->pgm.s)->a[iPdpt & 3];
 }
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r13919 → r13933

@@ -109 +109 @@
 # if PGM_GST_TYPE == PGM_TYPE_PAE
     unsigned iPDSrc;
-    PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, (RTGCUINTPTR)pvFault, &iPDSrc);
+    PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, (RTGCUINTPTR)pvFault, &iPDSrc, NULL);

 # elif PGM_GST_TYPE == PGM_TYPE_AMD64

@@ -959 +959 @@
     unsigned iPDSrc;
 # if PGM_GST_TYPE == PGM_TYPE_PAE
-    PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc);
-    X86PDPE PdpeSrc = CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[iPdpte];
+    X86PDPE PdpeSrc;
+    PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
 # else /* AMD64 */
     PX86PML4E pPml4eSrc;

@@ -1095 +1095 @@

 # if PGM_GST_TYPE == PGM_TYPE_PAE
-    /* Note: This shouldn't actually be necessary as we monitor the PDPT page for changes. */
+    /*
+     * Update the shadow PDPE and free all the shadow PD entries if the PDPE is marked not present.
+     * Note: This shouldn't actually be necessary as we monitor the PDPT page for changes.
+     */
     if (!pPDSrc)
     {
 …
         PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

-        Assert(!(CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[iPdpte].n.u1Present));
+        Assert(!PdpeSrc.n.u1Present);
         LogFlow(("InvalidatePage: guest PDPE %d not present; clear shw pdpe\n", iPdpte));
-        /* for each page directory entry */
-        for (unsigned iPD = 0; iPD < X86_PG_PAE_ENTRIES; iPD++)
-        {
-            if (    pPDEDst[iPD].n.u1Present
-                &&  !(pPDEDst[iPD].u & PGM_PDFLAGS_MAPPING))
-            {
-                pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst[iPD].u & SHW_PDE_PG_MASK), SHW_POOL_ROOT_IDX, iPdpte * X86_PG_PAE_ENTRIES + iPD);
-                pPDEDst[iPD].u = 0;
-            }
-        }
+
+        /* for each page directory entry */
+        for (unsigned iPD = 0; iPD < X86_PG_PAE_ENTRIES; iPD++)
+        {
+            if (    pPDEDst[iPD].n.u1Present
+                &&  !(pPDEDst[iPD].u & PGM_PDFLAGS_MAPPING))
+            {
+                pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst[iPD].u & SHW_PDE_PG_MASK), SHW_POOL_ROOT_IDX, iPdpte * X86_PG_PAE_ENTRIES + iPD);
+                pPDEDst[iPD].u = 0;
+            }
+        }
         if (!(pPdptDst->a[iPdpte].u & PGM_PLXFLAGS_MAPPING))
             pPdptDst->a[iPdpte].n.u1Present = 0;

@@ -2020 +2024 @@
     Assert(pPdpeSrc);

-#  else /* PAE */
-    PX86PDPE pPdpeSrc = &pVM->pgm.s.CTXSUFF(pGstPaePDPT)->a[(GCPtrPage >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
-#  endif
+#  else /* PAE */
+    PX86PDPE pPdpeSrc = pgmGstGetPaePDPEPtr(&pVM->pgm.s, GCPtrPage);
+#  endif /* PAE */

     /*

@@ -2813 +2817 @@
 # elif PGM_GST_TYPE == PGM_TYPE_PAE
     unsigned iPDSrc;
-    PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc);
+    PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, NULL);
     if (!pPDSrc)
         return VINF_SUCCESS; /* not present */

@@ -2932 +2936 @@
 # elif PGM_GST_TYPE == PGM_TYPE_PAE
     unsigned iPDSrc;
-    PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc);
+    PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, NULL);

     if (pPDSrc)

@@ -3225 +3229 @@
     unsigned iPDSrc;
 # if PGM_GST_TYPE == PGM_TYPE_PAE
+    X86PDPE PdpeSrc;
+    PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, iPdpte << X86_PDPT_SHIFT, &iPDSrc, &PdpeSrc);
     PX86PDPAE pPDPAE = pVM->pgm.s.CTXMID(ap,PaePDs)[0];
     PX86PDEPAE pPDEDst = &pPDPAE->a[iPdpte * X86_PG_PAE_ENTRIES];
-    PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, iPdpte << X86_PDPT_SHIFT, &iPDSrc);
     PX86PDPT pPdptDst = pVM->pgm.s.CTXMID(p,PaePDPT); NOREF(pPdptDst);
-    X86PDPE PdpeSrc = CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[iPdpte];

     if (pPDSrc == NULL)

@@ -3655 +3659 @@
     rc = PGMShwGetPage(pVM, (RTGCPTR)pPGM->pGuestPDRC, NULL, &HCPhysShw);
 # else
-    rc = PGMShwGetPage(pVM, (RTGCPTR)pPGM->pGstPaePDPTGC, NULL, &HCPhysShw);
+    rc = PGMShwGetPage(pVM, (RTGCPTR)pPGM->pGstPaePDPTRC, NULL, &HCPhysShw);
 # endif
     AssertRCReturn(rc, 1);

@@ -3764 +3768 @@
     RTGCPHYS GCPhysPdeSrc;
 # if PGM_GST_TYPE == PGM_TYPE_PAE
+    X86PDPE PdpeSrc;
+    PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtr, &iPDSrc, &PdpeSrc);
     PX86PDPAE pPDDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0];
-    PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtr, &iPDSrc);
     PX86PDPT pPdptDst = pVM->pgm.s.CTXMID(p,PaePDPT);
-    X86PDPE PdpeSrc = CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[iPdpte];
 # else
     PX86PML4E pPml4eSrc;
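Most of the call-site churn in this file comes from the extra, optional pPdpe out-parameter added to pgmGstGetPaePDPtr(): callers that only need the page directory now pass NULL, while callers that also need the PDPE value pass &PdpeSrc instead of reading the guest PDPT a second time. The toy function below sketches that optional out-parameter pattern only; exGetPaePd and EXPDPTE are invented names, not the real VirtualBox routine.

    #include <stdint.h>
    #include <stdio.h>

    typedef struct EXPDPTE { uint64_t u; } EXPDPTE;

    /* Toy guest state: 4 PDPT entries and 4 page directories (made up data). */
    static EXPDPTE  g_aPdpt[4] = { { 0x1001 }, { 0 }, { 0x3001 }, { 0 } };
    static uint64_t g_aPds[4][512];

    /* Mirrors the shape of the updated accessor: it returns the page directory
     * and the PD index, and *optionally* copies out the PDPT entry it looked at
     * when the caller passes a non-NULL pPdpe. */
    static uint64_t *exGetPaePd(uint32_t GCPtr, unsigned *piPD, EXPDPTE *pPdpe)
    {
        const unsigned iPdpt = (GCPtr >> 30) & 3;
        if (pPdpe)
            *pPdpe = g_aPdpt[iPdpt];            /* caller wants the PDPE too   */
        if (!(g_aPdpt[iPdpt].u & 1))            /* present bit clear -> no PD  */
            return NULL;
        *piPD = (GCPtr >> 21) & 0x1ff;
        return g_aPds[iPdpt];
    }

    int main(void)
    {
        unsigned iPD;

        /* A caller that only needs the PD passes NULL for the PDPE. */
        uint64_t *pPd = exGetPaePd(0x00400000u, &iPD, NULL);
        printf("fault path: pPd=%p iPD=%u\n", (void *)pPd, iPD);

        /* A caller that also needs the PDPE passes an output variable. */
        EXPDPTE PdpeSrc;
        pPd = exGetPaePd(0x80000000u, &iPD, &PdpeSrc);
        printf("sync path:  pPd=%p iPD=%u PdpeSrc=%#llx\n",
               (void *)pPd, iPD, (unsigned long long)PdpeSrc.u);
        return 0;
    }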
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r13923 → r13933

@@ -459 +459 @@

 # elif PGM_GST_TYPE == PGM_TYPE_PAE
-    unsigned offset = GCPhysCR3 & GST_CR3_PAGE_MASK & PAGE_OFFSET_MASK;
-    pVM->pgm.s.pGstPaePDPTHC = (R3R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
-    pVM->pgm.s.pGstPaePDPTGC = (RCPTRTYPE(PX86PDPT))((RCPTRTYPE(uint8_t *))pVM->pgm.s.GCPtrCR3Mapping + offset);
-    Log(("Cached mapping %RGv\n", pVM->pgm.s.pGstPaePDPTGC));
+    unsigned off = GCPhysCR3 & GST_CR3_PAGE_MASK & PAGE_OFFSET_MASK;
+    pVM->pgm.s.pGstPaePDPTR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
+#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+    pVM->pgm.s.pGstPaePDPTR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
+#  endif
+    pVM->pgm.s.pGstPaePDPTRC = (RCPTRTYPE(PX86PDPT))((RCPTRTYPE(uint8_t *))pVM->pgm.s.GCPtrCR3Mapping + off);
+    Log(("Cached mapping %RGv\n", pVM->pgm.s.pGstPaePDPTRC));

     /*
      * Map the 4 PDs too.
      */
+    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
     RTGCUINTPTR GCPtr = (RTGCUINTPTR)pVM->pgm.s.GCPtrCR3Mapping + PAGE_SIZE;
     for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++, GCPtr += PAGE_SIZE)
     {
-        if (pVM->pgm.s.CTXSUFF(pGstPaePDPT)->a[i].n.u1Present)
+        if (pGuestPDPT->a[i].n.u1Present)
         {
             RTHCPTR HCPtr;
             RTHCPHYS HCPhys;
-            RTGCPHYS GCPhys = pVM->pgm.s.CTXSUFF(pGstPaePDPT)->a[i].u & X86_PDPE_PG_MASK;
+            RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;
             int rc2 = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhys, &HCPtr, &HCPhys);
             if (RT_SUCCESS(rc2))
             {
                 rc = PGMMap(pVM, GCPtr, HCPhys & X86_PTE_PAE_PG_MASK, PAGE_SIZE, 0);
                 AssertRCReturn(rc, rc);
-                pVM->pgm.s.apGstPaePDsHC[i] = (R3R0PTRTYPE(PX86PDPAE))HCPtr;
-                pVM->pgm.s.apGstPaePDsGC[i] = (RCPTRTYPE(PX86PDPAE))GCPtr;
+
+                pVM->pgm.s.apGstPaePDsR3[i] = (R3PTRTYPE(PX86PDPAE))HCPtr;
+#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+                pVM->pgm.s.apGstPaePDsR0[i] = (R0PTRTYPE(PX86PDPAE))HCPtr;
+#  endif
+                pVM->pgm.s.apGstPaePDsRC[i] = (RCPTRTYPE(PX86PDPAE))GCPtr;
                 pVM->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
-                PGM_INVL_PG(GCPtr);
+                PGM_INVL_PG(GCPtr); /** @todo This ends up calling HWACCMInvalidatePage, is that correct? */
                 continue;
             }
         }

-        pVM->pgm.s.apGstPaePDsHC[i] = 0;
-        pVM->pgm.s.apGstPaePDsGC[i] = 0;
+        pVM->pgm.s.apGstPaePDsR3[i] = 0;
+#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+        pVM->pgm.s.apGstPaePDsR0[i] = 0;
+#  endif
+        pVM->pgm.s.apGstPaePDsRC[i] = 0;
         pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
-        PGM_INVL_PG(GCPtr);
+        PGM_INVL_PG(GCPtr); /** @todo this shouldn't be necessary? */
     }
 # elif PGM_GST_TYPE == PGM_TYPE_AMD64

@@ -560 +571 @@

 #elif PGM_GST_TYPE == PGM_TYPE_PAE
-    pVM->pgm.s.pGstPaePDPTHC = 0;
-    pVM->pgm.s.pGstPaePDPTGC = 0;
-    for (unsigned i=0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
-    {
-        pVM->pgm.s.apGstPaePDsHC[i] = 0;
-        pVM->pgm.s.apGstPaePDsGC[i] = 0;
+    pVM->pgm.s.pGstPaePDPTR3 = 0;
+#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+    pVM->pgm.s.pGstPaePDPTR0 = 0;
+#  endif
+    pVM->pgm.s.pGstPaePDPTRC = 0;
+    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
+    {
+        pVM->pgm.s.apGstPaePDsR3[i] = 0;
+#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+        pVM->pgm.s.apGstPaePDsR0[i] = 0;
+#  endif
+        pVM->pgm.s.apGstPaePDsRC[i] = 0;
         pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
     }

@@ -666 +683 @@
      * Do the 4 PDs.
      */
+    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
     for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
     {
-        if (CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].n.u1Present)
-        {
-            RTGCPHYS GCPhys = CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].u & X86_PDPE_PG_MASK;
+        if (pGuestPDPT->a[i].n.u1Present)
+        {
+            RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;
             if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != GCPhys)
             {

@@ -1089 +1107 @@
      * We'll simply check all of them instead of figuring out which one/two to check.
      */
+    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
     for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
     {
-        if (    CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].n.u1Present
-            &&  (CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].u & X86_PDPE_PG_MASK)
-                 != pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
+        if (    pGuestPDPT->a[i].n.u1Present
+            &&  (pGuestPDPT->a[i].u & X86_PDPE_PG_MASK)
+                 != pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
         {
             /*
 …
             pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
             Log(("pgmXXGstPaeWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%RGp\n",
-                 i, CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
+                 i, pGuestPDPT->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
         }
     }

@@ -1155 +1174 @@
      */
     RTGCUINTPTR i;
+    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
     for (i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
-        if (CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].u == (GCPhysFault & X86_PTE_PAE_PG_MASK))
+        if (pGuestPDPT->a[i].u == (GCPhysFault & X86_PTE_PAE_PG_MASK))
         {
             PX86PDPAE pPDSrc = pgmGstGetPaePD(&pVM->pgm.s, i << X86_PDPT_SHIFT);
-            const RTGCUINTPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;
+            const RTGCUINTPTR offPD  = GCPhysFault & PAGE_OFFSET_MASK;
             const unsigned iPD1 = offPD / sizeof(X86PDEPAE);
             const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDEPAE);
 …
             Assert(iPD2 < X86_PG_PAE_ENTRIES);

-# ifdef DEBUG
+# ifdef LOG_ENABLED
             Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD1=%#05x (%x)\n",
                  i, iPD1, (i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)));
 …
             Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD2=%#05x (%x)\n",
                  i, iPD2, (i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)));
 # endif

             if (!pVM->pgm.s.fMappingsFixed)
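The pattern driving these edits is that, when VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 is in effect, ring-0 can no longer rely on a long-lived mapping of the guest PDPT and has to go through an accessor that may map the page on demand, while the other contexts keep returning the cached pointer. The sketch below shows that "cached pointer vs. map-on-demand behind one accessor" shape in isolation; EXSTATE, exDynMapPage and EX_MAP_ON_DEMAND are invented and only loosely mirror pgmGstGetPaePDPTPtr()/PGMDynMapGCPage().

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy "guest physical memory" plus the state the accessor needs. */
    static uint8_t g_abGuestRam[4 * 4096];

    typedef struct EXSTATE
    {
        uint32_t GCPhysCR3;     /* guest-physical address of the PDPT page        */
        void    *pPdptCached;   /* long-lived mapping set up when CR3 was loaded  */
    } EXSTATE;

    #ifdef EX_MAP_ON_DEMAND
    /* Stand-in for a dynamic mapper: "maps" a page-aligned guest-physical
     * address by returning a pointer into the toy RAM array, or failure. */
    static int exDynMapPage(uint32_t GCPhys, void **ppv)
    {
        if (GCPhys + 4096 > sizeof(g_abGuestRam))
            return -1;
        *ppv = &g_abGuestRam[GCPhys];
        return 0;
    }
    #endif

    /* The accessor: with the map-on-demand build option the PDPT page is
     * mapped on every call, otherwise the cached pointer is handed back. */
    static void *exGetPdptPtr(EXSTATE *pState)
    {
    #ifdef EX_MAP_ON_DEMAND
        void *pv = NULL;
        if (exDynMapPage(pState->GCPhysCR3, &pv) != 0)
            return NULL;
        return pv;
    #else
        return pState->pPdptCached;
    #endif
    }

    int main(void)
    {
        EXSTATE State;
        State.GCPhysCR3   = 0x2000;                 /* PDPT lives in the third page */
        State.pPdptCached = &g_abGuestRam[0x2000];  /* what the "CR3 load" cached   */
        memset(&g_abGuestRam[0x2000], 0xaa, 32);

        uint8_t *pb = (uint8_t *)exGetPdptPtr(&State);
        printf("PDPT byte 0 = %#x\n", pb ? pb[0] : 0u);
        return 0;
    }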
trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp
r13919 → r13933

@@ -401 +401 @@
 #endif
     GEN_CHECK_OFF(PGM, pGuestPDRC);
-    GEN_CHECK_OFF(PGM, pGstPaePDPTHC);
-    GEN_CHECK_OFF(PGM, pGstPaePDPTGC);
-    GEN_CHECK_OFF(PGM, apGstPaePDsHC);
-    GEN_CHECK_OFF(PGM, apGstPaePDsGC);
+    GEN_CHECK_OFF(PGM, pGstPaePDPTR3);
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+    GEN_CHECK_OFF(PGM, pGstPaePDPTR0);
+#endif
+    GEN_CHECK_OFF(PGM, pGstPaePDPTRC);
+    GEN_CHECK_OFF(PGM, apGstPaePDsR3);
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+    GEN_CHECK_OFF(PGM, apGstPaePDsR0);
+#endif
+    GEN_CHECK_OFF(PGM, apGstPaePDsRC);
     GEN_CHECK_OFF(PGM, aGCPhysGstPaePDs);
     GEN_CHECK_OFF(PGM, aGCPhysGstPaePDsMonitored);
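tstVMStructGC emits structure member offsets for the raw-mode context build so they can be cross-checked against the host build, which is why the new R0 members above have to carry exactly the same #ifndef as the structure definition. A minimal offsetof-based sketch of that kind of check follows; EXPGM, EX_CHECK_OFF and EX_2X_4GB_ADDR_SPACE are invented names, not the real GEN_CHECK_OFF machinery.

    #include <stddef.h>
    #include <stdio.h>

    /* A toy structure with a conditionally compiled member, mirroring how the
     * R0 pointer fields only exist when the 2x-4GB option is not defined. */
    typedef struct EXPGM
    {
        void    *pPdptR3;
    #ifndef EX_2X_4GB_ADDR_SPACE
        void    *pPdptR0;
    #endif
        unsigned uPdptRC;
    } EXPGM;

    /* A GEN_CHECK_OFF-style reporter: prints member offsets so two builds
     * (e.g. 32-bit RC vs. 64-bit host) can be diffed for layout mismatches. */
    #define EX_CHECK_OFF(type, member) \
        printf("%-24s %zu\n", #type "::" #member, offsetof(type, member))

    int main(void)
    {
        EX_CHECK_OFF(EXPGM, pPdptR3);
    #ifndef EX_2X_4GB_ADDR_SPACE
        EX_CHECK_OFF(EXPGM, pPdptR0);   /* must carry the same #ifdef as the struct */
    #endif
        EX_CHECK_OFF(EXPGM, uPdptRC);
        return 0;
    }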