Changeset 13935 in vbox
- Timestamp:
- Nov 6, 2008 8:23:28 PM (16 years ago)
- svn:sync-xref-src-repo-rev:
- 38959
- Location:
- trunk/src/VBox/VMM
Files:
- 4 edited
Legend:
- Unmodified
- Added
- Removed
TabularUnified trunk/src/VBox/VMM/PGMInternal.h ¶
r13933 r13935 2032 2032 /** @name AMD64 Guest Paging. 2033 2033 * @{ */ 2034 /** The guest's page directory pointer table, HCpointer. */2035 #if 0///@todo def VBOX_WITH_2X_4GB_ADDR_SPACE 2036 R3R0PTRTYPE(PX86PML4) pGstPaePML4HC; 2037 #else 2038 R 3R0PTRTYPE(PX86PML4) pGstPaePML4HC;2034 /** The guest's page directory pointer table, R3 pointer. */ 2035 R3PTRTYPE(PX86PML4) pGstAmd64PML4R3; 2036 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE 2037 /** The guest's page directory pointer table, R0 pointer. */ 2038 R0PTRTYPE(PX86PML4) pGstAmd64PML4R0; 2039 2039 #endif 2040 2040 /** @} */ … … 3558 3558 3559 3559 /** 3560 * Gets the page map level-4 pointer for the guest. 3561 * 3562 * @returns Pointer to the PML4 page. 3563 * @param pPGM Pointer to the PGM instance data. 3564 */ 3565 DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PPGM pPGM) 3566 { 3567 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 3568 PX86PML4 pGuestPml4; 3569 int rc = PGMDynMapGCPage(PGM2VM(pPGM), pPGM->GCPhysCR3, (void **)pGuestPml4); 3570 AssertRCReturn(rc, NULL); 3571 return pGuestPml4; 3572 #else 3573 Assert(pPGM->CTX_SUFF(pGstAmd64PML4)); 3574 return pPGM->CTX_SUFF(pGstAmd64PML4); 3575 #endif 3576 } 3577 3578 3579 /** 3580 * Gets the pointer to a page map level-4 entry. 3581 * 3582 * @returns Pointer to the PML4 entry. 3583 * @param pPGM Pointer to the PGM instance data. 3584 * @param iPml4e The index. 3585 */ 3586 DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PPGM pPGM, unsigned int iPml4e) 3587 { 3588 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 3589 PX86PML4 pGuestPml4; 3590 int rc = PGMDynMapGCPage(PGM2VM(pPGM), pPGM->GCPhysCR3, (void **)pGuestPml4); 3591 AssertRCReturn(rc, NULL); 3592 return &pGuestPml4->a[iPml4e]; 3593 #else 3594 Assert(pPGM->CTX_SUFF(pGstAmd64PML4)); 3595 return &pPGM->CTX_SUFF(pGstAmd64PML4)->a[iPml4e]; 3596 #endif 3597 } 3598 3599 3600 /** 3601 * Gets a page map level-4 entry. 3602 * 3603 * @returns The PML4 entry. 3604 * @param pPGM Pointer to the PGM instance data. 
3605 * @param iPml4e The index. 3606 */ 3607 DECLINLINE(X86PGPAEUINT) pgmGstGetLongModePML4E(PPGM pPGM, unsigned int iPml4e) 3608 { 3609 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 3610 PX86PML4 pGuestPml4; 3611 int rc = PGMDynMapGCPage(PGM2VM(pPGM), pPGM->GCPhysCR3, (void **)pGuestPml4); 3612 AssertRCReturn(rc, 0); 3613 return pGuestPml4->a[iPml4e].u; 3614 #else 3615 Assert(pPGM->CTX_SUFF(pGstAmd64PML4)); 3616 return pPGM->CTX_SUFF(pGstAmd64PML4)->a[iPml4e].u; 3617 #endif 3618 } 3619 3620 3621 /** 3560 3622 * Gets the page directory pointer entry for the specified address. 3561 3623 * … … 3568 3630 DECLINLINE(PX86PDPE) pgmGstGetLongModePDPTPtr(PPGM pPGM, RTGCUINTPTR64 GCPtr, PX86PML4E *ppPml4e) 3569 3631 { 3570 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK; 3571 3572 Assert(pPGM->pGstPaePML4HC); 3573 *ppPml4e = &pPGM->pGstPaePML4HC->a[iPml4e]; 3574 if ((*ppPml4e)->n.u1Present) 3632 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM); 3633 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK; 3634 PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4e]; 3635 if (pPml4e->n.u1Present) 3575 3636 { 3576 3637 PX86PDPT pPdpt; 3577 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), (*ppPml4e)->u & X86_PML4E_PG_MASK, &pPdpt); 3578 if (RT_FAILURE(rc)) 3579 { 3580 AssertFailed(); 3581 return NULL; 3582 } 3583 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; 3638 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPml4e->u & X86_PML4E_PG_MASK, &pPdpt); 3639 AssertRCReturn(rc, NULL); 3640 3641 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; 3584 3642 return &pPdpt->a[iPdPt]; 3585 3643 } … … 3600 3658 DECLINLINE(uint64_t) pgmGstGetLongModePDE(PPGM pPGM, RTGCUINTPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe) 3601 3659 { 3602 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK; 3603 3604 Assert(pPGM->pGstPaePML4HC); 3605 *ppPml4e = &pPGM->pGstPaePML4HC->a[iPml4e]; 3606 if ((*ppPml4e)->n.u1Present) 3660 PX86PML4 
pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM); 3661 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK; 3662 PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4e]; 3663 if (pPml4e->n.u1Present) 3607 3664 { 3608 PX86PDPT pPdptTemp; 3609 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), (*ppPml4e)->u & X86_PML4E_PG_MASK, &pPdptTemp); 3610 if (RT_FAILURE(rc)) 3665 PCX86PDPT pPdptTemp; 3666 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp); 3667 AssertRCReturn(rc, 0); 3668 3669 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; 3670 *pPdpe = pPdptTemp->a[iPdPt]; 3671 if (pPdptTemp->a[iPdPt].n.u1Present) 3611 3672 { 3612 AssertFailed(); 3613 return 0; 3614 } 3615 3616 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; 3617 *pPdpe = pPdptTemp->a[iPdPt]; 3618 if (pPdpe->n.u1Present) 3619 { 3620 PX86PDPAE pPD; 3621 3622 rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdpe->u & X86_PDPE_PG_MASK, &pPD); 3623 if (RT_FAILURE(rc)) 3624 { 3625 AssertFailed(); 3626 return 0; 3627 } 3673 PCX86PDPAE pPD; 3674 rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD); 3675 AssertRCReturn(rc, 0); 3676 3628 3677 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK; 3629 3678 return pPD->a[iPD].u; … … 3644 3693 DECLINLINE(uint64_t) pgmGstGetLongModePDE(PPGM pPGM, RTGCUINTPTR64 GCPtr) 3645 3694 { 3646 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK; 3647 3648 Assert(pPGM->pGstPaePML4HC); 3649 if (pPGM->pGstPaePML4HC->a[iPml4e].n.u1Present) 3695 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM); 3696 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK; 3697 if (pGuestPml4->a[iPml4e].n.u1Present) 3650 3698 { 3651 PX86PDPT pPdptTemp; 3652 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPGM->pGstPaePML4HC->a[iPml4e].u & X86_PML4E_PG_MASK, &pPdptTemp); 3653 if (RT_FAILURE(rc)) 3654 { 3655 AssertFailed(); 3656 return 0; 3657 } 3658 3659 const unsigned iPdPt = 
(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; 3699 PCX86PDPT pPdptTemp; 3700 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pGuestPml4->a[iPml4e].u & X86_PML4E_PG_MASK, &pPdptTemp); 3701 AssertRCReturn(rc, 0); 3702 3703 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; 3660 3704 if (pPdptTemp->a[iPdPt].n.u1Present) 3661 3705 { 3662 PX86PDPAE pPD; 3663 3706 PCX86PDPAE pPD; 3664 3707 rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD); 3665 if (RT_FAILURE(rc)) 3666 { 3667 AssertFailed(); 3668 return 0; 3669 } 3708 AssertRCReturn(rc, 0); 3709 3670 3710 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK; 3671 3711 return pPD->a[iPD].u; … … 3686 3726 DECLINLINE(PX86PDEPAE) pgmGstGetLongModePDEPtr(PPGM pPGM, RTGCUINTPTR64 GCPtr) 3687 3727 { 3688 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK; 3689 3690 Assert(pPGM->pGstPaePML4HC); 3691 if (pPGM->pGstPaePML4HC->a[iPml4e].n.u1Present) 3728 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM); 3729 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK; 3730 if (pGuestPml4->a[iPml4e].n.u1Present) 3692 3731 { 3693 PX86PDPT pPdptTemp; 3694 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPGM->pGstPaePML4HC->a[iPml4e].u & X86_PML4E_PG_MASK, &pPdptTemp); 3695 if (RT_FAILURE(rc)) 3696 { 3697 AssertFailed(); 3698 return NULL; 3699 } 3700 3701 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; 3732 PCX86PDPT pPdptTemp; 3733 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pGuestPml4->a[iPml4e].u & X86_PML4E_PG_MASK, &pPdptTemp); 3734 AssertRCReturn(rc, NULL); 3735 3736 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; 3702 3737 if (pPdptTemp->a[iPdPt].n.u1Present) 3703 3738 { 3704 3739 PX86PDPAE pPD; 3705 3706 3740 rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD); 3707 if (RT_FAILURE(rc)) 3708 { 3709 AssertFailed(); 3710 return NULL; 3711 } 3741 AssertRCReturn(rc, NULL); 3742 
3712 3743 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK; 3713 3744 return &pPD->a[iPD]; … … 3731 3762 DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGM pPGM, RTGCUINTPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD) 3732 3763 { 3733 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK; 3734 3735 Assert(pPGM->pGstPaePML4HC); 3736 *ppPml4e = &pPGM->pGstPaePML4HC->a[iPml4e]; 3737 if ((*ppPml4e)->n.u1Present) 3764 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM); 3765 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK; 3766 PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4e]; 3767 if (pPml4e->n.u1Present) 3738 3768 { 3739 PX86PDPT pPdptTemp; 3740 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), (*ppPml4e)->u & X86_PML4E_PG_MASK, &pPdptTemp); 3741 if (RT_FAILURE(rc)) 3742 { 3743 AssertFailed(); 3744 return 0; 3745 } 3746 3747 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; 3769 PCX86PDPT pPdptTemp; 3770 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp); 3771 AssertRCReturn(rc, NULL); 3772 3773 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; 3748 3774 *pPdpe = pPdptTemp->a[iPdPt]; 3749 if (pPdp e->n.u1Present)3775 if (pPdptTemp->a[iPdPt].n.u1Present) 3750 3776 { 3751 3777 PX86PDPAE pPD; 3752 3753 rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdpe->u & X86_PDPE_PG_MASK, &pPD); 3754 if (RT_FAILURE(rc)) 3755 { 3756 AssertFailed(); 3757 return 0; 3758 } 3778 rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD); 3779 AssertRCReturn(rc, NULL); 3780 3759 3781 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK; 3760 3782 return pPD; … … 3776 3798 DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGM pPGM, RTGCUINTPTR64 GCPtr, unsigned *piPD) 3777 3799 { 3778 PX86PML4E pPml4e; 3779 PX86PDPE pPdpe; 3780 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK; 3781 3782 Assert(pPGM->pGstPaePML4HC); 3783 pPml4e = 
&pPGM->pGstPaePML4HC->a[iPml4e]; 3784 if (pPml4e->n.u1Present) 3800 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM); 3801 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK; 3802 if (pGuestPml4->a[iPml4e].n.u1Present) 3785 3803 { 3786 PX86PDPT pPdptTemp; 3787 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp); 3788 if (RT_FAILURE(rc)) 3789 { 3790 AssertFailed(); 3791 return 0; 3792 } 3793 3794 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; 3795 pPdpe = &pPdptTemp->a[iPdPt]; 3796 if (pPdpe->n.u1Present) 3804 PCX86PDPT pPdptTemp; 3805 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pGuestPml4->a[iPml4e].u & X86_PML4E_PG_MASK, &pPdptTemp); 3806 AssertRCReturn(rc, NULL); 3807 3808 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; 3809 if (pPdptTemp->a[iPdPt].n.u1Present) 3797 3810 { 3798 3811 PX86PDPAE pPD; 3799 3800 rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdpe->u & X86_PDPE_PG_MASK, &pPD); 3801 if (RT_FAILURE(rc)) 3802 { 3803 AssertFailed(); 3804 return 0; 3805 } 3812 rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD); 3813 AssertRCReturn(rc, NULL); 3814 3806 3815 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK; 3807 3816 return pPD; 3808 3817 } 3809 3818 } 3810 return 0;3819 return NULL; 3811 3820 } 3812 3821 -
TabularUnified trunk/src/VBox/VMM/VMMAll/PGMAll.cpp ¶
r13933 r13935 888 888 889 889 /** 890 * Syncs the SHADOW page directory pointer for the specified address. Allocates 891 * backing pages in case the PDPT or PML4 entry is missing. 890 * Syncs the SHADOW page directory pointer for the specified address. 891 * 892 * Allocates backing pages in case the PDPT or PML4 entry is missing. 893 * 894 * The caller is responsible for making sure the guest has a valid PD before 895 * calling this function. 892 896 * 893 897 * @returns VBox status. … … 905 909 PX86PML4E pPml4e; 906 910 PPGMPOOLPAGE pShwPage; 911 X86PML4E Pml4eGst; 907 912 int rc; 908 913 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM); … … 919 924 if (!fNestedPaging) 920 925 { 926 /** @todo why are we looking up the guest PML4E here? Isn't pGstPml4e 927 * trustworthy? (Remove pgmGstGetLongModePML4E if pGstPml4e and pGstPdpe 928 * are fine.) */ 921 929 Assert(pVM->pgm.s.pHCShwAmd64CR3); 922 Assert(pPGM->pGstPaePML4HC); 923 924 PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e]; 925 926 rc = pgmPoolAlloc(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, 930 Pml4eGst.u = pgmGstGetLongModePML4E(&pVM->pgm.s, iPml4e); 931 932 rc = pgmPoolAlloc(pVM, Pml4eGst.u & X86_PML4E_PG_MASK, 927 933 PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e, &pShwPage); 928 934 } … … 946 952 } 947 953 /* The PDPT was cached or created; hook it up now. */ 948 pPml4e->u |= 949 954 pPml4e->u |= pShwPage->Core.Key 955 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT)); 950 956 951 957 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; … … 959 965 if (!fNestedPaging) 960 966 { 961 Assert(pPGM->pGstPaePML4HC);962 963 P X86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];964 PX86PDPT 965 rc = PGM_GCPHYS_2_PTR(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, &pPdptGst);967 /** @todo why are we looking up the guest PDPTE here? Isn't pGstPdpe 968 * trustworthy? 
*/ 969 Pml4eGst.u = pgmGstGetLongModePML4E(&pVM->pgm.s, iPml4e); 970 PX86PDPT pPdptGst; 971 rc = PGM_GCPHYS_2_PTR(pVM, Pml4eGst.u & X86_PML4E_PG_MASK, &pPdptGst); 966 972 AssertRCReturn(rc, rc); 967 973 … … 988 994 } 989 995 /* The PD was cached or created; hook it up now. */ 990 pPdpe->u |= 991 996 pPdpe->u |= pShwPage->Core.Key 997 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT)); 992 998 993 999 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage); -
TabularUnified trunk/src/VBox/VMM/VMMAll/PGMAllBth.h ¶
r13933 r13935 3709 3709 RTGCPHYS GCPhysPdptSrc; 3710 3710 3711 pPml4eSrc = &pVM->pgm.s.CTXSUFF(pGstPaePML4)->a[iPml4e];3711 pPml4eSrc = pgmGstGetLongModePML4EPtr(&pVM->pgm.s, iPml4e); 3712 3712 pPml4eDst = &pVM->pgm.s.CTXMID(p,PaePML4)->a[iPml4e]; 3713 3713 -
TabularUnified trunk/src/VBox/VMM/VMMAll/PGMAllGst.h ¶
r13933 r13935 505 505 PGM_INVL_PG(GCPtr); /** @todo this shouldn't be necessary? */ 506 506 } 507 507 508 # elif PGM_GST_TYPE == PGM_TYPE_AMD64 508 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);509 510 pVM->pgm.s.pGst PaePML4HC = (R3R0PTRTYPE(PX86PML4))HCPtrGuestCR3;511 509 pVM->pgm.s.pGstAmd64PML4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3; 510 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE 511 pVM->pgm.s.pGstAmd64PML4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3; 512 # endif 512 513 if (!HWACCMIsNestedPagingActive(pVM)) 513 514 { 515 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); 514 516 if (pVM->pgm.s.pHCShwAmd64CR3) 515 517 { … … 587 589 588 590 #elif PGM_GST_TYPE == PGM_TYPE_AMD64 589 pVM->pgm.s.pGstPaePML4HC = 0; 591 pVM->pgm.s.pGstAmd64PML4R3 = 0; 592 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE 593 pVM->pgm.s.pGstAmd64PML4R0 = 0; 594 # endif 590 595 if (!HWACCMIsNestedPagingActive(pVM)) 591 596 {
Note:
See TracChangeset
for help on using the changeset viewer.