Changeset 41391 in vbox for trunk/src/VBox/VMM
- Timestamp: May 22, 2012 2:06:53 PM
- Location: trunk/src/VBox/VMM
- Files: 5 edited
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r41386 → r41391

@@ -1847,4 +1847,5 @@
             break;
     }
+    PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);

     if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)

@@ -1949,4 +1950,6 @@
             break;
     }
+    PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
+
     if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
     {

@@ -2034,4 +2037,5 @@
             break;
     }
+    PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);

     if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r40449 → r41391

@@ -173,5 +173,5 @@
         const RTGCPHYS GCPhysFault = pGstWalk->Core.GCPhys;
 # else
-        const RTGCPHYS GCPhysFault = (RTGCPHYS)pvFault;
+        const RTGCPHYS GCPhysFault = PGM_A20_APPLY(pVCpu, (RTGCPHYS)pvFault);
 # endif
         PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhysFault);

@@ -561,5 +561,5 @@
         rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, 1, uErr);
 # else
-        rc = pgmPhysGetPageEx(pVM, (RTGCPHYS)pvFault, &pPage);
+        rc = pgmPhysGetPageEx(pVM, PGM_A20_APPLY(pVCpu, (RTGCPHYS)pvFault), &pPage);
         if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
             return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage,

@@ -753,5 +753,5 @@
     RTGCPHYS GCPhys = GstWalk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
 # else
-    RTGCPHYS GCPhys = (RTGCPHYS)pvFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
+    RTGCPHYS GCPhys = PGM_A20_APPLY(pVCpu, (RTGCPHYS)pvFault & ~(RTGCPHYS)PAGE_OFFSET_MASK);
 # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
     PPGMPAGE pPage;

@@ -1283,5 +1283,5 @@
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
         /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
-        GCPhys |= (iPDDst & 1) * (PAGE_SIZE/2);
+        GCPhys = PGM_A20_APPLY(pVCpu, GCPhys | ((iPDDst & 1) * (PAGE_SIZE / 2)));
 # endif
         if (pShwPage->GCPhys == GCPhys)

@@ -1332,5 +1332,5 @@
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
         /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
-        GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
+        GCPhys = PGM_A20_APPLY(pVCpu, GCPhys | (GCPtrPage & (1 << X86_PD_PAE_SHIFT)));
 # endif
         if (   pShwPage->GCPhys == GCPhys

@@ -1909,5 +1909,5 @@
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
             /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
-            GCPhys |= (iPDDst & 1) * (PAGE_SIZE / 2);
+            GCPhys = PGM_A20_APPLY(pVCpu, GCPhys | ((iPDDst & 1) * (PAGE_SIZE / 2)));
 # endif
         }

@@ -1917,5 +1917,5 @@
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
             /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
-            GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
+            GCPhys = PGM_A20_APPLY(pVCpu, GCPhys | (GCPtrPage & (1 << X86_PD_PAE_SHIFT)));
 # endif
         }

@@ -2037,5 +2037,5 @@
          */
        /* Calculate the GC physical address of this 4KB shadow page. */
-       GCPhys = GST_GET_BIG_PDE_GCPHYS(pVM, PdeSrc) | (GCPtrPage & GST_BIG_PAGE_OFFSET_MASK);
+       GCPhys = PGM_A20_APPLY(pVCpu, GST_GET_BIG_PDE_GCPHYS(pVM, PdeSrc) | (GCPtrPage & GST_BIG_PAGE_OFFSET_MASK));
        /* Find ram range. */
        PPGMPAGE pPage;

@@ -2673,5 +2673,5 @@
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
         /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
-        GCPhys |= (iPDDst & 1) * (PAGE_SIZE / 2);
+        GCPhys = PGM_A20_APPLY(pVCpu, GCPhys | ((iPDDst & 1) * (PAGE_SIZE / 2)));
 # endif
         rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage);

@@ -2689,5 +2689,5 @@
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
         /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
-        GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
+        GCPhys = PGM_A20_APPLY(pVCpu, GCPhys | (GCPtrPage & (1 << X86_PD_PAE_SHIFT)));
 # endif
         /* Determine the right kind of large page to avoid incorrect cached entry reuse. */

@@ -2909,8 +2909,14 @@
     if (pRam && GCPhys >= pRam->GCPhys)
     {
+# ifndef PGM_WITH_A20
         unsigned iHCPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
+# endif
         do
         {
             /* Make shadow PTE. */
+# ifdef PGM_WITH_A20
+            PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
+# else
             PPGMPAGE pPage = &pRam->aPages[iHCPage];
+# endif
             SHWPTE PteDst;

@@ -2977,5 +2983,8 @@
             /* advance */
             GCPhys += PAGE_SIZE;
+            PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
+# ifndef PGM_WITH_A20
             iHCPage++;
+# endif
             iPTDst++;
         } while (   iPTDst < RT_ELEMENTS(pPTDst->a)

@@ -2996,4 +3005,5 @@
         } while (   iPTDst < RT_ELEMENTS(pPTDst->a)
                  && GCPhys < pRam->GCPhys);
+        PGM_A20_APPLY_TO_VAR(pVCpu,GCPhys);
     }
     else

@@ -3092,5 +3102,5 @@
         /* Check if we allocated a big page before for this 2 MB range. */
         PPGMPAGE pPage;
-        rc = pgmPhysGetPageEx(pVM, GCPtrPage & X86_PDE2M_PAE_PG_MASK, &pPage);
+        rc = pgmPhysGetPageEx(pVM, PGM_A20_APPLY(pVCpu, GCPtrPage & X86_PDE2M_PAE_PG_MASK), &pPage);
         if (RT_SUCCESS(rc))
         {

@@ -3098,9 +3108,18 @@
             if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
             {
-                STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageReused);
-                AssertRelease(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
-                HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
+                if (PGM_A20_IS_ENABLED(pVCpu))
+                {
+                    STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageReused);
+                    AssertRelease(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
+                    HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
+                }
+                else
+                {
+                    PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
+                    pVM->pgm.s.cLargePagesDisabled++;
+                }
             }
-            else if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED)
+            else if (   PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED
+                     && PGM_A20_IS_ENABLED(pVCpu))
             {
                 /* Recheck the entire 2 MB range to see if we can use it again as a large page. */

@@ -3113,5 +3132,6 @@
             }
         }
-        else if (PGMIsUsingLargePages(pVM))
+        else if (   PGMIsUsingLargePages(pVM)
+                 && PGM_A20_IS_ENABLED(pVCpu))
         {
             rc = pgmPhysAllocLargePage(pVM, GCPtrPage);

@@ -3161,4 +3181,4 @@

     /* Virtual address = physical address */
-    GCPhys = GCPtrPage & X86_PAGE_4K_BASE_MASK;
+    GCPhys = PGM_A20_APPLY(pVCpu, GCPtrPage & X86_PAGE_4K_BASE_MASK);
     rc = pgmPoolAlloc(pVM, GCPhys & ~(RT_BIT_64(SHW_PD_SHIFT) - 1), BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage);

@@ -3183,5 +3203,6 @@
         for (unsigned iPTDst = 0; iPTDst < RT_ELEMENTS(pPTDst->a); iPTDst++)
         {
-            RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
+            RTGCPTR GCPtrCurPage = PGM_A20_APPLY(pVCpu, (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT))
+                                                        | (iPTDst << PAGE_SHIFT));

             PGM_BTH_NAME(SyncPageWorker)(pVCpu, &pPTDst->a[iPTDst], GCPtrCurPage, pShwPage, iPTDst);

@@ -3563,7 +3584,8 @@
 * @returns VBox status code, no specials.
 * @param   pVCpu   The VMCPU handle.
-* @param   cr0     Guest context CR0 register
-* @param   cr3     Guest context CR3 register
-* @param   cr4     Guest context CR4 register
+* @param   cr0     Guest context CR0 register.
+* @param   cr3     Guest context CR3 register.  Not subjected to the A20
+*                  mask.
+* @param   cr4     Guest context CR4 register.
 * @param   fGlobal Including global page directories or not
 */

@@ -3690,5 +3712,5 @@
     * Check that the Guest CR3 and all its mappings are correct.
     */
-    AssertMsgReturn(pPGM->GCPhysCR3 == (cr3 & GST_CR3_PAGE_MASK),
+    AssertMsgReturn(pPGM->GCPhysCR3 == PGM_A20_APPLY(pVCpu, cr3 & GST_CR3_PAGE_MASK),
                     ("Invalid GCPhysCR3=%RGp cr3=%RGp\n", pPGM->GCPhysCR3, (RTGCPHYS)cr3),
                     false);

@@ -3701,5 +3723,5 @@
     AssertRCReturn(rc, 1);
     HCPhys = NIL_RTHCPHYS;
-    rc = pgmRamGCPhys2HCPhys(pVM, cr3 & GST_CR3_PAGE_MASK, &HCPhys);
+    rc = pgmRamGCPhys2HCPhys(pVM, PGM_A20_APPLY(pVCpu, cr3 & GST_CR3_PAGE_MASK), &HCPhys);
     AssertMsgReturn(HCPhys == HCPhysShw, ("HCPhys=%RHp HCPhyswShw=%RHp (cr3)\n", HCPhys, HCPhysShw), false);
 # if PGM_GST_TYPE == PGM_TYPE_32BIT && defined(IN_RING3)

@@ -3708,5 +3730,5 @@
     rc = PGMR3DbgR3Ptr2GCPhys(pVM, pPGM->pGst32BitPdR3, &GCPhys);
     AssertRCReturn(rc, 1);
-    AssertMsgReturn((cr3 & GST_CR3_PAGE_MASK) == GCPhys, ("GCPhys=%RGp cr3=%RGp\n", GCPhys, (RTGCPHYS)cr3), false);
+    AssertMsgReturn(PGM_A20_APPLY(pVCpu, cr3 & GST_CR3_PAGE_MASK) == GCPhys, ("GCPhys=%RGp cr3=%RGp\n", GCPhys, (RTGCPHYS)cr3), false);
 # endif
 # endif /* !IN_RING0 */

@@ -3755,5 +3777,5 @@

         pShwPdpt = pgmPoolGetPage(pPool, pPml4eDst->u & X86_PML4E_PG_MASK);
-        GCPhysPdptSrc = pPml4eSrc->u & X86_PML4E_PG_MASK;
+        GCPhysPdptSrc = PGM_A20_APPLY(pVCpu, pPml4eSrc->u & X86_PML4E_PG_MASK);

         if (pPml4eSrc->n.u1Present != pPml4eDst->n.u1Present)

@@ -3829,5 +3851,5 @@

             pShwPde = pgmPoolGetPage(pPool, pPdpeDst->u & X86_PDPE_PG_MASK);
-            GCPhysPdeSrc = PdpeSrc.u & X86_PDPE_PG_MASK;
+            GCPhysPdeSrc = PGM_A20_APPLY(pVCpu, PdpeSrc.u & X86_PDPE_PG_MASK);

             if (pPdpeDst->n.u1Present != PdpeSrc.n.u1Present)

@@ -3940,5 +3962,5 @@
             GCPhysGst = GST_GET_PDE_GCPHYS(PdeSrc);
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
-            GCPhysGst |= (iPDDst & 1) * (PAGE_SIZE / 2);
+            GCPhysGst = PGM_A20_APPLY(pVCpu, GCPhysGst | ((iPDDst & 1) * (PAGE_SIZE / 2)));
 # endif
         }

@@ -3956,5 +3978,5 @@
             GCPhysGst = GST_GET_BIG_PDE_GCPHYS(pVM, PdeSrc);
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
-            GCPhysGst |= GCPtr & RT_BIT(X86_PAGE_2M_SHIFT);
+            GCPhysGst = PGM_A20_APPLY(pVCpu, GCPhysGst | (GCPtr & RT_BIT(X86_PAGE_2M_SHIFT)));
 # endif
         }

@@ -3992,5 +4014,6 @@
              */
            const GSTPT *pPTSrc;
-           rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhysGst & ~(RTGCPHYS)(PAGE_SIZE - 1), &pPTSrc);
+           rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, PGM_A20_APPLY(pVCpu, GCPhysGst & ~(RTGCPHYS)(PAGE_SIZE - 1)),
+                                    &pPTSrc);
            if (RT_FAILURE(rc))
            {

@@ -4047,5 +4070,5 @@
                 AssertMsgFailed(("Out of sync (!P) PTE at %RGv! PteSrc=%#RX64 PteDst=%#RX64 pPTSrc=%RGv iPTSrc=%x PdeSrc=%x physpte=%RGp\n",
                                  GCPtr + off, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst), pPTSrc, iPT + offPTSrc, PdeSrc.au32[0],
-                                 (uint64_t)GST_GET_PDE_GCPHYS(PdeSrc) + (iPT + offPTSrc) *sizeof(PteSrc)));
+                                 (uint64_t)GST_GET_PDE_GCPHYS(PdeSrc) + (iPT + offPTSrc) * sizeof(PteSrc)));
                 cErrors++;
                 continue;

@@ -4275,5 +4298,5 @@
             for (unsigned iPT = 0, off = 0;
                  iPT < RT_ELEMENTS(pPTDst->a);
-                 iPT++, off += PAGE_SIZE, GCPhysGst += PAGE_SIZE)
+                 iPT++, off += PAGE_SIZE, GCPhysGst = PGM_A20_APPLY(pVCpu, GCPhysGst + PAGE_SIZE))
             {
                 const SHWPTE PteDst = pPTDst->a[iPT];

@@ -4414,6 +4437,7 @@
 * @retval  VINF_SUCCESS.
 *
-* @param   pVCpu       The VMCPU handle.
-* @param   GCPhysCR3   The physical address in the CR3 register.
+* @param   pVCpu       The VMCPU handle.
+* @param   GCPhysCR3   The physical address in the CR3 register.  (A20
+*                      mask already applied.)
 */
 PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3)

@@ -4481,5 +4505,5 @@
             RTHCPTR HCPtr;
             RTHCPHYS HCPhys;
-            RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;
+            RTGCPHYS GCPhys = PGM_A20_APPLY(pVCpu, pGuestPDPT->a[i].u & X86_PDPE_PG_MASK);
             pgmLock(pVM);
             PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
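A side note on the SyncPT hunks above: after `GCPhys += PAGE_SIZE` the new PGM_A20_APPLY_TO_VAR call re-applies the mask, so a page-by-page walk wraps at the 1 MiB boundary instead of running past it while the guest's A20 gate is closed. A rough standalone sketch of that loop shape follows; the mask value, constant and helper below are illustrative stand-ins, not VMM code.

    #include <stdint.h>
    #include <stdio.h>

    #define MY_PAGE_SIZE 0x1000u   /* illustrative stand-in for PAGE_SIZE */

    /* Stand-in for PGM_A20_APPLY_TO_VAR(): apply the A20 mask in place. */
    static void A20ApplyToVar(uint64_t fA20Mask, uint64_t *pGCPhys)
    {
        *pGCPhys &= fA20Mask;
    }

    int main(void)
    {
        /* A20 gate closed: bit 20 is forced to zero. */
        uint64_t fA20Mask = ~(UINT64_C(1) << 20);
        uint64_t GCPhys   = 0x000FE000;   /* a couple of pages below 1 MiB */

        for (unsigned i = 0; i < 4; i++)
        {
            printf("page %u -> %#09llx\n", i, (unsigned long long)GCPhys);
            GCPhys += MY_PAGE_SIZE;
            A20ApplyToVar(fA20Mask, &GCPhys);  /* keeps the walk wrapped below 1 MiB */
        }
        return 0;
    }

With the mask re-applied, the third step lands on 0x000000 rather than 0x100000, which mirrors why the real loop masks GCPhys right after advancing it.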
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r39078 → r41391

@@ -167,4 +167,5 @@
         pWalk->Core.GCPhys = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
                            | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
+        PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
         uint8_t fEffectiveXX = (uint8_t)pWalk->Pde.u
 # if PGM_GST_TYPE == PGM_TYPE_AMD64

@@ -515,5 +516,5 @@
             RTGCPHYS GCPhysNew;
             if (Pte.n.u1Present)
-                GCPhysNew = (RTGCPHYS)(pPT->a[iPTE].u & GST_PTE_PG_MASK) + offPage;
+                GCPhysNew = PGM_A20_APPLY(pVCpu, (RTGCPHYS)(pPT->a[iPTE].u & GST_PTE_PG_MASK) + offPage);
             else
                 GCPhysNew = NIL_RTGCPHYS;

@@ -567,5 +568,5 @@
              i4KB++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
         {
-            RTGCPHYS GCPhysNew = GCPhys + (i4KB << PAGE_SHIFT) + offPage;
+            RTGCPHYS GCPhysNew = PGM_A20_APPLY(pVCpu, GCPhys + (i4KB << PAGE_SHIFT) + offPage);
             if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
             {
trunk/src/VBox/VMM/include/PGMGstDefs.h
r36891 → r41391

@@ -107,5 +107,5 @@
 # endif
 # endif
-# define GST_GET_PTE_GCPHYS(Pte) ((Pte).u & GST_PTE_PG_MASK)
+# define GST_GET_PTE_GCPHYS(Pte) PGM_A20_APPLY(pVCpu, ((Pte).u & GST_PTE_PG_MASK))
 # define GST_GET_PDE_GCPHYS(Pde) (true && This_should_perhaps_not_be_used_in_this_context) //??
 # define GST_GET_BIG_PDE_GCPHYS(Pde) (true && This_should_perhaps_not_be_used_in_this_context) //??

@@ -138,7 +138,7 @@
 # define GST_PDE_PG_MASK X86_PDE_PG_MASK
 # define GST_PDE_BIG_PG_MASK X86_PDE4M_PG_MASK
-# define GST_GET_PTE_GCPHYS(Pte) ((Pte).u & GST_PDE_PG_MASK)
-# define GST_GET_PDE_GCPHYS(Pde) ((Pde).u & GST_PDE_PG_MASK)
-# define GST_GET_BIG_PDE_GCPHYS(pVM, Pde) pgmGstGet4MBPhysPage((pVM), Pde)
+# define GST_GET_PTE_GCPHYS(Pte) PGM_A20_APPLY(pVCpu, ((Pte).u & GST_PDE_PG_MASK))
+# define GST_GET_PDE_GCPHYS(Pde) PGM_A20_APPLY(pVCpu, ((Pde).u & GST_PDE_PG_MASK))
+# define GST_GET_BIG_PDE_GCPHYS(pVM, Pde) PGM_A20_APPLY(pVCpu, pgmGstGet4MBPhysPage((pVM), Pde))
 # define GST_GET_PDE_SHW_FLAGS(pVCpu, Pde) ((Pde).u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_A))
 # define GST_GET_BIG_PDE_SHW_FLAGS(pVCpu, Pde) \

@@ -178,7 +178,7 @@
 # define GST_PDE_PG_MASK X86_PDE_PAE_PG_MASK
 # define GST_PDE_BIG_PG_MASK X86_PDE2M_PAE_PG_MASK
-# define GST_GET_PTE_GCPHYS(Pte) ((Pte).u & GST_PTE_PG_MASK)
-# define GST_GET_PDE_GCPHYS(Pde) ((Pde).u & GST_PDE_PG_MASK)
-# define GST_GET_BIG_PDE_GCPHYS(pVM, Pde) ((Pde).u & GST_PDE_BIG_PG_MASK)
+# define GST_GET_PTE_GCPHYS(Pte) PGM_A20_APPLY(pVCpu, ((Pte).u & GST_PTE_PG_MASK))
+# define GST_GET_PDE_GCPHYS(Pde) PGM_A20_APPLY(pVCpu, ((Pde).u & GST_PDE_PG_MASK))
+# define GST_GET_BIG_PDE_GCPHYS(pVM, Pde) PGM_A20_APPLY(pVCpu, ((Pde).u & GST_PDE_BIG_PG_MASK))
 # define GST_GET_PTE_SHW_FLAGS(pVCpu, Pte) ((Pte).u & (pVCpu)->pgm.s.fGst64ShadowedPteMask )
 # define GST_GET_PDE_SHW_FLAGS(pVCpu, Pde) ((Pde).u & (pVCpu)->pgm.s.fGst64ShadowedPdeMask )
trunk/src/VBox/VMM/include/PGMInternal.h
r40054 → r41391

@@ -2520,4 +2520,21 @@


+
+/** @name A20 gate macros
+ * @{ */
+/*#define PGM_WITH_A20*/
+#ifdef PGM_WITH_A20
+# define PGM_A20_IS_ENABLED(a_pVCpu) ((a_pVCpu)->pgm.s.fA20Enabled)
+# define PGM_A20_APPLY(a_pVCpu, a_GCPhys) ((a_pVCpu)->pgm.s.GCPhysA20Mask & (a_GCPhys))
+# define PGM_A20_APPLY_TO_VAR(a_pVCpu, a_GCPhysVar) \
+    do { a_GCPhysVar = (a_pVCpu)->pgm.s.GCPhysA20Mask & a_GCPhysVar; } while (0)
+#else
+# define PGM_A20_IS_ENABLED(a_pVCpu) (true)
+# define PGM_A20_APPLY(a_pVCpu, a_GCPhys) (a_GCPhys)
+# define PGM_A20_APPLY_TO_VAR(a_pVCpu, a_GCPhysVar) do { } while (0)
+#endif
+/** @} */
+
+
 /**
  * Trees are using self relative offsets as pointers.
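For readers unfamiliar with the A20 gate: while it is closed, bit 20 of every physical address is forced to zero, so accesses just above 1 MiB wrap back to the bottom of memory. The macros above express that as a single AND with a per-VCPU mask. A minimal standalone sketch of that behaviour is below; fA20Enabled and GCPhysA20Mask are the fields the macros reference, but the tiny structure and the mask value chosen here are only an illustrative model, not the real PGM state, and how VirtualBox initialises the mask lies outside this changeset.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the per-VCPU PGM state used by the macros above. */
    typedef struct A20STATE
    {
        bool     fA20Enabled;    /* true when the A20 gate is open */
        uint64_t GCPhysA20Mask;  /* assumed ~0 when open, ~(1 << 20) when closed */
    } A20STATE;

    /* Mirrors what PGM_A20_APPLY() expands to: clear bit 20 of a guest-physical
       address whenever the A20 gate is disabled, making addresses wrap at 1 MiB. */
    static uint64_t A20Apply(const A20STATE *pState, uint64_t GCPhys)
    {
        return pState->GCPhysA20Mask & GCPhys;
    }

    int main(void)
    {
        A20STATE On  = { true,  UINT64_MAX };
        A20STATE Off = { false, UINT64_MAX & ~(UINT64_C(1) << 20) };

        /* With A20 enabled the address is untouched; with it disabled an
           access to 0x100FED wraps around to 0xFED. */
        printf("%#llx\n", (unsigned long long)A20Apply(&On,  0x100FEDULL));
        printf("%#llx\n", (unsigned long long)A20Apply(&Off, 0x100FEDULL));
        return 0;
    }

This also explains the PGM_A20_IS_ENABLED checks added around the large-page paths in PGMAllBth.h: a 2 MB mapping cannot be used while individual 4 KB addresses inside it are being wrapped by the mask.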