Changeset 13232 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Oct 13, 2008, 8:03:48 PM
- svn:sync-xref-src-repo-rev: 37813
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 6 edited
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r13198 → r13232

@@ -403 +403 @@
     STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVReserved);
 }
-#endif
+#endif /* VBOX_WITH_STATISTICS */

 /*
@@ -476 +476 @@
  * @param   cbSize      Access size
  * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*))
+ * @remarks Current not in use.
  */
 VMMDECL(int) PGMIsValidAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
@@ -531 +532 @@
 VMMDECL(int) PGMVerifyAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
 {
-    /*
-     * Validate input.
-     */
-    if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
-    {
-        AssertMsgFailed(("PGMVerifyAccess: invalid access type %08x\n", fAccess));
-        return VERR_INVALID_PARAMETER;
-    }
-
+    AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
+
+    /*
+     * Get going.
+     */
     uint64_t fPageGst;
     int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
@@ -567 +564 @@
     {
         /*
-         * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
-         */
+         * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
+         */
         rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
         if (    rc == VERR_PAGE_NOT_PRESENT
@@ -678 +675 @@
      * Inform CSAM about the flush
      */
-    /** @note this is to check if monitored pages have been changed; when we implement callbacks for virtual handlers, this is no longer required. */
+    /* note: This is to check if monitored pages have been changed; when we implement callbacks for virtual handlers, this is no longer required. */
     CSAMR3FlushPage(pVM, GCPtrPage);
 # endif
@@ -753 +750 @@
  * @remark  You must use PGMMapModifyPage() for pages in a mapping.
  */
-VMMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
-{
-    /*
-     * Validate input.
-     */
-    if (fFlags & X86_PTE_PAE_PG_MASK)
-    {
-        AssertMsgFailed(("fFlags=%#llx\n", fFlags));
-        return VERR_INVALID_PARAMETER;
-    }
-    if (!cb)
-    {
-        AssertFailed();
-        return VERR_INVALID_PARAMETER;
-    }
+VMMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
+{
+    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
+    Assert(cb);

     /*
@@ -870 +856 @@
 }

-
 #ifndef IN_GC
+
 /**
  * Syncs the SHADOW page directory pointer for the specified address. Allocates
@@ -909 +895 @@
         PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];

-        rc = pgmPoolAlloc(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e, &pShwPage);
+        rc = pgmPoolAlloc(pVM, pPml4eGst->u & X86_PML4E_PG_MASK,
+                          PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e, &pShwPage);
     }
     else
-        rc = pgmPoolAlloc(pVM, GCPtr + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4e, &pShwPage);
+        rc = pgmPoolAlloc(pVM, GCPtr + RT_BIT_64(63) /* hack: make the address unique */,
+                          PGMPOOLKIND_64BIT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4e, &pShwPage);

     if (rc == VERR_PGM_POOL_FLUSHED)
@@ -978 +966 @@
 }

+
 /**
  * Gets the SHADOW page directory pointer for the specified address.
@@ -1018 +1007 @@
     return VINF_SUCCESS;
 }
+

 /**
@@ -1106 +1096 @@
 }

-#endif
+#endif /* IN_GC */

 /**
@@ -1179 +1169 @@
      * Validate input.
      */
-    if (fFlags & X86_PTE_PAE_PG_MASK)
-    {
-        AssertMsgFailed(("fFlags=%#llx\n", fFlags));
-        STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
-        return VERR_INVALID_PARAMETER;
-    }
-
-    if (!cb)
-    {
-        AssertFailed();
-        STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
-        return VERR_INVALID_PARAMETER;
-    }
+    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
+    Assert(cb);

     LogFlow(("PGMGstModifyPage %VGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
@@ -1211 +1190 @@
 }

+
 /**
  * Gets the specified page directory pointer table entry.
@@ -1258 +1238 @@
 }

+
 /**
  * Gets the current CR3 register value for the nested memory context.
@@ -1284 +1265 @@
 }

+
 /**
  * Gets the current CR3 register value for the EPT paging memory context.
@@ -1293 +1275 @@
     return pVM->pgm.s.HCPhysNestedRoot;
 }
+

 /**
@@ -1360 +1343 @@

 /**
- * Gets the current CR3 register value for the GC intermediate memory context.
+ * Gets the current CR3 register value for the RC intermediate memory context.
  * @returns CR3 value.
  * @param   pVM         The VM handle.
  */
-VMMDECL(RTHCPHYS) PGMGetInterGCCR3(PVM pVM)
+VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM)
 {
     switch (pVM->pgm.s.enmShadowMode)
@@ -1494 +1477 @@
 }

-/**
- * Performs and schedules necessary updates following a CR3 load or reload,
- * without actually flushing the TLB as with PGMFlushTLB.
+
+/**
+ * Performs and schedules necessary updates following a CR3 load or reload when
+ * using nested or extended paging.
+ *
+ * This API is an alterantive to PDMFlushTLB that avoids actually flushing the
+ * TLB and triggering a SyncCR3.
  *
  * This will normally involve mapping the guest PD or nPDPT
@@ -1535 +1522 @@
     return rc;
 }
+

 /**
@@ -1934 +1922 @@

 #else  /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
+    /** @todo @bugref{3202}: Implement ring-0 mapping cache similar to the one in
+     *        RC. To begin with, a simple but expensive one based on
+     *        RTR0MemObjEnterPhys can be used to get things started. Later a
+     *        global cache with mappings per CPU (to avoid shootdown) should be
+     *        employed. */
     AssertFailed();
     return VERR_NOT_IMPLEMENTED;
@@ -1961 +1954 @@

 #endif /* IN_GC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
-
 #ifdef VBOX_STRICT

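A side note on the validation style used throughout this changeset: explicit if/AssertMsgFailed/return blocks are collapsed into IPRT assertion macros. The sketch below contrasts the two forms. It is illustrative only; pgmSketchValidateOld()/pgmSketchValidateNew() and SKETCH_PG_MASK are made up and not part of the changeset, and the comments reflect the usual IPRT semantics (plain Assert* compiles out in non-strict builds, the *Return variants keep their early return).

    #include <iprt/assert.h>
    #include <iprt/err.h>
    #include <iprt/types.h>

    /* Arbitrary stand-in mask for the sketch, not a real PGM constant. */
    #define SKETCH_PG_MASK  UINT64_C(0x000ffffffffff000)

    /* Old style: explicit check, failure assertion, and an early return that
       is taken even in release builds. */
    static int pgmSketchValidateOld(uint64_t fFlags, size_t cb)
    {
        if (fFlags & SKETCH_PG_MASK)
        {
            AssertMsgFailed(("fFlags=%#llx\n", fFlags));
            return VERR_INVALID_PARAMETER;
        }
        if (!cb)
        {
            AssertFailed();
            return VERR_INVALID_PARAMETER;
        }
        return VINF_SUCCESS;
    }

    /* New style: strict-build assertions; where a release-build bail-out must
       be kept, the *Return variant is used instead. */
    static int pgmSketchValidateNew(uint64_t fFlags, size_t cb)
    {
        AssertMsg(!(fFlags & SKETCH_PG_MASK), ("fFlags=%#llx\n", fFlags));          /* no return when assertions are compiled out */
        AssertMsgReturn(cb > 0, ("cb=%#x\n", (unsigned)cb), VERR_INVALID_PARAMETER); /* still returns when assertions are compiled out */
        return VINF_SUCCESS;
    }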
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r13203 → r13232

@@ -99 +99 @@
      * Get PDs.
      */
-    int rc;
+    int             rc;
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
 # if PGM_GST_TYPE == PGM_TYPE_32BIT
@@ -677 +677 @@
     }
 # ifdef CSAM_DETECT_NEW_CODE_PAGES
-    else
-    if (    uErr == X86_TRAP_PF_RW
-        &&  pRegFrame->ecx >= 0x100   /* early check for movswd count */
-        &&  pRegFrame->ecx < 0x10000
-       )
+    else if (    uErr == X86_TRAP_PF_RW
+             &&  pRegFrame->ecx >= 0x100   /* early check for movswd count */
+             &&  pRegFrame->ecx < 0x10000)
     {
         /* In case of a write to a non-present supervisor shadow page, we'll take special precautions
@@ -885 +883 @@
     &&  PGM_SHW_TYPE != PGM_TYPE_NESTED \
     &&  PGM_SHW_TYPE != PGM_TYPE_EPT
-    int rc;
+    int             rc;

     LogFlow(("InvalidatePage %VGv\n", GCPtrPage));
@@ -1965 +1963 @@


-
 #if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-
 /**
  * Investigate page fault and handle write protection page faults caused by
@@ -2019 +2015 @@
     {
         uPageFaultLevel = 0;
-        goto UpperLevelPageFault;
+        goto l_UpperLevelPageFault;
     }
     Assert(pPdpeSrc);
@@ -2040 +2036 @@
     {
         uPageFaultLevel = 1;
-        goto UpperLevelPageFault;
+        goto l_UpperLevelPageFault;
     }
 # endif
@@ -2056 +2052 @@
     {
         uPageFaultLevel = 2;
-        goto UpperLevelPageFault;
+        goto l_UpperLevelPageFault;
     }

@@ -2233 +2229 @@


-UpperLevelPageFault:
-    /* Pagefault detected while checking the PML4E, PDPE or PDE.
+l_UpperLevelPageFault:
+    /*
+     * Pagefault detected while checking the PML4E, PDPE or PDE.
      * Single exit handler to get rid of duplicate code paths.
      */
@@ -2274 +2271 @@
     return VINF_EM_RAW_GUEST_TRAP;
 }
-
 #endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */

@@ -2303 +2299 @@
     &&  PGM_SHW_TYPE != PGM_TYPE_EPT

-    int rc = VINF_SUCCESS;
+    int             rc = VINF_SUCCESS;

     /*
@@ -2703 +2699 @@
     PX86PDPAE       pPDDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0];
 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64
-    const unsigned  iPdpte = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
-    const unsigned  iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
+    const unsigned  iPdpte = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
+    const unsigned  iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
     PX86PDPAE       pPDDst;
     PX86PDPT        pPdptDst;
@@ -2712 +2708 @@

     /* Fetch the pgm pool shadow descriptor. */
-    PPGMPOOLPAGE    pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpte].u & X86_PDPE_PG_MASK);
+    PPGMPOOLPAGE    pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpte].u & X86_PDPE_PG_MASK);
     Assert(pShwPde);
 # elif PGM_SHW_TYPE == PGM_TYPE_EPT
@@ -2853 +2849 @@
 # if PGM_GST_TYPE == PGM_TYPE_PROT
     /* AMD-V nested paging */
-    X86PML4E    Pml4eSrc;
-    X86PDPE     PdpeSrc;
-    PX86PML4E   pPml4eSrc = &Pml4eSrc;
+    X86PML4E        Pml4eSrc;
+    X86PDPE         PdpeSrc;
+    PX86PML4E       pPml4eSrc = &Pml4eSrc;

     /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
@@ -2889 +2885 @@
     }
     return rc;
+
+
 #elif PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
     return VINF_SUCCESS; /* ignore */
@@ -2924 +2921 @@
     }
 # endif
+
     /*
      * Get guest PD and index.
      */
-
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
 # if PGM_GST_TYPE == PGM_TYPE_32BIT
@@ -2956 +2953 @@
     const unsigned  iPDSrc = 0;
 # endif
-    int rc = VINF_SUCCESS;
+    int             rc = VINF_SUCCESS;

     /*
@@ -2962 +2959 @@
      */
 # if PGM_SHW_TYPE == PGM_TYPE_32BIT
-    PX86PDE     pPdeDst = &pVM->pgm.s.CTXMID(p,32BitPD)->a[GCPtrPage >> SHW_PD_SHIFT];
+    PX86PDE         pPdeDst = &pVM->pgm.s.CTXMID(p,32BitPD)->a[GCPtrPage >> SHW_PD_SHIFT];
 # elif PGM_SHW_TYPE == PGM_TYPE_PAE
-    PX86PDEPAE  pPdeDst = &pVM->pgm.s.CTXMID(ap,PaePDs)[0]->a[GCPtrPage >> SHW_PD_SHIFT];
+    PX86PDEPAE      pPdeDst = &pVM->pgm.s.CTXMID(ap,PaePDs)[0]->a[GCPtrPage >> SHW_PD_SHIFT];
 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64
     const unsigned  iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
@@ -2972 +2969 @@
 # if PGM_GST_TYPE == PGM_TYPE_PROT
     /* AMD-V nested paging */
-    X86PML4E    Pml4eSrc;
-    X86PDPE     PdpeSrc;
-    PX86PML4E   pPml4eSrc = &Pml4eSrc;
+    X86PML4E        Pml4eSrc;
+    X86PDPE         PdpeSrc;
+    PX86PML4E       pPml4eSrc = &Pml4eSrc;

     /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
@@ -3369 +3366 @@
     Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
 # endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
+

     /*
-      * Sync page directory entry.
-      *
-      * The current approach is to allocated the page table but to set
-      * the entry to not-present and postpone the page table synching till
-      * it's actually used.
-      */
+     * Sync page directory entry.
+     *
+     * The current approach is to allocated the page table but to set
+     * the entry to not-present and postpone the page table synching till
+     * it's actually used.
+     */
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
     for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
@@ -3475 +3473 @@
     {
         /*
-          * Check if there is any page directory to mark not present here.
-          */
+         * Check if there is any page directory to mark not present here.
+         */
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
         for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
@@ -3625 +3623 @@

 #if PGM_GST_TYPE == PGM_TYPE_PAE
-    /* @todo currently broken; crashes below somewhere */
+    /** @todo currently broken; crashes below somewhere */
     AssertFailed();
 #endif
@@ -3634 +3632 @@

 # if PGM_GST_TYPE == PGM_TYPE_AMD64
-    bool fBigPagesSupported = true;
+    bool            fBigPagesSupported = true;
 # else
-    bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
-# endif
-    PPGM pPGM = &pVM->pgm.s;
-    RTGCPHYS GCPhysGst; /* page address derived from the guest page tables. */
-    RTHCPHYS HCPhysShw; /* page address derived from the shadow page tables. */
+    bool            fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
+# endif
+    PPGM            pPGM = &pVM->pgm.s;
+    RTGCPHYS        GCPhysGst;  /* page address derived from the guest page tables. */
+    RTHCPHYS        HCPhysShw;  /* page address derived from the shadow page tables. */
 # ifndef IN_RING0
-    RTHCPHYS HCPhys; /* general usage. */
-# endif
-    int rc;
+    RTHCPHYS        HCPhys;     /* general usage. */
+# endif
+    int             rc;

     /*
@@ -3675 +3673 @@
 # if PGM_SHW_TYPE == PGM_TYPE_32BIT
     unsigned        cPDEs       = X86_PG_ENTRIES;
-    unsigned        ulIncrement = X86_PG_ENTRIES * PAGE_SIZE;
+    unsigned        cIncrement  = X86_PG_ENTRIES * PAGE_SIZE;
 # elif PGM_SHW_TYPE == PGM_TYPE_PAE
 # if PGM_GST_TYPE == PGM_TYPE_32BIT
@@ -3682 +3680 @@
     unsigned        cPDEs       = X86_PG_PAE_ENTRIES;
 # endif
-    unsigned        ulIncrement = X86_PG_PAE_ENTRIES * PAGE_SIZE;
+    unsigned        cIncrement  = X86_PG_PAE_ENTRIES * PAGE_SIZE;
 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64
     unsigned        cPDEs       = X86_PG_PAE_ENTRIES;
-    unsigned        ulIncrement = X86_PG_PAE_ENTRIES * PAGE_SIZE;
+    unsigned        cIncrement  = X86_PG_PAE_ENTRIES * PAGE_SIZE;
 # endif
     if (cb != ~(RTGCUINTPTR)0)
@@ -3693 +3691 @@

 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
-    PPGMPOOL pPool= pVM->pgm.s.CTX_SUFF(pPool);
+    PPGMPOOL        pPool = pVM->pgm.s.CTX_SUFF(pPool);
 # endif

@@ -3701 +3699 @@
     for (; iPml4e < X86_PG_PAE_ENTRIES; iPml4e++)
     {
-        PPGMPOOLPAGE pShwPdpt = NULL;
-        PX86PML4E    pPml4eSrc, pPml4eDst;
-        RTGCPHYS     GCPhysPdptSrc;
+        PPGMPOOLPAGE pShwPdpt = NULL;
+        PX86PML4E    pPml4eSrc;
+        PX86PML4E    pPml4eDst;
+        RTGCPHYS     GCPhysPdptSrc;

         pPml4eSrc = &pVM->pgm.s.CTXSUFF(pGstPaePML4)->a[iPml4e];
@@ -3711 +3710 @@
         if (!pPml4eDst->n.u1Present)
         {
-            GCPtr += UINT64_C(_2M * 512 *512);
+            GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
             continue;
         }
@@ -3725 +3724 @@
         {
             AssertMsgFailed(("Present bit doesn't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
-            GCPtr += UINT64_C(_2M * 512 *512);
+            GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
             cErrors++;
             continue;
@@ -3733 +3732 @@
         {
             AssertMsgFailed(("Physical address doesn't match! iPml4e %d pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4e, pPml4eDst->u, pPml4eSrc->u, pShwPdpt->GCPhys, GCPhysPdptSrc));
-            GCPtr += UINT64_C(_2M * 512 *512);
+            GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
             cErrors++;
             continue;
@@ -3743 +3742 @@
         {
             AssertMsgFailed(("User/Write/NoExec bits don't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
-            GCPtr += UINT64_C(_2M * 512 *512);
+            GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
             cErrors++;
             continue;
@@ -3833 +3832 @@
 # endif
 # if PGM_GST_TYPE == PGM_TYPE_32BIT
-    const GSTPD *pPDSrc = CTXSUFF(pPGM->pGuestPD);
+    const GSTPD    *pPDSrc = CTXSUFF(pPGM->pGuestPD);
 # if PGM_SHW_TYPE == PGM_TYPE_32BIT
-    const X86PD *pPDDst = pPGM->CTXMID(p,32BitPD);
+    PCX86PD         pPDDst = pPGM->CTXMID(p,32BitPD);
 # else
-    const PX86PDPAE pPDDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0]; /* We treat this as a PD with 2048 entries, so no need to and with SHW_PD_MASK to get iPDDst */
+    PCX86PDPAE      pPDDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0]; /* We treat this as a PD with 2048 entries, so no need to and with SHW_PD_MASK to get iPDDst */
 # endif
 # endif
@@ -3848 +3847 @@
     for (;
          iPDDst < cPDEs;
-         iPDDst++, GCPtr += ulIncrement)
+         iPDDst++, GCPtr += cIncrement)
     {
         const SHWPDE PdeDst = pPDDst->a[iPDDst];
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r13087 → r13232

@@ -60 +60 @@
 # define PGSTPDE                PSHWPDE
 # define GST_PTE_PG_MASK        SHW_PTE_PG_MASK
+
 #elif PGM_GST_TYPE == PGM_TYPE_32BIT
 # define GSTPT                  X86PT
@@ -81 +82 @@
 # define GST_PT_MASK            X86_PT_MASK
 # define GST_CR3_PAGE_MASK      X86_CR3_PAGE_MASK
+
 #elif PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64
@@ -503 +505 @@

     Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
-try_again:
+l_try_again:
     rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_64BIT_PML4_FOR_64BIT_PML4, PGMPOOL_IDX_AMD64_CR3, GCPhysCR3 >> PAGE_SHIFT, &pVM->pgm.s.pHCShwAmd64CR3);
     if (rc == VERR_PGM_POOL_FLUSHED)
@@ -511 +513 @@
         rc = pgmPoolSyncCR3(pVM);
         AssertRC(rc);
-        goto try_again;
+        goto l_try_again;
     }
     pVM->pgm.s.pHCPaePML4 = (PX86PML4)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.pHCShwAmd64CR3);
@@ -649 +651 @@
         pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
     }
+
     /*
      * Do the 4 PDs.
@@ -783 +786 @@
 #endif

-    unsigned offPage = GCPtr & PAGE_OFFSET_MASK;
-    unsigned iPage = 0;
+    unsigned    offPage = GCPtr & PAGE_OFFSET_MASK;
+    unsigned    iPage   = 0;
     while (iPage < pCur->cPages)
     {
@@ -1043 +1046 @@

 #endif /* PGM_TYPE_32BIT && !IN_RING3 */
-
-
 #if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(IN_RING3)

trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp
r13085 → r13232

@@ -92 +92 @@
      * Validate input.
      */
-    if (GCPhys >= GCPhysLast)
-    {
-        AssertMsgFailed(("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast));
-        return VERR_INVALID_PARAMETER;
-    }
+    AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
     switch (enmType)
     {
@@ -107 +103 @@
             return VERR_INVALID_PARAMETER;
     }
-    if (    (RTRCUINTPTR)pvUserRC >= 0x10000
-        &&  MMHyperR3ToRC(pVM, MMHyperRCToR3(pVM, pvUserRC)) != pvUserRC)
-    {
-        AssertMsgFailed(("Not RC pointer! pvUserRC=%RRv\n", pvUserRC));
-        return VERR_INVALID_PARAMETER;
-    }
+    AssertMsgReturn(    (RTRCUINTPTR)pvUserRC < 0x10000
+                    ||  MMHyperR3ToRC(pVM, MMHyperRCToR3(pVM, pvUserRC)) == pvUserRC,
+                    ("Not RC pointer! pvUserRC=%RRv\n", pvUserRC),
+                    VERR_INVALID_PARAMETER);
     AssertReturn(pfnHandlerR3 || pfnHandlerR0 || pfnHandlerRC, VERR_INVALID_PARAMETER);

@@ -659 +653 @@
     pgmLock(pVM);
     PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
-    if (pCur)
-    {
-        if (GCPhysSplit <= pCur->Core.KeyLast)
+    if (RT_LIKELY(pCur))
+    {
+        if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
         {
             /*
@@ -673 +667 @@
             pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;

-            if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core))
+            if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
             {
                 LogFlow(("PGMHandlerPhysicalSplit: %VGp-%VGp and %VGp-%VGp\n",
@@ -716 +710 @@
     pgmLock(pVM);
     PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
-    if (pCur1)
+    if (RT_LIKELY(pCur1))
     {
         PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
-        if (pCur2)
+        if (RT_LIKELY(pCur2))
         {
             /*
              * Make sure that they are adjacent, and that they've got the same callbacks.
              */
-            if (pCur1->Core.KeyLast + 1 == pCur2->Core.Key)
+            if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
             {
-                if (    pCur1->pfnHandlerRC == pCur2->pfnHandlerRC
-                    &&  pCur1->pfnHandlerR0 == pCur2->pfnHandlerR0
-                    &&  pCur1->pfnHandlerR3 == pCur2->pfnHandlerR3)
+                if (RT_LIKELY(    pCur1->pfnHandlerRC == pCur2->pfnHandlerRC
+                              &&  pCur1->pfnHandlerR0 == pCur2->pfnHandlerR0
+                              &&  pCur1->pfnHandlerR3 == pCur2->pfnHandlerR3))
                 {
                     PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
-                    if (pCur3 == pCur2)
+                    if (RT_LIKELY(pCur3 == pCur2))
                     {
                         pCur1->Core.KeyLast = pCur2->Core.KeyLast;
@@ -794 +788 @@
     int rc;
     PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
-    if (pCur)
+    if (RT_LIKELY(pCur))
     {
         /*
@@ -871 +865 @@
      */
     PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
-    if (pCur)
-    {
-        if (    GCPhysPage >= pCur->Core.Key
-            &&  GCPhysPage <= pCur->Core.KeyLast)
+    if (RT_LIKELY(pCur))
+    {
+        if (RT_LIKELY(    GCPhysPage >= pCur->Core.Key
+                      &&  GCPhysPage <= pCur->Core.KeyLast))
         {
             Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
@@ -925 +919 @@
      */
     PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
-    if (pCur)
-    {
-        if (    GCPhysPage >= pCur->Core.Key
-            &&  GCPhysPage <= pCur->Core.KeyLast)
+    if (RT_LIKELY(pCur))
+    {
+        if (RT_LIKELY(    GCPhysPage >= pCur->Core.Key
+                      &&  GCPhysPage <= pCur->Core.KeyLast))
         {
             Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
@@ -961 +955 @@
  *
  * @returns boolean
- * @param   pVM         VM Handle
+ * @param   pVM         VM Handle.
  * @param   GCPhys      Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
@@ -1143 +1137 @@
 }

-
 #if defined(VBOX_STRICT) || defined(LOG_ENABLED)
+
 /**
  * Worker for pgmHandlerVirtualDumpPhysPages.
@@ -1172 +1166 @@
                             pgmHandlerVirtualDumpPhysPagesCallback, 0);
 }
+
 #endif /* VBOX_STRICT || LOG_ENABLED */
-
 #ifdef VBOX_STRICT

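The RT_LIKELY() wrappers added above are plain branch-prediction hints around lookups and range checks that are expected to succeed. A minimal, self-contained sketch of the pattern follows; the Node type, lookupNode() stub and findAndCheck() are invented for illustration and are not PGM code.

    #include <iprt/cdefs.h>
    #include <iprt/err.h>
    #include <stddef.h>

    /* Invented placeholder type and lookup stub, only to carry the example. */
    typedef struct Node { unsigned uKey, uKeyLast; } Node;
    static Node *lookupNode(unsigned uKey) { (void)uKey; return NULL; }

    static int findAndCheck(unsigned uKey, unsigned uPage)
    {
        Node *pCur = lookupNode(uKey);
        if (RT_LIKELY(pCur))                        /* hint: the lookup normally succeeds */
        {
            if (RT_LIKELY(   uPage >= pCur->uKey    /* hint: the range check normally holds */
                          && uPage <= pCur->uKeyLast))
                return VINF_SUCCESS;
        }
        return VERR_NOT_FOUND;                      /* error paths are the unlikely ones */
    }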
trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp
r13019 → r13232

@@ -55 +55 @@
     AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
     AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));
-    //Assert(HCPhys < _4G); --- Don't *EVER* try 32-bit shadow mode on a PAE/AMD64 box with memory above 4G !!!

     /* hypervisor defaults */
@@ -148 +147 @@
      * Validate input.
      */
-    if (fFlags & X86_PTE_PAE_PG_MASK)
-    {
-        AssertMsgFailed(("fFlags=%#x\n", fFlags));
-        return VERR_INVALID_PARAMETER;
-    }
-    if (!cb)
-    {
-        AssertFailed();
-        return VERR_INVALID_PARAMETER;
-    }
+    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#x\n", fFlags));
+    Assert(cb);

     /*
@@ -175 +166 @@
     if (off < pCur->cb)
     {
-        if (off + cb > pCur->cb)
-        {
-            AssertMsgFailed(("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
-                             GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast));
-            return VERR_INVALID_PARAMETER;
-        }
+        AssertMsgReturn(off + cb <= pCur->cb,
+                        ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
+                         GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
+                        VERR_INVALID_PARAMETER);

         /*
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
r13203 → r13232

@@ -60 +60 @@
 # define SHW_PT_MASK            X86_PT_MASK
 # define SHW_POOL_ROOT_IDX      PGMPOOL_IDX_PD
+
 #elif PGM_SHW_TYPE == PGM_TYPE_EPT
 # define SHWPT                  EPTPT
@@ -80 +81 @@
 # define SHW_TOTAL_PD_ENTRIES   (EPT_PG_AMD64_ENTRIES*EPT_PG_AMD64_PDPE_ENTRIES)
 # define SHW_POOL_ROOT_IDX      PGMPOOL_IDX_NESTED_ROOT   /* do not use! exception is real mode & protected mode without paging. */
+
 #else
 # define SHWPT                  X86PTPAE
@@ -95 +97 @@
 # define SHW_PT_SHIFT           X86_PT_PAE_SHIFT
 # define SHW_PT_MASK            X86_PT_PAE_MASK
-# if PGM_SHW_TYPE == PGM_TYPE_AMD64
-#  define SHW_PDPT_SHIFT        X86_PDPT_SHIFT
-#  define SHW_PDPT_MASK         X86_PDPT_MASK_AMD64
-#  define SHW_PDPE_PG_MASK      X86_PDPE_PG_MASK
-#  define SHW_TOTAL_PD_ENTRIES  (X86_PG_AMD64_ENTRIES*X86_PG_AMD64_PDPE_ENTRIES)
-#  define SHW_POOL_ROOT_IDX     PGMPOOL_IDX_PAE_PD /* do not use! exception is real mode & protected mode without paging. */
-# else /* 32 bits PAE mode */
-#  define SHW_PDPT_SHIFT        X86_PDPT_SHIFT
-#  define SHW_PDPT_MASK         X86_PDPT_MASK_PAE
-#  define SHW_PDPE_PG_MASK      X86_PDPE_PG_MASK
-#  define SHW_TOTAL_PD_ENTRIES  (X86_PG_PAE_ENTRIES*X86_PG_PAE_PDPE_ENTRIES)
-#  define SHW_POOL_ROOT_IDX     PGMPOOL_IDX_PAE_PD
-# endif
+# if PGM_SHW_TYPE == PGM_TYPE_AMD64
+#  define SHW_PDPT_SHIFT        X86_PDPT_SHIFT
+#  define SHW_PDPT_MASK         X86_PDPT_MASK_AMD64
+#  define SHW_PDPE_PG_MASK      X86_PDPE_PG_MASK
+#  define SHW_TOTAL_PD_ENTRIES  (X86_PG_AMD64_ENTRIES*X86_PG_AMD64_PDPE_ENTRIES)
+#  define SHW_POOL_ROOT_IDX     PGMPOOL_IDX_PAE_PD /* do not use! exception is real mode & protected mode without paging. */
+# else /* 32 bits PAE mode */
+#  define SHW_PDPT_SHIFT        X86_PDPT_SHIFT
+#  define SHW_PDPT_MASK         X86_PDPT_MASK_PAE
+#  define SHW_PDPE_PG_MASK      X86_PDPE_PG_MASK
+#  define SHW_TOTAL_PD_ENTRIES  (X86_PG_PAE_ENTRIES*X86_PG_PAE_PDPE_ENTRIES)
+#  define SHW_POOL_ROOT_IDX     PGMPOOL_IDX_PAE_PD
+# endif
 #endif

@@ -143 +145 @@
      */
 # if PGM_SHW_TYPE == PGM_TYPE_AMD64
-    bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
+    bool            fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
     X86PDEPAE       Pde;

     /* PML4 */
-    const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+    const unsigned  iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
     X86PML4E Pml4e = CTXMID(pVM->pgm.s.p,PaePML4)->a[iPml4];
     if (!Pml4e.n.u1Present)
@@ -153 +155 @@

     /* PDPT */
-    PX86PDPT pPDPT;
+    PX86PDPT        pPDPT;
     int rc = PGM_HCPHYS_2_PTR(pVM, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
     if (VBOX_FAILURE(rc))
         return rc;
-    const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
-    X86PDPE Pdpe = pPDPT->a[iPDPT];
+    const unsigned  iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
+    X86PDPE         Pdpe  = pPDPT->a[iPDPT];
     if (!Pdpe.n.u1Present)
         return VERR_PAGE_TABLE_NOT_PRESENT;

     /* PD */
-    PX86PDPAE pPd;
+    PX86PDPAE       pPd;
     rc = PGM_HCPHYS_2_PTR(pVM, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
     if (VBOX_FAILURE(rc))
         return rc;
-    const unsigned iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
+    const unsigned  iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
     Pde = pPd->a[iPd];

@@ -177 +179 @@

 # elif PGM_SHW_TYPE == PGM_TYPE_PAE
-    bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
-    const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
-    const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
-    X86PDEPAE Pde = CTXMID(pVM->pgm.s.ap,PaePDs)[iPDPT]->a[iPd];
+    bool            fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
+    const unsigned  iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
+    const unsigned  iPd   = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
+    X86PDEPAE       Pde   = CTXMID(pVM->pgm.s.ap,PaePDs)[iPDPT]->a[iPd];
+
 # elif PGM_SHW_TYPE == PGM_TYPE_EPT
     const unsigned  iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);
@@ -194 +197 @@
     Assert(pPDDst);
     Pde = pPDDst->a[iPd];
+
 # else /* PGM_TYPE_32BIT */
-    const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
-    X86PDE Pde = CTXMID(pVM->pgm.s.p,32BitPD)->a[iPd];
+    const unsigned  iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
+    X86PDE          Pde = CTXMID(pVM->pgm.s.p,32BitPD)->a[iPd];
 # endif
     if (!Pde.n.u1Present)
@@ -206 +210 @@
      * Get PT entry.
      */
-    PSHWPT pPT;
+    PSHWPT          pPT;
     if (!(Pde.u & PGM_PDFLAGS_MAPPING))
     {
@@ -230 +234 @@
 # endif
     }
-    const unsigned iPt = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
-    SHWPTE Pte = pPT->a[iPt];
+    const unsigned  iPt = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
+    SHWPTE          Pte = pPT->a[iPt];
     if (!Pte.n.u1Present)
         return VERR_PAGE_NOT_PRESENT;
@@ -290 +294 @@
      */
 # if PGM_SHW_TYPE == PGM_TYPE_AMD64
-    X86PDEPAE Pde;
+    X86PDEPAE       Pde;
     /* PML4 */
-    const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    X86PML4E Pml4e = CTXMID(pVM->pgm.s.p,PaePML4)->a[iPml4];
+    const unsigned  iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+    X86PML4E        Pml4e = CTXMID(pVM->pgm.s.p,PaePML4)->a[iPml4];
     if (!Pml4e.n.u1Present)
         return VERR_PAGE_TABLE_NOT_PRESENT;

     /* PDPT */
-    PX86PDPT pPDPT;
+    PX86PDPT        pPDPT;
     rc = PGM_HCPHYS_2_PTR(pVM, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
     if (VBOX_FAILURE(rc))
         return rc;
-    const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
-    X86PDPE Pdpe = pPDPT->a[iPDPT];
+    const unsigned  iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
+    X86PDPE         Pdpe  = pPDPT->a[iPDPT];
     if (!Pdpe.n.u1Present)
         return VERR_PAGE_TABLE_NOT_PRESENT;

     /* PD */
-    PX86PDPAE pPd;
+    PX86PDPAE       pPd;
     rc = PGM_HCPHYS_2_PTR(pVM, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
     if (VBOX_FAILURE(rc))
@@ -316 +320 @@

 # elif PGM_SHW_TYPE == PGM_TYPE_PAE
-    const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
-    const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
+    const unsigned  iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
+    const unsigned  iPd   = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
     X86PDEPAE Pde = CTXMID(pVM->pgm.s.ap,PaePDs)[iPDPT]->a[iPd];

@@ -333 +337 @@
     Assert(pPDDst);
     Pde = pPDDst->a[iPd];
+
 # else /* PGM_TYPE_32BIT */
-    const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
-    X86PDE Pde = CTXMID(pVM->pgm.s.p,32BitPD)->a[iPd];
+    const unsigned  iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
+    X86PDE          Pde = CTXMID(pVM->pgm.s.p,32BitPD)->a[iPd];
 # endif
     if (!Pde.n.u1Present)
@@ -343 +348 @@
      * Map the page table.
      */
-    PSHWPT pPT;
+    PSHWPT          pPT;
     rc = PGM_HCPHYS_2_PTR(pVM, Pde.u & SHW_PDE_PG_MASK, &pPT);
     if (VBOX_FAILURE(rc))
         return rc;

-    unsigned iPTE = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
+    unsigned        iPTE = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
     while (iPTE < RT_ELEMENTS(pPT->a))
     {