Changeset 8021 in vbox

Timestamp:                   Apr 16, 2008 10:16:22 AM
svn:sync-xref-src-repo-rev:  29680
Location:                    trunk/src/VBox/VMM
Files:                       4 edited
trunk/src/VBox/VMM/PGMInternal.h (r7971 → r8021)

@@ 2578 @@
 int pgmR3ChangeMode(PVM pVM, PGMMODE enmGuestMode);

-int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, int iPDOld);
+int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping);
+int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping);
 PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr);
-void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, int iPDOld, int iPDNew);
+void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping, RTGCPTR GCPtrNewMapping);
 DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);

@@ 3585 @@
 DECLINLINE(bool) pgmMapAreMappingsEnabled(PPGM pPGM)
 {
+#ifdef IN_RING0
+    /* There are no mappings in VT-x and AMD-V mode. */
+    Assert(pPGM->fDisableMappings);
+    return false;
+#else
     return !pPGM->fDisableMappings;
+#endif
 }
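For context on the PGMInternal.h hunk above: in ring-0 with VT-x/AMD-V there are no guest-context mappings, so the predicate becomes constant-false and mapping-related work can be skipped entirely. A minimal standalone sketch of that compile-time split follows; the stub type and function names are hypothetical stand-ins, not the VBox definitions.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the PGM state; only the one flag matters here. */
    typedef struct PGMSTUB
    {
        bool fDisableMappings;
    } PGMSTUB;

    static bool AreMappingsEnabled(const PGMSTUB *pPGM)
    {
    #ifdef IN_RING0
        /* With VT-x/AMD-V there are no guest-context mappings at all. */
        (void)pPGM;
        return false;
    #else
        return !pPGM->fDisableMappings;
    #endif
    }

    int main(void)
    {
        PGMSTUB Pgm = { false };
        printf("mappings enabled: %d\n", AreMappingsEnabled(&Pgm));
        return 0;
    }

Compiled with -DIN_RING0 the predicate is always false; without it, the flag decides.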
trunk/src/VBox/VMM/PGMMap.cpp (r7971 → r8021)

@@ 787 @@
  * Relocates a mapping to a new address.
  *
- * @param   pVM         VM handle.
- * @param   pMapping    The mapping to relocate.
- * @param   iPDOld      Old page directory index.
- * @param   iPDNew      New page directory index.
- */
-void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, int iPDOld, int iPDNew)
-{
-    Log(("PGM: Relocating %s from %#x to %#x\n", pMapping->pszDesc, iPDOld << X86_PD_SHIFT, iPDNew << X86_PD_SHIFT));
+ * @param   pVM             VM handle.
+ * @param   pMapping        The mapping to relocate.
+ * @param   GCPtrOldMapping The address of the start of the old mapping.
+ * @param   GCPtrNewMapping The address of the start of the new mapping.
+ */
+void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping, RTGCPTR GCPtrNewMapping)
+{
+    int iPDOld = GCPtrOldMapping >> X86_PD_SHIFT;
+    int iPDNew = GCPtrNewMapping >> X86_PD_SHIFT;
+
+    Log(("PGM: Relocating %s from %VGv to %VGv\n", pMapping->pszDesc, GCPtrOldMapping, GCPtrNewMapping));
     Assert(((unsigned)iPDOld << X86_PD_SHIFT) == pMapping->GCPtr);

@@ 887 @@
  *
  * @returns VBox status code.
- * @param   pVM         VM Handle.
- * @param   pMapping    The mapping which conflicts.
- * @param   pPDSrc      The page directory of the guest OS.
- * @param   iPDOld      The index to the start of the current mapping.
- */
-int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, int iPDOld)
+ * @param   pVM             VM Handle.
+ * @param   pMapping        The mapping which conflicts.
+ * @param   pPDSrc          The page directory of the guest OS.
+ * @param   GCPtrOldMapping The address of the start of the current mapping.
+ */
+int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping)
 {
     STAM_PROFILE_START(&pVM->pgm.s.StatHCResolveConflict, a);

@@ 930 @@
         /*
-         * Ask the mapping.
-         */
-        if (pMapping->pfnRelocate(pVM, iPDOld << X86_PD_SHIFT, iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))
-        {
-            pgmR3MapRelocate(pVM, pMapping, iPDOld, iPDNew);
+         * Ask for the mapping.
+         */
+        RTGCPTR GCPtrNewMapping = iPDNew << X86_PD_SHIFT;
+
+        if (pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))
+        {
+            pgmR3MapRelocate(pVM, pMapping, GCPtrOldMapping, GCPtrNewMapping);
             STAM_PROFILE_STOP(&pVM->pgm.s.StatHCResolveConflict, a);
             return VINF_SUCCESS;

@@ 941 @@
     STAM_PROFILE_STOP(&pVM->pgm.s.StatHCResolveConflict, a);
-    AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, iPDOld << X86_PD_SHIFT, cPTs));
+    AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, GCPtrOldMapping, cPTs));
     return VERR_PGM_NO_HYPERVISOR_ADDRESS;
 }

+/**
+ * Resolves a conflict between a page table based GC mapping and
+ * the Guest OS page tables. (PAE bits version)
+ *
+ * @returns VBox status code.
+ * @param   pVM             VM Handle.
+ * @param   pMapping        The mapping which conflicts.
+ * @param   GCPtrOldMapping The address of the start of the current mapping.
+ */
+int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping)
+{
+    STAM_PROFILE_START(&pVM->pgm.s.StatHCResolveConflict, a);
+
+    for (unsigned iPDPTE = X86_PG_PAE_PDPE_ENTRIES - 1; iPDPTE >= 0; iPDPTE--)
+    {
+        unsigned  iPDSrc;
+        PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, iPDPTE << X86_PDPT_SHIFT, &iPDSrc);
+
+        /*
+         * Scan for free page directory entries.
+         *
+         * Note that we do not support mappings at the very end of the
+         * address space since that will break our GCPtrEnd assumptions.
+         */
+        const unsigned cPTs   = pMapping->cb >> X86_PD_PAE_SHIFT;
+        unsigned       iPDNew = ELEMENTS(pPDSrc->a) - cPTs; /* (+ 1 - 1) */
+
+        while (iPDNew-- > 0)
+        {
+            /* Ugly assumption that mappings start on a 4 MB boundary. */
+            if (iPDNew & 1)
+                continue;
+
+            if (pPDSrc)
+            {
+                if (pPDSrc->a[iPDNew].n.u1Present)
+                    continue;
+                if (cPTs > 1)
+                {
+                    bool fOk = true;
+                    for (unsigned i = 1; fOk && i < cPTs; i++)
+                        if (pPDSrc->a[iPDNew + i].n.u1Present)
+                            fOk = false;
+                    if (!fOk)
+                        continue;
+                }
+            }
+            /*
+             * Check that it's not conflicting with an intermediate page table mapping.
+             */
+            bool     fOk = true;
+            unsigned i   = cPTs;
+            while (fOk && i-- > 0)
+                fOk = !pVM->pgm.s.apInterPaePDs[iPDPTE]->a[iPDNew + i].n.u1Present;
+            if (!fOk)
+                continue;
+
+            /*
+             * Ask for the mapping.
+             */
+            RTGCPTR GCPtrNewMapping = iPDPTE << X86_PDPT_SHIFT + iPDNew << X86_PD_PAE_SHIFT;
+
+            if (pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))
+            {
+                pgmR3MapRelocate(pVM, pMapping, GCPtrOldMapping, GCPtrNewMapping);
+                STAM_PROFILE_STOP(&pVM->pgm.s.StatHCResolveConflict, a);
+                return VINF_SUCCESS;
+            }
+        }
+    }
+    STAM_PROFILE_STOP(&pVM->pgm.s.StatHCResolveConflict, a);
+    AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, GCPtrOldMapping, pMapping->cb >> X86_PD_PAE_SHIFT));
+    return VERR_PGM_NO_HYPERVISOR_ADDRESS;
+}

 /**

@@ 963 @@
         return false;

-    Assert(PGMGetGuestMode(pVM) <= PGMMODE_32_BIT);
-
-    /*
-     * Resolve the page directory.
-     */
-    PX86PD pPD = pVM->pgm.s.pGuestPDHC; /** @todo Fix PAE! */
-    Assert(pPD);
-    Assert(pPD == (PX86PD)MMPhysGCPhys2HCVirt(pVM, cr3 & X86_CR3_PAGE_MASK, sizeof(*pPD)));
+    Assert(PGMGetGuestMode(pVM) <= PGMMODE_PAE);

     /*
      * Iterate mappings.
      */
-    for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
-    {
-        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
-        unsigned iPT  = pCur->cPTs;
-        while (iPT-- > 0)
-            if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
-                &&  (fRawR0 || pPD->a[iPDE + iPT].n.u1User))
-            {
-                STAM_COUNTER_INC(&pVM->pgm.s.StatHCDetectedConflicts);
-#if 1
-                Log(("PGMR3HasMappingConflicts: Conflict was detected at %VGv for mapping %s\n"
-                     "                          iPDE=%#x iPT=%#x PDE=%VGp.\n",
-                    (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
-                    iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
-#else
-                AssertMsgFailed(("PGMR3HasMappingConflicts: Conflict was detected at %VGv for mapping %s\n"
-                                 "                          iPDE=%#x iPT=%#x PDE=%VGp.\n",
-                                (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
-                                iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
-#endif
-                return true;
-            }
-    }
+    if (PGMGetGuestMode(pVM) == PGMMODE_32_BIT)
+    {
+        /*
+         * Resolve the page directory.
+         */
+        PX86PD pPD = pVM->pgm.s.pGuestPDHC;
+        Assert(pPD);
+        Assert(pPD == (PX86PD)MMPhysGCPhys2HCVirt(pVM, cr3 & X86_CR3_PAGE_MASK, sizeof(*pPD)));
+
+        for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
+        {
+            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
+            unsigned iPT  = pCur->cPTs;
+            while (iPT-- > 0)
+                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
+                    &&  (fRawR0 || pPD->a[iPDE + iPT].n.u1User))
+                {
+                    STAM_COUNTER_INC(&pVM->pgm.s.StatHCDetectedConflicts);
+                    Log(("PGMR3HasMappingConflicts: Conflict was detected at %VGv for mapping %s (32 bits)\n"
+                         "                          iPDE=%#x iPT=%#x PDE=%VGp.\n",
+                        (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
+                        iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
+                    return true;
+                }
+        }
+    }
+    else
+    if (PGMGetGuestMode(pVM) == PGMMODE_PAE)
+    {
+        for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
+        {
+            X86PDEPAE Pde;
+            RTGCPTR   GCPtr = pCur->GCPtr;
+
+            unsigned  iPT = pCur->cb >> X86_PD_PAE_SHIFT;
+            while (iPT-- > 0)
+            {
+                Pde.u = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
+
+                if (   Pde.n.u1Present
+                    && (fRawR0 || Pde.n.u1User))
+                {
+                    STAM_COUNTER_INC(&pVM->pgm.s.StatHCDetectedConflicts);
+                    Log(("PGMR3HasMappingConflicts: Conflict was detected at %VGv for mapping %s (PAE)\n"
+                         "                          PDE=%VGp.\n",
+                        GCPtr, pCur->pszDesc, Pde.u));
+                    return true;
+                }
+                GCPtr += (1 << X86_PD_PAE_SHIFT);
+            }
+        }
+    }
+    else
+        AssertFailed();

     return false;
trunk/src/VBox/VMM/PGMPhys.cpp (r7755 → r8021)

@@ 1013 @@
      * Validate input
      */
-    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
-    AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
-    AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
-    AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
-    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
+    VM_ASSERT_EMT_RETURN(pVM, false);
+    AssertPtrReturn(pDevIns, false);
+    AssertReturn(GCPhys != NIL_RTGCPHYS, false);
+    AssertReturn(GCPhys != 0, false);
+    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);

     /*
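The PGMPhys.cpp hunk switches the validation macros to return `false` because the surrounding function returns a bool rather than a VBox status code, and the second argument of these `...Return` macros is what the caller receives on a failed check. A minimal sketch of the pattern with a simplified stand-in macro (not the IPRT definition):

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for an AssertReturn-style macro: on a failed check,
     * return the given value from the enclosing function. */
    #define ASSERT_RETURN_STUB(expr, rcRet) \
        do { if (!(expr)) return (rcRet); } while (0)

    /* A bool-returning validator must hand back a bool, not a status code. */
    static bool IsPageAligned(unsigned long uAddr)
    {
        ASSERT_RETURN_STUB(uAddr != 0, false);
        ASSERT_RETURN_STUB((uAddr & 0xFFF) == 0, false);
        return true;
    }

    int main(void)
    {
        printf("0x1000 aligned: %d\n", IsPageAligned(0x1000));
        printf("0x1234 aligned: %d\n", IsPageAligned(0x1234));
        return 0;
    }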
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r8002 → r8021)

@@ 59 @@
 #endif

+#ifdef IN_RING0 /* no mappings in VT-x and AMD-V mode */
+#define PGM_WITHOUT_MAPPINGS
+#endif
+
 /**
  * #PF Handler for raw-mode guest execution.

@@ 214 @@
                 if (!pVM->pgm.s.fMappingsFixed)
                 {
-                    unsigned iPT = pMapping->cPTs;
+                    unsigned iPT = pMapping->cb >> GST_PD_SHIFT;
                     while (iPT-- > 0)
                         if (pPDSrc->a[iPDSrc + iPT].n.u1Present)

@@ 1960 @@
     SHWPDE PdeDst = *pPdeDst;

-# if PGM_GST_TYPE == PGM_TYPE_32BIT
+# ifndef PGM_WITHOUT_MAPPINGS
     /*
      * Check for conflicts.

@@ 1976 @@
         PPGMMAPPING pMapping = pgmGetMapping(pVM, (RTGCPTR)GCPtrPage);
         Assert(pMapping);
-        int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPDSrc);
+#  if PGM_GST_TYPE == PGM_TYPE_32BIT
+        int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
+#  elif PGM_GST_TYPE == PGM_TYPE_PAE
+        int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
+#  endif
         if (VBOX_FAILURE(rc))
         {

@@ 1985 @@
 #  endif
     }
-# elif PGM_GST_TYPE == PGM_TYPE_AMD64
-    /* PAE and AMD64 modes are hardware accelerated only, so there are no mappings. */
+# else  /* PGM_WITHOUT_MAPPINGS */
     Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
-# endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */
+# endif /* !PGM_WITHOUT_MAPPINGS */
     Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/

@@ 2678 @@
     {
         pMapping     = pVM->pgm.s.CTXALLSUFF(pMappings);
-        iPdNoMapping = (pMapping) ? pMapping->GCPtr >> X86_PD_SHIFT : ~0U;  /** PAE todo */
+        iPdNoMapping = (pMapping) ? (pMapping->GCPtr >> GST_PD_SHIFT) : ~0U;
     }
     else

@@ 2729 @@
             && (PdeSrc.n.u1User || fRawR0Enabled))
         {
-# if PGM_GST_TYPE == PGM_TYPE_32BIT
+# if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
+       || PGM_GST_TYPE == PGM_TYPE_PAE) \
+    && !defined(PGM_WITHOUT_MAPPINGS)
+
             /*
              * Check for conflicts with GC mappings.
              */
+#  if PGM_GST_TYPE == PGM_TYPE_PAE
+            if (iPD + iPDPTE * X86_PG_PAE_ENTRIES == iPdNoMapping)
+#  else
             if (iPD == iPdNoMapping)
+#  endif
             {
                 if (pVM->pgm.s.fMappingsFixed)
                 {
                     /* It's fixed, just skip the mapping. */
-                    const unsigned cPTs = pMapping->cPTs;
+                    const unsigned cPTs = pMapping->cb >> SHW_PD_SHIFT;
                     iPD += cPTs - 1;
-                    pPDEDst += cPTs + (PGM_SHW_TYPE != PGM_TYPE_32BIT) * cPTs;
+                    pPDEDst += cPTs + cPTs;
                     pMapping = pMapping->CTXALLSUFF(pNext);
-                    iPdNoMapping = pMapping ? pMapping->GCPtr >> X86_PD_SHIFT : ~0U;
+                    iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
                     continue;
                 }
 # ifdef IN_RING3
-                int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD);
+#  if PGM_GST_TYPE == PGM_TYPE_32BIT
+                int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT);
+#  elif PGM_GST_TYPE == PGM_TYPE_PAE
+                int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, iPDPTE << GST_PDPT_SHIFT + iPD << GST_PD_SHIFT);
+#  endif
                 if (VBOX_FAILURE(rc))
                     return rc;

@@ 2754 @@
                  */
                 pMapping = pVM->pgm.s.pMappingsR3;
-                while (pMapping && pMapping->GCPtr < (iPD << X86_PD_SHIFT))
+                while (pMapping && pMapping->GCPtr < (iPD << GST_PD_SHIFT))
                     pMapping = pMapping->pNextR3;
-                iPdNoMapping = pMapping ? pMapping->GCPtr >> X86_PD_SHIFT : ~0U;
+                iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
 # else
                 LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));

@@ 2762 @@
 # endif
             }
-# else  /* PGM_GST_TYPE != PGM_TYPE_32BIT */
-            /* PAE and AMD64 modes are hardware accelerated only, so there are no mappings. */
-            Assert(iPD != iPdNoMapping);
-# endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
+# else  /* PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE && PGM_WITHOUT_MAPPINGS */
+            Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
+# endif /* (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE) && !PGM_WITHOUT_MAPPINGS */
             /*
              * Sync page directory entry.

@@ 2853 @@
                 }
             }
+# if PGM_GST_TYPE == PGM_TYPE_PAE
+            else if (iPD + iPDPTE * X86_PG_PAE_ENTRIES != iPdNoMapping)
+# else
             else if (iPD != iPdNoMapping)
+# endif
             {
                 /*

@@ 2877 @@
             else
             {
-# if PGM_GST_TYPE == PGM_TYPE_32BIT
+# if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
+       || PGM_GST_TYPE == PGM_TYPE_PAE) \
+    && !defined(PGM_WITHOUT_MAPPINGS)
+
                 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
-                const unsigned cPTs = pMapping->cPTs;
                 if (pVM->pgm.s.fMappingsFixed)
                 {
                     /* It's fixed, just skip the mapping. */
                     pMapping = pMapping->CTXALLSUFF(pNext);
-                    iPdNoMapping = pMapping ? pMapping->GCPtr >> X86_PD_SHIFT : ~0U;
+                    iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
                 }
                 else

@@ 2893 @@
                      */
                     iPdNoMapping = ~0U;
-                    unsigned iPT = cPTs;
+                    unsigned iPT = pMapping->cb >> GST_PD_SHIFT;
                     while (iPT-- > 1)
                     {

@@ 2900 @@
                         {
 # ifdef IN_RING3
-                            int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD);
+#  if PGM_GST_TYPE == PGM_TYPE_32BIT
+                            int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT);
+#  elif PGM_GST_TYPE == PGM_TYPE_PAE
+                            int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, iPDPTE << GST_PDPT_SHIFT + iPD << GST_PD_SHIFT);
+#  endif
                             if (VBOX_FAILURE(rc))
                                 return rc;

@@ 2908 @@
                              */
                             pMapping = pVM->pgm.s.CTXALLSUFF(pMappings);
-                            while (pMapping && pMapping->GCPtr < (iPD << X86_PD_SHIFT))
+                            while (pMapping && pMapping->GCPtr < (iPD << GST_PD_SHIFT))
                                 pMapping = pMapping->CTXALLSUFF(pNext);
-                            iPdNoMapping = pMapping ? pMapping->GCPtr >> X86_PD_SHIFT : ~0U;
+                            iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
                             break;
 # else

@@ 2922 @@
                         pMapping = pMapping->CTXALLSUFF(pNext);
                         if (pMapping)
-                            iPdNoMapping = pMapping->GCPtr >> X86_PD_SHIFT;
+                            iPdNoMapping = pMapping->GCPtr >> GST_PD_SHIFT;
                     }
                 }

                 /* advance. */
+                const unsigned cPTs = pMapping->cb >> SHW_PD_SHIFT;
                 iPD += cPTs - 1;
-                pPDEDst += cPTs + (PGM_SHW_TYPE != PGM_TYPE_32BIT) * cPTs;
-# else  /* PGM_GST_TYPE != PGM_TYPE_32BIT */
-                /* PAE and AMD64 modes are hardware accelerated only, so there are no mappings. */
-                AssertFailed();
-# endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
+                pPDEDst += cPTs + cPTs;
+# else  /* PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE && PGM_WITHOUT_MAPPINGS */
+                Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
+# endif /* (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE) && !PGM_WITHOUT_MAPPINGS */
             }
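Several PGMAllBth.h hunks above replace the stored cPTs count with a value derived from the mapping size (cb >> GST_PD_SHIFT or cb >> SHW_PD_SHIFT), since the number of page-directory entries a mapping spans depends on the paging mode: a 32-bit PDE covers 4 MB while a PAE PDE covers 2 MB. A standalone sketch of that arithmetic, with assumed shift values that are not taken from the VBox headers:

    #include <stdio.h>

    /* Assumed shift values (not taken from the VBox headers). */
    #define PD_SHIFT_32BIT  22  /* 32-bit paging: one PDE covers 4 MB */
    #define PD_SHIFT_PAE    21  /* PAE paging:    one PDE covers 2 MB */

    int main(void)
    {
        unsigned cbMapping = 8u * 1024 * 1024;  /* an 8 MB guest-context mapping */

        /* The same mapping needs twice as many PDEs under PAE as under 32-bit paging. */
        printf("32-bit PDEs: %u\n", cbMapping >> PD_SHIFT_32BIT);  /* 2 */
        printf("PAE PDEs:    %u\n", cbMapping >> PD_SHIFT_PAE);    /* 4 */
        return 0;
    }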