Timestamp:
    Nov 3, 2021 8:31:27 AM
Location:
    trunk
Files:
    1 added
    5 edited
Legend:
    Unmodified lines are shown without a prefix.
    Added lines are prefixed with "+".
    Removed lines are prefixed with "-".
trunk/include/VBox/vmm/pgm.h
    --- r92164
    +++ r92186
    @@ -274 +274 @@
     } PGMMODE;
     
    +/**
    + * Second level address translation mode.
    + */
    +typedef enum PGMSLAT
    +{
    +    /** The usual invalid value. */
    +    PGMSLAT_INVALID = 0,
    +    /** No second level translation. */
    +    PGMSLAT_DIRECT,
    +    /** Intel Extended Page Tables (EPT). */
    +    PGMSLAT_EPT,
    +    /** AMD-V Nested Paging 32-bit. */
    +    PGMSLAT_32BIT,
    +    /** AMD-V Nested Paging PAE. */
    +    PGMSLAT_PAE,
    +    /** AMD-V Nested Paging 64-bit. */
    +    PGMSLAT_AMD64,
    +    /** 32bit hackishness. */
    +    PGMSLAT_32BIT_HACK = 0x7fffffff
    +} PGMSLAT;
    +
     /** Macro for checking if the guest is using paging.
      * @param   enmMode   PGMMODE_*.
    @@ -351 +372 @@
     VMMDECL(PGMMODE)        PGMGetHostMode(PVM pVM);
     VMMDECL(const char *)   PGMGetModeName(PGMMODE enmMode);
    +#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    +VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode);
    +#endif
     VMM_INT_DECL(RTGCPHYS)  PGMGetGuestCR3Phys(PVMCPU pVCpu);
     VMM_INT_DECL(void)      PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe);
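For orientation, here is a minimal sketch of how the new enum and the PGMGetSlatModeName() accessor might be used together for release logging. The helper name logGuestSlatMode is hypothetical (not part of this changeset), and it assumes a VBOX_WITH_NESTED_HWVIRT_VMX_EPT build with access to the per-VCPU SLAT state that the PGMInternal.h hunks below add as pVCpu->pgm.s.enmGuestSlatMode:

    #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    /* Hypothetical logging helper, for illustration only. */
    static void logGuestSlatMode(PVMCPUCC pVCpu)
    {
        /* PGMSLAT_INVALID means PGM hasn't initialized this VCPU's state yet. */
        PGMSLAT const enmSlatMode = pVCpu->pgm.s.enmGuestSlatMode;
        if (enmSlatMode != PGMSLAT_INVALID)
            LogRel(("PGM: Guest SLAT mode: %s\n", PGMGetSlatModeName(enmSlatMode)));
    }
    #endif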
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
    --- r92046
    +++ r92186
    @@ -50 +50 @@
     DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
     DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
    +#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    +static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALKGST pWalk);
    +#endif
     static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
     static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
    +
    +
    +#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    +/* Guest - EPT SLAT is identical for all guest paging modes. */
    +# define PGM_SLAT_TYPE  PGM_SLAT_TYPE_EPT
    +# define PGM_GST_TYPE   PGM_TYPE_EPT
    +# include "PGMGstDefs.h"
    +# include "PGMAllGstSlatEpt.h"
    +# undef PGM_GST_TYPE
    +#endif
    @@ -489 +502 @@
     #endif /* VBOX_WITH_64_BITS_GUESTS */
     
    -#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    -/* Guest - EPT mode */
    -# define PGM_GST_TYPE               PGM_TYPE_EPT
    -# define PGM_GST_NAME(name)         PGM_GST_NAME_EPT(name)
    -# define PGM_BTH_NAME(name)         PGM_BTH_NAME_EPT_EPT(name)
    -# define BTH_PGMPOOLKIND_PT_FOR_PT  PGMPOOLKIND_EPT_PT_FOR_PHYS
    -# include "PGMGstDefs.h"
    -# include "PGMAllGst.h"
    -# include "PGMAllBth.h"
    -# undef BTH_PGMPOOLKIND_PT_FOR_PT
    -# undef PGM_BTH_NAME
    -# undef PGM_GST_TYPE
    -# undef PGM_GST_NAME
    -#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
    -
     #undef PGM_SHW_TYPE
     #undef PGM_SHW_NAME
    @@ -629 +627 @@
     # endif
     },
    -#endif
    -#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    -{ UINT32_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_NESTED_32BIT */
    -{ UINT32_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_NESTED_PAE */
    -{ UINT32_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_NESTED_AMD64 */
    -{
    -    PGM_TYPE_EPT,
    -    PGM_GST_NAME_EPT(GetPage),
    -    PGM_GST_NAME_EPT(ModifyPage),
    -    PGM_GST_NAME_EPT(Enter),
    -    PGM_GST_NAME_EPT(Exit),
    -# ifdef IN_RING3
    -    PGM_GST_NAME_EPT(Relocate),
    -# endif
    -}
     #endif
     };
    @@ -893 +876 @@
     PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
     PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
    -#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    -PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_EPT, PGM_BTH_NAME_EPT_EPT),
    -#else
     PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
    -#endif
     PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */
    @@ -1966 +1945 @@
             return VERR_PGM_NOT_USED_IN_MODE;
     
    -#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
             case PGMMODE_EPT:
    -            pWalk->enmType = PGMPTWALKGSTTYPE_EPT;
    -            return PGM_GST_NAME_EPT(Walk)(pVCpu, GCPtr, &pWalk->u.Ept);
    -#else
    -        case PGMMODE_EPT:
    -#endif
             case PGMMODE_NESTED_32BIT:
             case PGMMODE_NESTED_PAE:
    @@ -1982 +1955 @@
         }
     }
    +
    +
    +#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    +/**
    + * Performs a guest second-level address translation (SLAT).
    + *
    + * The guest paging mode must be 32-bit, PAE or AMD64 when making a call to
    + * this function.
    + *
    + * @returns VBox status code.
    + * @retval  VINF_SUCCESS on success.
    + * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
    + * @retval  VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
    + *          not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
    + *
    + * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
    + * @param   GCPhysNested        The nested-guest physical address being translated (input).
    + * @param   fIsLinearAddrValid  Whether the linear address in @a GCPtrNested is
    + *                              valid. This indicates the SLAT is caused when
    + *                              translating a nested-guest linear address.
    + * @param   GCPtrNested         The nested-guest virtual address that initiated
    + *                              the SLAT. If none, pass NIL_RTGCPTR.
    + * @param   pWalk               Where to return the walk result. This is valid for
    + *                              some error codes as well.
    + */
    +static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
    +                          PPGMPTWALKGST pWalk)
    +{
    +    Assert(pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT);
    +    switch (pVCpu->pgm.s.enmGuestSlatMode)
    +    {
    +        case PGMSLAT_EPT:
    +            pWalk->enmType = PGMPTWALKGSTTYPE_EPT;
    +            return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, &pWalk->u.Ept);
    +
    +        default:
    +            AssertFailed();
    +            pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
    +            return VERR_PGM_NOT_USED_IN_MODE;
    +    }
    +}
    +#endif
    @@ -2906 +2922 @@
          */
         PGMMODE enmGuestMode;
    -#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    -    if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
    -        enmGuestMode = PGMMODE_EPT;
    -    else
    -#endif
         if (cr0 & X86_CR0_PG)
         {
    @@ -3116 +3127 @@
             }
             break;
    -
    -#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    -        case PGMMODE_EPT:
    -            /* Nested paging is a requirement for nested VT-x. */
    -            Assert(enmHostMode == PGMMODE_EPT);
    -            break;
    -#endif
     
             default:
    @@ -3340 +3344 @@
         }
     
    +#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    +    /* Update the guest SLAT mode if it's a nested-guest. */
    +    if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
    +    {
    +        if (PGMMODE_WITH_PAGING(enmGuestMode))
    +            pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
    +        else
    +            pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;
    +    }
    +    else
    +        Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT);
    +#endif
    +
         /* Enter the new guest mode. */
         pVCpu->pgm.s.enmGuestMode = enmGuestMode;
    @@ -3484 +3501 @@
     
     
    +#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    +/**
    + * Gets the SLAT mode name.
    + *
    + * @returns The read-only SLAT mode descriptive string.
    + * @param   enmSlatMode     The SLAT mode value.
    + */
    +VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode)
    +{
    +    switch (enmSlatMode)
    +    {
    +        case PGMSLAT_DIRECT:    return "Direct";
    +        case PGMSLAT_EPT:       return "EPT";
    +        case PGMSLAT_32BIT:     return "32-bit";
    +        case PGMSLAT_PAE:       return "PAE";
    +        case PGMSLAT_AMD64:     return "AMD64";
    +        default:                return "Unknown";
    +    }
    +}
    +#endif
    +
    +
     /**
      * Gets the physical address represented in the guest CR3 as PGM sees it.
    @@ -3839 +3878 @@
     VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
     {
    -    pVCpu->pgm.s.uEptPtr = uEptPtr;
    -}
    -
    +    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    +    PGM_LOCK_VOID(pVM);
    +    if (pVCpu->pgm.s.uEptPtr != uEptPtr)
    +    {
    +        pVCpu->pgm.s.uEptPtr = uEptPtr;
    +        pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
    +        pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
    +    }
    +    PGM_UNLOCK(pVM);
    +}
    +
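Taken together, these hunks give PGMAll.cpp a clean split between the two translation stages: the regular guest walkers resolve a nested-guest linear address to a nested-guest physical address, and pgmGstSlatWalk() then resolves that guest-physical address through the guest's EPT tables. A conceptual caller-side sketch (the function name is illustrative, and it assumes a nested guest with EPT paging active so the PGMSLAT_DIRECT assertion in pgmGstSlatWalk() holds):

    /* Hypothetical wrapper, for illustration only: translate a nested-guest
       physical address when no initiating linear address is involved. */
    static int sketchSlatTranslate(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, PRTGCPHYS pGCPhysOut)
    {
        PGMPTWALKGST Walk;
        /* No nested-guest linear address here, so the flag is false and
           NIL_RTGCPTR is passed as the initiating address. */
        int rc = pgmGstSlatWalk(pVCpu, GCPhysNested, false /*fIsLinearAddrValid*/, NIL_RTGCPTR, &Walk);
        if (RT_SUCCESS(rc))
            *pGCPhysOut = Walk.u.Core.GCPhys;
        return rc;
    }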
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
    --- r92076
    +++ r92186
    @@ -23 +23 @@
     #if PGM_GST_TYPE == PGM_TYPE_32BIT \
      || PGM_GST_TYPE == PGM_TYPE_PAE \
    - || PGM_GST_TYPE == PGM_TYPE_EPT \
      || PGM_GST_TYPE == PGM_TYPE_AMD64
     DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk);
    @@ -74 +73 @@
     #if PGM_GST_TYPE == PGM_TYPE_32BIT \
      || PGM_GST_TYPE == PGM_TYPE_PAE \
    - || PGM_GST_TYPE == PGM_TYPE_EPT \
      || PGM_GST_TYPE == PGM_TYPE_AMD64
     
    @@ -117 +115 @@
     {
         int rc;
    +
    +#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    +# define PGM_GST_SLAT_WALK(a_pVCpu, a_GCPtrNested, a_GCPhysNested, a_GCPhysOut, a_pWalk) \
    +    do { \
    +        if ((a_pVCpu)->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT) \
    +        { \
    +            PGMPTWALKGST SlatWalk; \
    +            int const rcX = pgmGstSlatWalk(a_pVCpu, a_GCPhysNested, true /* fIsLinearAddrValid */, a_GCPtrNested, &SlatWalk); \
    +            if (RT_SUCCESS(rcX)) \
    +                (a_GCPhysOut) = SlatWalk.u.Core.GCPhys; \
    +            else \
    +            { \
    +                (a_pWalk)->Core = SlatWalk.u.Core; \
    +                return rcX; \
    +            } \
    +        } \
    +    } while (0)
    +#endif
     
         /*
    @@ -137 +153 @@
     # if PGM_GST_TYPE == PGM_TYPE_AMD64
         /*
    -     * The PML4.
    +     * The PML4 table.
         */
        rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pWalk->pPml4);
    @@ -158 +174 @@
     
        /*
    -    * The PDPE.
    +    * The PDPT.
         */
    -   rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pWalk->pPdpt);
    +   RTGCPHYS GCPhysPdpt = Pml4e.u & X86_PML4E_PG_MASK;
    +#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    +   PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPdpt, GCPhysPdpt, pWalk);
    +#endif
    +   rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &pWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
    @@ -168 +188 @@
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
    -
    -# elif PGM_GST_TYPE == PGM_TYPE_EPT
    -   rc = pgmGstGetEptPML4PtrEx(pVCpu, &pWalk->pPml4);
    -   if (RT_SUCCESS(rc)) { /* probable */ }
    -   else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);
    -
    -   PEPTPML4E pPml4e;
    -   pWalk->pPml4e = pPml4e = &pWalk->pPml4->a[(GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK];
    -   EPTPML4E Pml4e;
    -   pWalk->Pml4e.u = Pml4e.u = pPml4e->u;
    -
    -   if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
    -   else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4);
    -
    -   if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ }
    -   else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4);
    -
    -   Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
    -   uint64_t const fEptAttrs     = Pml4e.u & EPT_PML4E_ATTR_MASK;
    -   uint8_t const fExecute       = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
    -   uint8_t const fRead          = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
    -   uint8_t const fWrite         = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
    -   uint8_t const fAccessed      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
    -   uint32_t const fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
    -   pWalk->Core.fEffective = fEffective = RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,  fExecute)
    -                                       | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fRead & fWrite)
    -                                       | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
    -                                       | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,  fAccessed)
    -                                       | fEffectiveEpt;
    -
    -   rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & EPT_PML4E_PG_MASK, &pWalk->pPdpt);
    -   if (RT_SUCCESS(rc)) { /* probable */ }
    -   else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
    -# endif
    +#endif
     }
     {
    @@ -226 +213 @@
     
        /*
    -    * The PDE.
    +    * The PD.
         */
    -   rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pWalk->pPd);
    +   RTGCPHYS GCPhysPd = Pdpe.u & X86_PDPE_PG_MASK;
    +# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    +   PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPd, GCPhysPd, pWalk);
    +# endif
    +   rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPd, &pWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc);
    +
     # elif PGM_GST_TYPE == PGM_TYPE_32BIT
        rc = pgmGstGet32bitPDPtrEx(pVCpu, &pWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
    -
    -# elif PGM_GST_TYPE == PGM_TYPE_EPT
    -   PEPTPDPTE pPdpte;
    -   pWalk->pPdpte = pPdpte = &pWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
    -   EPTPDPTE Pdpte;
    -   pWalk->Pdpte.u = Pdpte.u = pPdpte->u;
    -
    -   if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpte)) { /* probable */ }
    -   else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3);
    -
    -   /* The order of the following 2 "if" statements matter. */
    -   if (GST_IS_PDPE_VALID(pVCpu, Pdpte))
    -   {
    -       uint64_t const fEptAttrs     = Pdpte.u & EPT_PDPTE_ATTR_MASK;
    -       uint8_t const fExecute       = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
    -       uint8_t const fWrite         = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
    -       uint8_t const fAccessed      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
    -       uint32_t const fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
    -       pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,  fExecute)
    -                                            | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
    -                                            | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
    -                                            | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,  fAccessed)
    -                                            | fEffectiveEpt;
    -   }
    -   else if (GST_IS_BIG_PDPE_VALID(pVCpu, Pdpte))
    -   {
    -       uint64_t const fEptAttrs     = Pdpte.u & EPT_PDPTE1G_ATTR_MASK;
    -       uint8_t const fExecute       = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
    -       uint8_t const fWrite         = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
    -       uint8_t const fAccessed      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
    -       uint8_t const fDirty         = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
    -       uint32_t const fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
    -       pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,  fExecute)
    -                                            | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
    -                                            | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
    -                                            | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,  fAccessed)
    -                                            | RT_BF_MAKE(PGM_BF_PTWALK_EFF_D,  fDirty)
    -                                            | RT_BF_MAKE(PGM_BF_PTWALK_EFF_MEMTYPE, 0)
    -                                            | fEffectiveEpt;
    -       pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
    -       pWalk->Core.fEffectiveUS = true;
    -       pWalk->Core.fEffectiveNX = !fExecute;
    -       pWalk->Core.fGigantPage  = true;
    -       pWalk->Core.fSucceeded   = true;
    -       pWalk->Core.GCPhys       = GST_GET_BIG_PDPE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pdpte)
    -                                | (GCPtr & GST_GIGANT_PAGE_OFFSET_MASK);
    -       PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
    -       return VINF_SUCCESS;
    -   }
    -   else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3);
     # endif
     }
    @@ -302 +244 @@
         * We're done.
         */
    -# if PGM_GST_TYPE == PGM_TYPE_EPT
    -       uint64_t const fEptAttrs = Pde.u & EPT_PDE2M_ATTR_MASK;
    -       uint8_t const fExecute   = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
    -       uint8_t const fWrite     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
    -       uint8_t const fAccessed  = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
    -       uint8_t const fDirty     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
    -       uint32_t fEffectiveEpt   = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
    -       pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,  fExecute)
    -                                            | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
    -                                            | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
    -                                            | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,  fAccessed)
    -                                            | RT_BF_MAKE(PGM_BF_PTWALK_EFF_D,  fDirty)
    -                                            | RT_BF_MAKE(PGM_BF_PTWALK_EFF_MEMTYPE, 0)
    -                                            | fEffectiveEpt;
    +# if PGM_GST_TYPE == PGM_TYPE_32BIT
    +       fEffective &= Pde.u & (X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
    +# else
    +       fEffective &= ((uint32_t)Pde.u & (X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A))
    +                   | ((uint32_t)(Pde.u >> 63) ^ 1) /*NX */;
    +# endif
    +       fEffective |= (uint32_t)Pde.u & (X86_PDE4M_D | X86_PDE4M_G);
    +       fEffective |= (uint32_t)(Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT;
    +       pWalk->Core.fEffective = fEffective;
    +
            pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
    -       pWalk->Core.fEffectiveUS = true;
    -       pWalk->Core.fEffectiveNX = !fExecute;
    -# else
    -#  if PGM_GST_TYPE == PGM_TYPE_32BIT
    -       fEffective &= Pde.u & (X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
    -#  else
    -       fEffective &= ((uint32_t)Pde.u & (X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A))
    -                   | ((uint32_t)(Pde.u >> 63) ^ 1) /*NX */;
    -#  endif
    -       fEffective |= (uint32_t)Pde.u & (X86_PDE4M_D | X86_PDE4M_G);
    -       fEffective |= (uint32_t)(Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT;
    -       pWalk->Core.fEffective = fEffective;
    -
    -       pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
    -       pWalk->Core.fEffectiveUS = !!(fEffective & X86_PTE_US);
    -#  if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
    -       pWalk->Core.fEffectiveNX = !(fEffective & 1) && GST_IS_NX_ACTIVE(pVCpu);
    -#  else
    -       pWalk->Core.fEffectiveNX = false;
    -#  endif
    +       pWalk->Core.fEffectiveUS = !!(fEffective & X86_PTE_US);
    +# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
    +       pWalk->Core.fEffectiveNX = !(fEffective & 1) && GST_IS_NX_ACTIVE(pVCpu);
    +# else
    +       pWalk->Core.fEffectiveNX = false;
     # endif
            pWalk->Core.fBigPage   = true;
            pWalk->Core.fSucceeded = true;
     
    -       pWalk->Core.GCPhys     = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
    -                              | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
    +       RTGCPHYS GCPhysPde = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
    +                          | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
    +# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    +       PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPde, GCPhysPde, pWalk);
    +# endif
    +       pWalk->Core.GCPhys = GCPhysPde;
            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
            return VINF_SUCCESS;
    @@ -349 +276 @@
        if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde)))
            return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);
    -# if PGM_GST_TYPE == PGM_TYPE_EPT
    -   uint64_t const fEptAttrs     = Pde.u & EPT_PDE_ATTR_MASK;
    -   uint8_t const fExecute       = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
    -   uint8_t const fWrite         = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
    -   uint8_t const fAccessed      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
    -   uint32_t const fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
    -   pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,  fExecute)
    -                                        | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
    -                                        | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
    -                                        | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,  fAccessed)
    -                                        | fEffectiveEpt;
    -# elif PGM_GST_TYPE == PGM_TYPE_32BIT
    +# if PGM_GST_TYPE == PGM_TYPE_32BIT
        pWalk->Core.fEffective = fEffective &= Pde.u & (X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
     # else
    @@ -368 +284 @@
     
        /*
    -    * The PTE.
    +    * The PT.
         */
    -   rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pWalk->pPt);
    +   RTGCPHYS GCPhysPt = GST_GET_PDE_GCPHYS(Pde);
    +# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    +   PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPt, GCPhysPt, pWalk);
    +# endif
    +   rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pWalk->pPt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
    @@ -389 +309 @@
         * We're done.
         */
    -# if PGM_GST_TYPE == PGM_TYPE_EPT
    -   uint64_t const fEptAttrs = Pte.u & EPT_PTE_ATTR_MASK;
    -   uint8_t const fExecute   = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
    -   uint8_t const fWrite     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
    -   uint8_t const fAccessed  = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
    -   uint8_t const fDirty     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
    -   uint32_t fEffectiveEpt   = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
    -   pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,  fExecute)
    -                                        | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
    -                                        | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
    -                                        | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,  fAccessed)
    -                                        | RT_BF_MAKE(PGM_BF_PTWALK_EFF_D,  fDirty)
    -                                        | RT_BF_MAKE(PGM_BF_PTWALK_EFF_MEMTYPE, 0)
    -                                        | fEffectiveEpt;
    -   pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
    -   pWalk->Core.fEffectiveUS = true;
    -   pWalk->Core.fEffectiveNX = !fExecute;
    -# else
    -#  if PGM_GST_TYPE == PGM_TYPE_32BIT
    +# if PGM_GST_TYPE == PGM_TYPE_32BIT
        fEffective &= Pte.u & (X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A);
    -#  else
    +# else
        fEffective &= ((uint32_t)Pte.u & (X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A))
                    | ((uint32_t)(Pte.u >> 63) ^ 1) /*NX */;
    -#  endif
    +# endif
        fEffective |= (uint32_t)Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G);
        pWalk->Core.fEffective = fEffective;
     
        pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
    -# if PGM_GST_TYPE == PGM_TYPE_EPT
    -   pWalk->Core.fEffectiveUS = true;
    -# else
        pWalk->Core.fEffectiveUS = !!(fEffective & X86_PTE_US);
    -# endif
    -#  if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
    +# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
        pWalk->Core.fEffectiveNX = !(fEffective & 1) && GST_IS_NX_ACTIVE(pVCpu);
    -#  else
    +# else
        pWalk->Core.fEffectiveNX = false;
    -#  endif
     # endif
        pWalk->Core.fSucceeded = true;
    -   pWalk->Core.GCPhys     = GST_GET_PDE_GCPHYS(Pte) /** @todo Shouldn't this be PTE_GCPHYS? */
    -                          | (GCPtr & PAGE_OFFSET_MASK);
    +
    +   RTGCPHYS GCPhysPte = GST_GET_PDE_GCPHYS(Pte) /** @todo This should be GST_GET_PTE_GCPHYS. */
    +                      | (GCPtr & PAGE_OFFSET_MASK);
    +# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    +   PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPte, GCPhysPte, pWalk);
    +# endif
    +   pWalk->Core.GCPhys = GCPhysPte;
        return VINF_SUCCESS;
     }
     }
     
    -#endif /* 32BIT, PAE, EPT, AMD64 */
    +#endif /* 32BIT, PAE, AMD64 */
     
     /**
    @@ -468 +370 @@
     #elif PGM_GST_TYPE == PGM_TYPE_32BIT \
        || PGM_GST_TYPE == PGM_TYPE_PAE \
    -   || PGM_GST_TYPE == PGM_TYPE_EPT \
        || PGM_GST_TYPE == PGM_TYPE_AMD64
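Since PGM_GST_SLAT_WALK hides an early return, it may help to see what one invocation expands to. Hand-expanded for the PDPT step above (names as in the hunk; this is a reading aid, not code from the changeset):

    /* PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPdpt, GCPhysPdpt, pWalk) expands to roughly: */
    if (pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT)
    {
        PGMPTWALKGST SlatWalk;
        int const rcX = pgmGstSlatWalk(pVCpu, GCPhysPdpt, true /*fIsLinearAddrValid*/, GCPtr, &SlatWalk);
        if (RT_SUCCESS(rcX))
            GCPhysPdpt = SlatWalk.u.Core.GCPhys;   /* continue the stage-1 walk with the translated address */
        else
        {
            pWalk->Core = SlatWalk.u.Core;         /* surface the EPT walk failure to the caller */
            return rcX;                            /* early return out of the enclosing walker */
        }
    }

The early return is why the macro takes the enclosing walker's pWalk: a stage-2 (EPT) fault aborts the stage-1 walk and reports the EPT failure instead.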
trunk/src/VBox/VMM/VMMR3/PGM.cpp
    --- r92177
    +++ r92186
    @@ -798 +798 @@
        pPGM->enmShadowMode = PGMMODE_INVALID;
        pPGM->enmGuestMode  = PGMMODE_INVALID;
    +   pPGM->enmGuestSlatMode = PGMSLAT_INVALID;
        pPGM->idxGuestModeData = UINT8_MAX;
        pPGM->idxShadowModeData = UINT8_MAX;
    @@ -1076 +1077 @@
        pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID;
        pVCpu->pgm.s.enmGuestMode  = PGMMODE_INVALID;
    +   pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_INVALID;
        pVCpu->pgm.s.idxGuestModeData = UINT8_MAX;
        pVCpu->pgm.s.idxShadowModeData = UINT8_MAX;
    @@ -1987 +1989 @@
        /* print info. */
        if (fGuest)
    +   {
            pHlp->pfnPrintf(pHlp, "Guest paging mode (VCPU #%u): %s (changed %RU64 times), A20 %s (changed %RU64 times)\n",
                            pVCpu->idCpu, PGMGetModeName(pVCpu->pgm.s.enmGuestMode), pVCpu->pgm.s.cGuestModeChanges.c,
                            pVCpu->pgm.s.fA20Enabled ? "enabled" : "disabled", pVCpu->pgm.s.cA20Changes.c);
    +#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    +       if (pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID)
    +           pHlp->pfnPrintf(pHlp, "Guest SLAT mode (VCPU #%u): %s\n", pVCpu->idCpu,
    +                           PGMGetSlatModeName(pVCpu->pgm.s.enmGuestSlatMode));
    +#endif
    +   }
        if (fShadow)
            pHlp->pfnPrintf(pHlp, "Shadow paging mode (VCPU #%u): %s\n", pVCpu->idCpu, PGMGetModeName(pVCpu->pgm.s.enmShadowMode));
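With the SLAT line added, the paging-mode info handler output would look roughly like this for a nested guest running with EPT (values illustrative; the SLAT line only appears in VBOX_WITH_NESTED_HWVIRT_VMX_EPT builds once the mode has been initialized):

    Guest paging mode (VCPU #0): AMD64 (changed 3 times), A20 enabled (changed 0 times)
    Guest SLAT mode (VCPU #0): EPT
    Shadow paging mode (VCPU #0): EPT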
trunk/src/VBox/VMM/include/PGMInternal.h
    --- r92177
    +++ r92186
    @@ -199 +199 @@
     #define PGM_TYPE_END            (PGM_TYPE_NONE + 1)
     #define PGM_TYPE_FIRST_SHADOW   PGM_TYPE_32BIT /**< The first type used by shadow paging. */
    +/** @} */
    +
    +/** @name Defines used to indicate the second-level
    + * address translation (SLAT) modes in the templates.
    + * @{ */
    +#define PGM_SLAT_TYPE_EPT       (PGM_TYPE_END + 1)
    +#define PGM_SLAT_TYPE_32BIT     (PGM_TYPE_END + 2)
    +#define PGM_SLAT_TYPE_PAE       (PGM_TYPE_END + 3)
    +#define PGM_SLAT_TYPE_AMD64     (PGM_TYPE_END + 4)
     /** @} */
    @@ -2331 +2340 @@
        RTGCPTR GCPtr;
     
    +   /** The nested-guest physical address that is being resolved if this is a
    +    *  second-level walk (input).
    +    * @remarks only valid if fIsSlat is set. */
    +   RTGCPHYS GCPhysNested;
    +
        /** The guest physical address that is the result of the walk.
         * @remarks only valid if fSucceeded is set. */
    @@ -2337 +2351 @@
        /** Set if the walk succeeded, i.d. GCPhys is valid. */
        bool fSucceeded;
    +   /** Whether this is a second-level translation. */
    +   bool fIsSlat;
    +   /** Whether the linear address (GCPtr) is valid and thus the cause for the
    +    *  second-level translation. */
    +   bool fIsLinearAddrValid;
        /** The level problem arrised at.
         * PTE is level 1, PDE is level 2, PDPE is level 3, PML4 is level 4, CR3 is
    @@ -2357 +2376 @@
        /** The effective X86_PTE_NX flag for the address. */
        bool fEffectiveNX;
    -   bool afPadding1[2];
        /** Effective flags thus far: RW, US, PWT, PCD, A, ~NX >> 63.
         * The NX bit is inverted and shifted down 63 places to bit 0. */
    @@ -2513 +2531 @@
     typedef PGMPTWALKGSTAMD64 const *PCPGMPTWALKGSTAMD64;
     
    -#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     /**
      * Guest page table walk for the EPT mode.
    @@ -2542 +2559 @@
     /** Pointer to a const EPT guest page table walk. */
     typedef PGMPTWALKGSTEPT const *PCPGMPTWALKGSTEPT;
    -#endif
    @@ -2625 +2641 @@
         * convension). */
        PGMPTWALKGST32BIT Legacy;
    -#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    -   /** The page walker for EPT. */
    +   /** The page walker for EPT (SLAT). */
        PGMPTWALKGSTEPT Ept;
    -#endif
     } u;
     /** Indicates which part of the union is valid. */
    @@ -2669 +2683 @@
     #define PGM_GST_NAME_RC_AMD64_STR(name)  "pgmRCGstAMD64" #name
     #define PGM_GST_NAME_R0_AMD64_STR(name)  "pgmR0GstAMD64" #name
    -#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    -# define PGM_GST_NAME_EPT(name)          PGM_CTX(pgm,GstEPT##name)
    -# define PGM_GST_NAME_RC_EPT_STR(name)   "pgmRCGstEPT" #name
    -# define PGM_GST_NAME_R0_EPT_STR(name)   "pgmR0GstEPT" #name
    -#endif
     #define PGM_GST_DECL(type, name)         PGM_CTX_DECL(type) PGM_GST_NAME(name)
    +
    +#define PGM_GST_SLAT_NAME_EPT(name)         PGM_CTX(pgm,GstSlatEpt##name)
    +#define PGM_GST_SLAT_NAME_RC_EPT_STR(name)  "pgmRCGstSlatEpt" #name
    +#define PGM_GST_SLAT_NAME_R0_EPT_STR(name)  "pgmR0GstSlatEpt" #name
    +#define PGM_GST_SLAT_DECL(type, name)       PGM_CTX_DECL(type) PGM_GST_SLAT_NAME(name)
    @@ -2732 +2746 @@
     #define PGM_BTH_NAME_EPT_PAE(name)       PGM_CTX(pgm,BthEPTPAE##name)
     #define PGM_BTH_NAME_EPT_AMD64(name)     PGM_CTX(pgm,BthEPTAMD64##name)
    -#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    -# define PGM_BTH_NAME_EPT_EPT(name)      PGM_CTX(pgm,BthEPTEPT##name)
    -#endif
     #define PGM_BTH_NAME_NONE_REAL(name)     PGM_CTX(pgm,BthNoneReal##name)
     #define PGM_BTH_NAME_NONE_PROT(name)     PGM_CTX(pgm,BthNoneProt##name)
    @@ -2769 +2780 @@
     #define PGM_BTH_NAME_RC_EPT_PAE_STR(name)    "pgmRCBthEPTPAE" #name
     #define PGM_BTH_NAME_RC_EPT_AMD64_STR(name)  "pgmRCBthEPTAMD64" #name
    -#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    -# define PGM_BTH_NAME_RC_EPT_EPT_STR(name)   "pgmRCBthEPTEPT" #name
    -#endif
    @@ -2802 +2810 @@
     #define PGM_BTH_NAME_R0_EPT_PAE_STR(name)    "pgmR0BthEPTPAE" #name
     #define PGM_BTH_NAME_R0_EPT_AMD64_STR(name)  "pgmR0BthEPTAMD64" #name
    -#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    -# define PGM_BTH_NAME_R0_EPT_EPT_STR(name)   "pgmR0BthEPTEPT" #name
    -#endif
     
     #define PGM_BTH_DECL(type, name)             PGM_CTX_DECL(type) PGM_BTH_NAME(name)
    @@ -2827 +2832 @@
     
     /** The length of g_aPgmGuestModeData. */
    -#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    -# define PGM_GUEST_MODE_DATA_ARRAY_SIZE     (PGM_TYPE_EPT + 1)
    -#elif defined(VBOX_WITH_64_BITS_GUESTS)
    +#if VBOX_WITH_64_BITS_GUESTS
     # define PGM_GUEST_MODE_DATA_ARRAY_SIZE     (PGM_TYPE_AMD64 + 1)
     #else
    @@ -3517 +3520 @@
        /** The guest paging mode. */
        PGMMODE enmGuestMode;
    +   /** The guest second level address translation mode. */
    +   PGMSLAT enmGuestSlatMode;
        /** Guest mode data table index (PGM_TYPE_XXX). */
        uint8_t volatile idxGuestModeData;
    @@ -3524 +3529 @@
        uint8_t volatile idxBothModeData;
        /** Alignment padding. */
    -   uint8_t abPadding[5];
    +   uint8_t abPadding[1];
     
        /** The current physical address represented in the guest CR3 register. */
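Finally, a small sketch of how the extended walk structure might be consumed, to show where the new fields fit. The helper is hypothetical and relies only on fields visible in the hunks above:

    /* Hypothetical inspection helper, for illustration only. */
    static bool sketchWalkIsWritable(PGMPTWALKGST const *pWalk)
    {
        if (!pWalk->u.Core.fSucceeded)
            return false;
        /* For a stage-2 (SLAT) walk, fIsSlat is set and GCPhysNested holds the
           input address; GCPhys is the final translated address either way. */
        return pWalk->u.Core.fEffectiveRW;
    }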