/* $Id: PGMAllGstSlatEpt.h 92186 2021-11-03 08:31:27Z vboxsync $ */
/** @file
 * VBox - Page Manager, Guest EPT SLAT - All context code.
 */

/*
 * Copyright (C) 2021 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */
17 |
|
---|
18 | #if PGM_GST_TYPE == PGM_TYPE_EPT
|
---|
19 | DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel)
|
---|
20 | {
|
---|
21 | NOREF(pVCpu);
|
---|
22 | pWalk->Core.fNotPresent = true;
|
---|
23 | pWalk->Core.uLevel = (uint8_t)iLevel;
|
---|
24 | return VERR_PAGE_TABLE_NOT_PRESENT;
|
---|
25 | }
|
---|
26 |
|
---|
27 |
|
---|
28 | DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel, int rc)
|
---|
29 | {
|
---|
30 | AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
|
---|
31 | pWalk->Core.fBadPhysAddr = true;
|
---|
32 | pWalk->Core.uLevel = (uint8_t)iLevel;
|
---|
33 | return VERR_PAGE_TABLE_NOT_PRESENT;
|
---|
34 | }
|
---|
35 |
|
---|
36 |
|
---|
37 | DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel)
|
---|
38 | {
|
---|
39 | NOREF(pVCpu);
|
---|
40 | pWalk->Core.fRsvdError = true;
|
---|
41 | pWalk->Core.uLevel = (uint8_t)iLevel;
|
---|
42 | return VERR_PAGE_TABLE_NOT_PRESENT;
|
---|
43 | }
|
---|
44 |
|
---|
45 |
|
---|
46 | DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(Walk)(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
|
---|
47 | PGSTPTWALK pWalk)
|
---|
48 | {
|
---|
49 | int rc;
|
---|
50 | RT_ZERO(*pWalk);
|
---|
51 | pWalk->Core.GCPtr = GCPtrNested;
|
---|
52 | pWalk->Core.GCPhysNested = GCPhysNested;
|
---|
53 | pWalk->Core.fIsSlat = true;
|
---|
54 | pWalk->Core.fIsLinearAddrValid = fIsLinearAddrValid;
|
---|
55 |
|
---|
56 | uint32_t fEffective;
|
---|
57 | {
|
---|
58 | rc = pgmGstGetEptPML4PtrEx(pVCpu, &pWalk->pPml4);
|
---|
59 | if (RT_SUCCESS(rc)) { /* probable */ }
|
---|
60 | else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);
|
---|
61 |
|
---|
62 | PEPTPML4E pPml4e;
|
---|
63 | pWalk->pPml4e = pPml4e = &pWalk->pPml4->a[(GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK];
|
---|
64 | EPTPML4E Pml4e;
|
---|
65 | pWalk->Pml4e.u = Pml4e.u = pPml4e->u;
|
---|
66 |
|
---|
67 | if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
|
---|
68 | else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, 4);
|
---|
69 |
|
---|
70 | if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ }
|
---|
71 | else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 4);
|
---|
72 |
|
---|
73 | Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
|
---|
74 | uint64_t const fEptAttrs = Pml4e.u & EPT_PML4E_ATTR_MASK;
|
---|
75 | uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
|
---|
76 | uint8_t const fRead = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
|
---|
77 | uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
|
---|
78 | uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
|
---|
79 | uint32_t const fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
|
---|
80 | pWalk->Core.fEffective = fEffective = RT_BF_MAKE(PGM_BF_PTWALK_EFF_X, fExecute)
|
---|
81 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fRead & fWrite)
|
---|
82 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
|
---|
83 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A, fAccessed)
|
---|
84 | | fEffectiveEpt;
|
---|
85 |
|
---|
86 | rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & EPT_PML4E_PG_MASK, &pWalk->pPdpt);
|
---|
87 | if (RT_SUCCESS(rc)) { /* probable */ }
|
---|
88 | else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
|
---|
89 | }
|
---|
90 | {
|
---|
91 | PEPTPDPTE pPdpte;
|
---|
92 | pWalk->pPdpte = pPdpte = &pWalk->pPdpt->a[(GCPhysNested >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
|
---|
93 | EPTPDPTE Pdpte;
|
---|
94 | pWalk->Pdpte.u = Pdpte.u = pPdpte->u;
|
---|
95 |
|
---|
96 | if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpte)) { /* probable */ }
|
---|
97 | else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, 3);
|
---|
98 |
|
---|
99 | /* The order of the following 2 "if" statements matter. */
|
---|
100 | if (GST_IS_PDPE_VALID(pVCpu, Pdpte))
|
---|
101 | {
|
---|
102 | uint64_t const fEptAttrs = Pdpte.u & EPT_PDPTE_ATTR_MASK;
|
---|
103 | uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
|
---|
104 | uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
|
---|
105 | uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
|
---|
106 | uint32_t const fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
|
---|
107 | pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X, fExecute)
|
---|
108 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
|
---|
109 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
|
---|
110 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A, fAccessed)
|
---|
111 | | fEffectiveEpt;
|
---|
112 | }
|
---|
113 | else if (GST_IS_BIG_PDPE_VALID(pVCpu, Pdpte))
|
---|
114 | {
|
---|
115 | uint64_t const fEptAttrs = Pdpte.u & EPT_PDPTE1G_ATTR_MASK;
|
---|
116 | uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
|
---|
117 | uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
|
---|
118 | uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
|
---|
119 | uint8_t const fDirty = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
|
---|
120 | uint32_t const fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
|
---|
121 | pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X, fExecute)
|
---|
122 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
|
---|
123 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
|
---|
124 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A, fAccessed)
|
---|
125 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_D, fDirty)
|
---|
126 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_MEMTYPE, 0)
|
---|
127 | | fEffectiveEpt;
|
---|
128 | pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
|
---|
129 | pWalk->Core.fEffectiveUS = true;
|
---|
130 | pWalk->Core.fEffectiveNX = !fExecute;
|
---|
131 | pWalk->Core.fGigantPage = true;
|
---|
132 | pWalk->Core.fSucceeded = true;
|
---|
133 | pWalk->Core.GCPhys = GST_GET_BIG_PDPE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pdpte)
|
---|
134 | | (GCPhysNested & GST_GIGANT_PAGE_OFFSET_MASK);
|
---|
135 | PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
|
---|
136 | return VINF_SUCCESS;
|
---|
137 | }
|
---|
138 | else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 3);
|
---|
139 | }
|
---|
140 | {
|
---|
141 | PGSTPDE pPde;
|
---|
142 | pWalk->pPde = pPde = &pWalk->pPd->a[(GCPhysNested >> GST_PD_SHIFT) & GST_PD_MASK];
|
---|
143 | GSTPDE Pde;
|
---|
144 | pWalk->Pde.u = Pde.u = pPde->u;
|
---|
145 | if (GST_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ }
|
---|
146 | else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, 2);
|
---|
147 | if ((Pde.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu))
|
---|
148 | {
|
---|
149 | if (RT_LIKELY(GST_IS_BIG_PDE_VALID(pVCpu, Pde))) { /* likely */ }
|
---|
150 | else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 2);
|
---|
151 |
|
---|
152 | uint64_t const fEptAttrs = Pde.u & EPT_PDE2M_ATTR_MASK;
|
---|
153 | uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
|
---|
154 | uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
|
---|
155 | uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
|
---|
156 | uint8_t const fDirty = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
|
---|
157 | uint32_t fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
|
---|
158 | pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X, fExecute)
|
---|
159 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
|
---|
160 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
|
---|
161 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A, fAccessed)
|
---|
162 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_D, fDirty)
|
---|
163 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_MEMTYPE, 0)
|
---|
164 | | fEffectiveEpt;
|
---|
165 | pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
|
---|
166 | pWalk->Core.fEffectiveUS = true;
|
---|
167 | pWalk->Core.fEffectiveNX = !fExecute;
|
---|
168 | pWalk->Core.fBigPage = true;
|
---|
169 | pWalk->Core.fSucceeded = true;
|
---|
170 | pWalk->Core.GCPhys = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
|
---|
171 | | (GCPhysNested & GST_BIG_PAGE_OFFSET_MASK);
|
---|
172 | PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
|
---|
173 | return VINF_SUCCESS;
|
---|
174 | }
|
---|
175 |
|
---|
176 | if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde)))
|
---|
177 | return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 2);
|
---|
178 |
|
---|
179 | uint64_t const fEptAttrs = Pde.u & EPT_PDE_ATTR_MASK;
|
---|
180 | uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
|
---|
181 | uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
|
---|
182 | uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
|
---|
183 | uint32_t const fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
|
---|
184 | pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X, fExecute)
|
---|
185 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
|
---|
186 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
|
---|
187 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A, fAccessed)
|
---|
188 | | fEffectiveEpt;
|
---|
189 |
|
---|
190 | rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pWalk->pPt);
|
---|
191 | if (RT_SUCCESS(rc)) { /* probable */ }
|
---|
192 | else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
|
---|
193 | }
|
---|
194 | {
|
---|
195 | PGSTPTE pPte;
|
---|
196 | pWalk->pPte = pPte = &pWalk->pPt->a[(GCPhysNested >> GST_PT_SHIFT) & GST_PT_MASK];
|
---|
197 | GSTPTE Pte;
|
---|
198 | pWalk->Pte.u = Pte.u = pPte->u;
|
---|
199 |
|
---|
200 | if (GST_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ }
|
---|
201 | else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, 1);
|
---|
202 |
|
---|
203 | if (RT_LIKELY(GST_IS_PTE_VALID(pVCpu, Pte))) { /* likely */ }
|
---|
204 | else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 1);
|
---|
205 |
|
---|
206 | uint64_t const fEptAttrs = Pte.u & EPT_PTE_ATTR_MASK;
|
---|
207 | uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
|
---|
208 | uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
|
---|
209 | uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
|
---|
210 | uint8_t const fDirty = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
|
---|
211 | uint32_t fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
|
---|
212 | pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X, fExecute)
|
---|
213 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
|
---|
214 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
|
---|
215 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A, fAccessed)
|
---|
216 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_D, fDirty)
|
---|
217 | | RT_BF_MAKE(PGM_BF_PTWALK_EFF_MEMTYPE, 0)
|
---|
218 | | fEffectiveEpt;
|
---|
219 | pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
|
---|
220 | pWalk->Core.fEffectiveUS = true;
|
---|
221 | pWalk->Core.fEffectiveNX = !fExecute;
|
---|
222 | pWalk->Core.fSucceeded = true;
|
---|
223 | pWalk->Core.GCPhys = GST_GET_PTE_GCPHYS(Pte)
|
---|
224 | | (GCPhysNested & PAGE_OFFSET_MASK);
|
---|
225 | return VINF_SUCCESS;
|
---|
226 | }
|
---|
227 | }
|
---|
228 | #else
|
---|
229 | # error "Guest paging type must be EPT."
|
---|
230 | #endif
|
---|
231 |
|
---|