VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllShw.h@96945

Last change on this file since 96945 was 96900, checked in by vboxsync on 2022-09-27

VMM/PGM: Drop the fIs64BitsPagingMode parameter to PGM_SHW_DECL(int, Enter), it was for 32-bit AMD-V hosts. bugref:10092

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 25.3 KB
/* $Id: PGMAllShw.h 96900 2022-09-27 13:30:45Z vboxsync $ */
/** @file
 * VBox - Page Manager, Shadow Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#undef SHWUINT
#undef SHWPT
#undef PSHWPT
#undef SHWPTE
#undef PSHWPTE
#undef SHWPD
#undef PSHWPD
#undef SHWPDE
#undef PSHWPDE
#undef SHW_PDE_PG_MASK
#undef SHW_PD_SHIFT
#undef SHW_PD_MASK
#undef SHW_PDE_ATOMIC_SET
#undef SHW_PDE_ATOMIC_SET2
#undef SHW_PDE_IS_P
#undef SHW_PDE_IS_A
#undef SHW_PDE_IS_BIG
#undef SHW_PTE_PG_MASK
#undef SHW_PTE_IS_P
#undef SHW_PTE_IS_RW
#undef SHW_PTE_IS_US
#undef SHW_PTE_IS_A
#undef SHW_PTE_IS_D
#undef SHW_PTE_IS_P_RW
#undef SHW_PTE_IS_TRACK_DIRTY
#undef SHW_PTE_GET_HCPHYS
#undef SHW_PTE_GET_U
#undef SHW_PTE_LOG64
#undef SHW_PTE_SET
#undef SHW_PTE_ATOMIC_SET
#undef SHW_PTE_ATOMIC_SET2
#undef SHW_PTE_SET_RO
#undef SHW_PTE_SET_RW
#undef SHW_PT_SHIFT
#undef SHW_PT_MASK
#undef SHW_TOTAL_PD_ENTRIES
#undef SHW_PDPT_SHIFT
#undef SHW_PDPT_MASK
#undef SHW_PDPE_PG_MASK

#if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT
# define SHWUINT uint32_t
# define SHWPT X86PT
# define PSHWPT PX86PT
# define SHWPTE X86PTE
# define PSHWPTE PX86PTE
# define SHWPD X86PD
# define PSHWPD PX86PD
# define SHWPDE X86PDE
# define PSHWPDE PX86PDE
# define SHW_PDE_PG_MASK X86_PDE_PG_MASK
# define SHW_PD_SHIFT X86_PD_SHIFT
# define SHW_PD_MASK X86_PD_MASK
# define SHW_TOTAL_PD_ENTRIES X86_PG_ENTRIES
# define SHW_PDE_IS_P(Pde) ( (Pde).u & X86_PDE_P )
# define SHW_PDE_IS_A(Pde) ( (Pde).u & X86_PDE_A )
# define SHW_PDE_IS_BIG(Pde) ( (Pde).u & X86_PDE_PS )
# define SHW_PDE_ATOMIC_SET(Pde, uNew) do { ASMAtomicWriteU32(&(Pde).u, (uNew)); } while (0)
# define SHW_PDE_ATOMIC_SET2(Pde, Pde2) do { ASMAtomicWriteU32(&(Pde).u, (Pde2).u); } while (0)
# define SHW_PTE_PG_MASK X86_PTE_PG_MASK
# define SHW_PTE_IS_P(Pte) ( (Pte).u & X86_PTE_P )
# define SHW_PTE_IS_RW(Pte) ( (Pte).u & X86_PTE_RW )
# define SHW_PTE_IS_US(Pte) ( (Pte).u & X86_PTE_US )
# define SHW_PTE_IS_A(Pte) ( (Pte).u & X86_PTE_A )
# define SHW_PTE_IS_D(Pte) ( (Pte).u & X86_PTE_D )
# define SHW_PTE_IS_P_RW(Pte) ( ((Pte).u & (X86_PTE_P | X86_PTE_RW)) == (X86_PTE_P | X86_PTE_RW) )
# define SHW_PTE_IS_TRACK_DIRTY(Pte) ( !!((Pte).u & PGM_PTFLAGS_TRACK_DIRTY) )
# define SHW_PTE_GET_HCPHYS(Pte) ( (Pte).u & X86_PTE_PG_MASK )
# define SHW_PTE_LOG64(Pte) ( (uint64_t)(Pte).u )
# define SHW_PTE_GET_U(Pte) ( (Pte).u ) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew) do { (Pte).u = (uNew); } while (0)
# define SHW_PTE_ATOMIC_SET(Pte, uNew) do { ASMAtomicWriteU32(&(Pte).u, (uNew)); } while (0)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU32(&(Pte).u, (Pte2).u); } while (0)
# define SHW_PTE_SET_RO(Pte) do { (Pte).u &= ~(X86PGUINT)X86_PTE_RW; } while (0)
# define SHW_PTE_SET_RW(Pte) do { (Pte).u |= X86_PTE_RW; } while (0)
# define SHW_PT_SHIFT X86_PT_SHIFT
# define SHW_PT_MASK X86_PT_MASK

#elif PGM_SHW_TYPE == PGM_TYPE_EPT
# define SHWUINT uint64_t
# define SHWPT EPTPT
# define PSHWPT PEPTPT
# define SHWPTE EPTPTE
# define PSHWPTE PEPTPTE
# define SHWPD EPTPD
# define PSHWPD PEPTPD
# define SHWPDE EPTPDE
# define PSHWPDE PEPTPDE
# define SHW_PDE_PG_MASK EPT_PDE_PG_MASK
# define SHW_PD_SHIFT EPT_PD_SHIFT
# define SHW_PD_MASK EPT_PD_MASK
# define SHW_PDE_IS_P(Pde) ( (Pde).u & EPT_E_READ /* always set */ )
# define SHW_PDE_IS_A(Pde) ( 1 ) /* We don't use EPT_E_ACCESSED, use with care! */
# define SHW_PDE_IS_BIG(Pde) ( (Pde).u & EPT_E_LEAF )
# define SHW_PDE_ATOMIC_SET(Pde, uNew) do { ASMAtomicWriteU64(&(Pde).u, (uNew)); } while (0)
# define SHW_PDE_ATOMIC_SET2(Pde, Pde2) do { ASMAtomicWriteU64(&(Pde).u, (Pde2).u); } while (0)
# define SHW_PTE_PG_MASK EPT_PTE_PG_MASK
# define SHW_PTE_IS_P(Pte) ( (Pte).u & EPT_E_READ ) /* Approximation, works for us. */
# define SHW_PTE_IS_RW(Pte) ( (Pte).u & EPT_E_WRITE )
# define SHW_PTE_IS_US(Pte) ( true )
# define SHW_PTE_IS_A(Pte) ( true )
# define SHW_PTE_IS_D(Pte) ( true )
# define SHW_PTE_IS_P_RW(Pte) ( ((Pte).u & (EPT_E_READ | EPT_E_WRITE)) == (EPT_E_READ | EPT_E_WRITE) )
# define SHW_PTE_IS_TRACK_DIRTY(Pte) ( false )
# define SHW_PTE_GET_HCPHYS(Pte) ( (Pte).u & EPT_PTE_PG_MASK )
# define SHW_PTE_LOG64(Pte) ( (Pte).u )
# define SHW_PTE_GET_U(Pte) ( (Pte).u ) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew) do { (Pte).u = (uNew); } while (0)
# define SHW_PTE_ATOMIC_SET(Pte, uNew) do { ASMAtomicWriteU64(&(Pte).u, (uNew)); } while (0)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU64(&(Pte).u, (Pte2).u); } while (0)
# define SHW_PTE_SET_RO(Pte) do { (Pte).u &= ~(uint64_t)EPT_E_WRITE; } while (0)
# define SHW_PTE_SET_RW(Pte) do { (Pte).u |= EPT_E_WRITE; } while (0)
# define SHW_PT_SHIFT EPT_PT_SHIFT
# define SHW_PT_MASK EPT_PT_MASK
# define SHW_PDPT_SHIFT EPT_PDPT_SHIFT
# define SHW_PDPT_MASK EPT_PDPT_MASK
# define SHW_PDPE_PG_MASK EPT_PDPE_PG_MASK
# define SHW_TOTAL_PD_ENTRIES (EPT_PG_AMD64_ENTRIES * EPT_PG_AMD64_PDPE_ENTRIES)

#else
# define SHWUINT uint64_t
# define SHWPT PGMSHWPTPAE
# define PSHWPT PPGMSHWPTPAE
# define SHWPTE PGMSHWPTEPAE
# define PSHWPTE PPGMSHWPTEPAE
# define SHWPD X86PDPAE
# define PSHWPD PX86PDPAE
# define SHWPDE X86PDEPAE
# define PSHWPDE PX86PDEPAE
# define SHW_PDE_PG_MASK X86_PDE_PAE_PG_MASK
# define SHW_PD_SHIFT X86_PD_PAE_SHIFT
# define SHW_PD_MASK X86_PD_PAE_MASK
# define SHW_PDE_IS_P(Pde) ( (Pde).u & X86_PDE_P )
# define SHW_PDE_IS_A(Pde) ( (Pde).u & X86_PDE_A )
# define SHW_PDE_IS_BIG(Pde) ( (Pde).u & X86_PDE_PS )
# define SHW_PDE_ATOMIC_SET(Pde, uNew) do { ASMAtomicWriteU64(&(Pde).u, (uNew)); } while (0)
# define SHW_PDE_ATOMIC_SET2(Pde, Pde2) do { ASMAtomicWriteU64(&(Pde).u, (Pde2).u); } while (0)
# define SHW_PTE_PG_MASK X86_PTE_PAE_PG_MASK
# define SHW_PTE_IS_P(Pte) PGMSHWPTEPAE_IS_P(Pte)
# define SHW_PTE_IS_RW(Pte) PGMSHWPTEPAE_IS_RW(Pte)
# define SHW_PTE_IS_US(Pte) PGMSHWPTEPAE_IS_US(Pte)
# define SHW_PTE_IS_A(Pte) PGMSHWPTEPAE_IS_A(Pte)
# define SHW_PTE_IS_D(Pte) PGMSHWPTEPAE_IS_D(Pte)
# define SHW_PTE_IS_P_RW(Pte) PGMSHWPTEPAE_IS_P_RW(Pte)
# define SHW_PTE_IS_TRACK_DIRTY(Pte) PGMSHWPTEPAE_IS_TRACK_DIRTY(Pte)
# define SHW_PTE_GET_HCPHYS(Pte) PGMSHWPTEPAE_GET_HCPHYS(Pte)
# define SHW_PTE_LOG64(Pte) PGMSHWPTEPAE_GET_LOG(Pte)
# define SHW_PTE_GET_U(Pte) PGMSHWPTEPAE_GET_U(Pte) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew) PGMSHWPTEPAE_SET(Pte, uNew)
# define SHW_PTE_ATOMIC_SET(Pte, uNew) PGMSHWPTEPAE_ATOMIC_SET(Pte, uNew)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) PGMSHWPTEPAE_ATOMIC_SET2(Pte, Pte2)
# define SHW_PTE_SET_RO(Pte) PGMSHWPTEPAE_SET_RO(Pte)
# define SHW_PTE_SET_RW(Pte) PGMSHWPTEPAE_SET_RW(Pte)
# define SHW_PT_SHIFT X86_PT_PAE_SHIFT
# define SHW_PT_MASK X86_PT_PAE_MASK

# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64 || /* whatever: */ PGM_SHW_TYPE == PGM_TYPE_NONE
#  define SHW_PDPT_SHIFT X86_PDPT_SHIFT
#  define SHW_PDPT_MASK X86_PDPT_MASK_AMD64
#  define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK
#  define SHW_TOTAL_PD_ENTRIES (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)

# elif PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE
#  define SHW_PDPT_SHIFT X86_PDPT_SHIFT
#  define SHW_PDPT_MASK X86_PDPT_MASK_PAE
#  define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK
#  define SHW_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)

# else
#  error "Misconfigured PGM_SHW_TYPE or something..."
# endif
#endif

#if PGM_SHW_TYPE == PGM_TYPE_NONE && PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
# error "PGM_TYPE_IS_NESTED_OR_EPT is true for PGM_TYPE_NONE!"
#endif
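
/*
 * Editorial note: this header is a code template, not a standalone compilation
 * unit.  The including source file defines PGM_SHW_TYPE (and the PGM_SHW_NAME
 * flavor consumed by PGM_SHW_DECL) before each inclusion, stamping out one
 * instance of the functions below per shadow paging mode.  A minimal sketch of
 * the instantiation pattern; the exact macro setup lives in the including PGM
 * source file and is only assumed here:
 *
 * @code
 *  #define PGM_SHW_TYPE        PGM_TYPE_EPT
 *  #define PGM_SHW_NAME(name)  PGM_SHW_NAME_EPT(name)
 *  #include "PGMAllShw.h"
 *  #undef  PGM_SHW_TYPE
 *  #undef  PGM_SHW_NAME
 * @endcode
 */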


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
RT_C_DECLS_BEGIN
PGM_SHW_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
PGM_SHW_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags);
PGM_SHW_DECL(int, Exit)(PVMCPUCC pVCpu);
#ifdef IN_RING3
PGM_SHW_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta);
#endif
RT_C_DECLS_END


/**
 * Enters the shadow mode.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
PGM_SHW_DECL(int, Enter)(PVMCPUCC pVCpu)
{
#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)

# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    RTGCPHYS GCPhysCR3;
    PGMPOOLKIND enmKind;
    if (pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_EPT)
    {
        GCPhysCR3 = RT_BIT_64(63);
        enmKind = PGMPOOLKIND_ROOT_NESTED;
    }
    else
    {
        GCPhysCR3 = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK;
        enmKind = PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4;
    }
# else
    RTGCPHYS const GCPhysCR3 = RT_BIT_64(63);
    PGMPOOLKIND const enmKind = PGMPOOLKIND_ROOT_NESTED;
# endif
    PVMCC const pVM = pVCpu->CTX_SUFF(pVM);

    Assert(HMIsNestedPagingActive(pVM));
    Assert(pVM->pgm.s.fNestedPaging);
    Assert(!pVCpu->pgm.s.pShwPageCR3R3);

    PGM_LOCK_VOID(pVM);

    PPGMPOOLPAGE pNewShwPageCR3;
    int rc = pgmPoolAlloc(pVM, GCPhysCR3, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
                          NIL_PGMPOOL_IDX, UINT32_MAX, true /*fLockPage*/,
                          &pNewShwPageCR3);
    AssertLogRelRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);

    pVCpu->pgm.s.pShwPageCR3R3 = pgmPoolConvertPageToR3(pVM->pgm.s.CTX_SUFF(pPool), pNewShwPageCR3);
    pVCpu->pgm.s.pShwPageCR3R0 = pgmPoolConvertPageToR0(pVM->pgm.s.CTX_SUFF(pPool), pNewShwPageCR3);

    PGM_UNLOCK(pVM);

    Log(("Enter nested shadow paging mode: root %RHv phys %RHp\n", pVCpu->pgm.s.pShwPageCR3R3, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key));
#else
    NOREF(pVCpu);
#endif
    return VINF_SUCCESS;
}


/**
 * Exits the shadow mode.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
PGM_SHW_DECL(int, Exit)(PVMCPUCC pVCpu)
{
#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    if (pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

        PGM_LOCK_VOID(pVM);

# if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) && PGM_SHW_TYPE == PGM_TYPE_EPT
        if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
            pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

        /* Do *not* unlock this page as we have two of them floating around in the 32-bit host & 64-bit guest case.
         * We currently assert when you try to free one of them; don't bother to really allow this.
         *
         * Note that this is two nested paging root pages max. This isn't a leak. They are reused.
         */
        /* pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)); */

        pgmPoolFreeByPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), NIL_PGMPOOL_IDX, UINT32_MAX);
        pVCpu->pgm.s.pShwPageCR3R3 = 0;
        pVCpu->pgm.s.pShwPageCR3R0 = 0;

        PGM_UNLOCK(pVM);

        Log(("Leave nested shadow paging mode\n"));
    }
#else
    RT_NOREF_PV(pVCpu);
#endif
    return VINF_SUCCESS;
}


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*.
 * @param   pHCPhys     Where to store the HC physical address of the page.
 *                      This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
PGM_SHW_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
#if PGM_SHW_TYPE == PGM_TYPE_NONE
    RT_NOREF(pVCpu, GCPtr);
    AssertFailed();
    *pfFlags = 0;
    *pHCPhys = NIL_RTHCPHYS;
    return VERR_PGM_SHW_NONE_IPE;

#else /* PGM_SHW_TYPE != PGM_TYPE_NONE */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Get the PDE.
     */
# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
    X86PDEPAE Pde;

    /* PML4 */
    X86PML4E Pml4e = pgmShwGetLongModePML4E(pVCpu, GCPtr);
    if (!(Pml4e.u & X86_PML4E_P))
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* PDPT */
    PX86PDPT pPDPT;
    int rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
    if (RT_FAILURE(rc))
        return rc;
    const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
    X86PDPE Pdpe = pPDPT->a[iPDPT];
    if (!(Pdpe.u & X86_PDPE_P))
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* PD */
    PX86PDPAE pPd;
    rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
    if (RT_FAILURE(rc))
        return rc;
    const unsigned iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
    Pde = pPd->a[iPd];

    /* Merge accessed, write, user and no-execute bits into the PDE. */
    AssertCompile(X86_PML4E_A == X86_PDPE_A && X86_PML4E_A == X86_PDE_A);
    AssertCompile(X86_PML4E_RW == X86_PDPE_RW && X86_PML4E_RW == X86_PDE_RW);
    AssertCompile(X86_PML4E_US == X86_PDPE_US && X86_PML4E_US == X86_PDE_US);
    AssertCompile(X86_PML4E_NX == X86_PDPE_LM_NX && X86_PML4E_NX == X86_PDE_PAE_NX);
    Pde.u &= (Pml4e.u & Pdpe.u) | ~(X86PGPAEUINT)(X86_PML4E_A | X86_PML4E_RW | X86_PML4E_US);
    Pde.u |= (Pml4e.u | Pdpe.u) & X86_PML4E_NX;

# elif PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE
    X86PDEPAE Pde = pgmShwGetPaePDE(pVCpu, GCPtr);

# elif PGM_SHW_TYPE == PGM_TYPE_EPT
    Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT);
    PEPTPD pPDDst;
    int rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
    if (rc == VINF_SUCCESS) /** @todo this function isn't expected to return informational status codes. Check callers / fix. */
    { /* likely */ }
    else
    {
        AssertRC(rc);
        return rc;
    }
    Assert(pPDDst);

    const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);
    EPTPDE Pde = pPDDst->a[iPd];

# elif PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT
    X86PDE Pde = pgmShwGet32BitPDE(pVCpu, GCPtr);

# else
#  error "Misconfigured PGM_SHW_TYPE or something..."
# endif
    if (!SHW_PDE_IS_P(Pde))
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* Deal with large pages. */
    if (SHW_PDE_IS_BIG(Pde))
    {
        /*
         * Store the results.
         * RW and US flags depend on the entire page translation hierarchy - except for
         * legacy PAE which has a simplified PDPE.
         */
        if (pfFlags)
        {
            *pfFlags = (Pde.u & ~SHW_PDE_PG_MASK);
# if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
            if (   (Pde.u & X86_PTE_PAE_NX)
#  if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE)
                && CPUMIsGuestNXEnabled(pVCpu) /** @todo why do we have to check the guest state here? */
#  endif
               )
                *pfFlags |= X86_PTE_PAE_NX;
# endif
        }

        if (pHCPhys)
            *pHCPhys = (Pde.u & SHW_PDE_PG_MASK) + (GCPtr & (RT_BIT(SHW_PD_SHIFT) - 1) & X86_PAGE_4K_BASE_MASK);

        return VINF_SUCCESS;
    }

    /*
     * Get PT entry.
     */
    PSHWPT pPT;
    int rc2 = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
    if (RT_FAILURE(rc2))
        return rc2;
    const unsigned iPt = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
    SHWPTE Pte = pPT->a[iPt];
    if (!SHW_PTE_IS_P(Pte))
        return VERR_PAGE_NOT_PRESENT;

    /*
     * Store the results.
     * RW and US flags depend on the entire page translation hierarchy - except for
     * legacy PAE which has a simplified PDPE.
     */
    if (pfFlags)
    {
        *pfFlags = (SHW_PTE_GET_U(Pte) & ~SHW_PTE_PG_MASK)
                 & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));

# if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
        /* The NX bit is determined by a bitwise OR between the PT and PD. */
        if (   ((SHW_PTE_GET_U(Pte) | Pde.u) & X86_PTE_PAE_NX)
#  if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE)
            && CPUMIsGuestNXEnabled(pVCpu) /** @todo why do we have to check the guest state here? */
#  endif
           )
            *pfFlags |= X86_PTE_PAE_NX;
# endif
    }

    if (pHCPhys)
        *pHCPhys = SHW_PTE_GET_HCPHYS(Pte);

    return VINF_SUCCESS;
#endif /* PGM_SHW_TYPE != PGM_TYPE_NONE */
}
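
/*
 * Editorial sketch: callers normally reach the instance above through PGM's
 * mode-dispatching wrapper rather than by naming the per-mode instance
 * directly.  Assuming a PGMShwGetPage-style wrapper with this signature (an
 * assumption; the dispatcher lives outside this template), querying the
 * shadow flags and host-physical backing of a guest-context address could
 * look like:
 *
 * @code
 *  uint64_t fFlags = 0;
 *  RTHCPHYS HCPhys = NIL_RTHCPHYS;
 *  int rc = PGMShwGetPage(pVCpu, GCPtr, &fFlags, &HCPhys);
 *  if (RT_SUCCESS(rc))
 *      Log(("GCPtr=%RGv -> HCPhys=%RHp fFlags=%#RX64\n", GCPtr, HCPhys, fFlags));
 *  else
 *      Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
 * @endcode
 */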


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range. Page aligned!
 * @param   cb          Size (in bytes) of the range to apply the modification to. Page aligned!
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be extremely CAREFUL with ~'ing values because they can be 32-bit!
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 */
PGM_SHW_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
{
#if PGM_SHW_TYPE == PGM_TYPE_NONE
    RT_NOREF(pVCpu, GCPtr, cb, fFlags, fMask, fOpFlags);
    AssertFailed();
    return VERR_PGM_SHW_NONE_IPE;

#else /* PGM_SHW_TYPE != PGM_TYPE_NONE */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Walk page tables and pages till we're done.
     */
    int rc;
    for (;;)
    {
        /*
         * Get the PDE.
         */
# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
        X86PDEPAE Pde;
        /* PML4 */
        X86PML4E Pml4e = pgmShwGetLongModePML4E(pVCpu, GCPtr);
        if (!(Pml4e.u & X86_PML4E_P))
            return VERR_PAGE_TABLE_NOT_PRESENT;

        /* PDPT */
        PX86PDPT pPDPT;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
        if (RT_FAILURE(rc))
            return rc;
        const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
        X86PDPE Pdpe = pPDPT->a[iPDPT];
        if (!(Pdpe.u & X86_PDPE_P))
            return VERR_PAGE_TABLE_NOT_PRESENT;

        /* PD */
        PX86PDPAE pPd;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
        if (RT_FAILURE(rc))
            return rc;
        const unsigned iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
        Pde = pPd->a[iPd];

# elif PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE
        X86PDEPAE Pde = pgmShwGetPaePDE(pVCpu, GCPtr);

# elif PGM_SHW_TYPE == PGM_TYPE_EPT
        Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT);
        const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);
        PEPTPD pPDDst;
        EPTPDE Pde;

        rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
        if (rc != VINF_SUCCESS)
        {
            AssertRC(rc);
            return rc;
        }
        Assert(pPDDst);
        Pde = pPDDst->a[iPd];

# else /* PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT */
        X86PDE Pde = pgmShwGet32BitPDE(pVCpu, GCPtr);
# endif
        if (!SHW_PDE_IS_P(Pde))
            return VERR_PAGE_TABLE_NOT_PRESENT;

        AssertFatalMsg(!SHW_PDE_IS_BIG(Pde), ("Pde=%#RX64\n", (uint64_t)Pde.u));

        /*
         * Map the page table.
         */
        PSHWPT pPT;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
        if (RT_FAILURE(rc))
            return rc;

        unsigned iPTE = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
        while (iPTE < RT_ELEMENTS(pPT->a))
        {
            if (SHW_PTE_IS_P(pPT->a[iPTE]))
            {
                SHWPTE const OrgPte = pPT->a[iPTE];
                SHWPTE NewPte;

                SHW_PTE_SET(NewPte, (SHW_PTE_GET_U(OrgPte) & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK));
                if (!SHW_PTE_IS_P(NewPte))
                {
                    /** @todo Some CSAM code path might end up here and upset
                     *  the page pool. */
                    AssertMsgFailed(("NewPte=%#RX64 OrgPte=%#RX64 GCPtr=%#RGv\n", SHW_PTE_LOG64(NewPte), SHW_PTE_LOG64(OrgPte), GCPtr));
                }
                else if (   SHW_PTE_IS_RW(NewPte)
                         && !SHW_PTE_IS_RW(OrgPte)
                         && !(fOpFlags & PGM_MK_PG_IS_MMIO2) )
                {
                    /** @todo Optimize \#PF handling by caching data.  We can
                     *        then use this when PGM_MK_PG_IS_WRITE_FAULT is
                     *        set instead of resolving the guest physical
                     *        address yet again. */
                    PGMPTWALK GstWalk;
                    rc = PGMGstGetPage(pVCpu, GCPtr, &GstWalk);
                    AssertRC(rc);
                    if (RT_SUCCESS(rc))
                    {
                        Assert((GstWalk.fEffective & X86_PTE_RW) || !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP /* allow netware hack */));
                        PPGMPAGE pPage = pgmPhysGetPage(pVM, GstWalk.GCPhys);
                        Assert(pPage);
                        if (pPage)
                        {
                            rc = pgmPhysPageMakeWritable(pVM, pPage, GstWalk.GCPhys);
                            AssertRCReturn(rc, rc);
                            Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, GstWalk.GCPhys, pPage));
                        }
                    }
                }

                SHW_PTE_ATOMIC_SET2(pPT->a[iPTE], NewPte);
# if PGM_SHW_TYPE == PGM_TYPE_EPT
                HMInvalidatePhysPage(pVM, (RTGCPHYS)GCPtr);
# else
                PGM_INVL_PG_ALL_VCPU(pVM, GCPtr);
# endif
            }

            /* next page */
            cb -= HOST_PAGE_SIZE;
            if (!cb)
                return VINF_SUCCESS;
            GCPtr += HOST_PAGE_SIZE;
            iPTE++;
        }
    }
#endif /* PGM_SHW_TYPE != PGM_TYPE_NONE */
}
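
/*
 * Editorial sketch: per the contract above, each present PTE is rewritten as
 *
 *      NewPte = (OrgPte & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK)
 *
 * so fMask clears flag bits, fFlags sets them, and the physical address bits
 * are always preserved.  Write-protecting a single page would therefore pass
 * the following (the pgmShwModifyPage entry point name is illustrative only):
 *
 * @code
 *  uint64_t const fFlags = 0;                      // no flag bits ORed in
 *  uint64_t const fMask  = ~(uint64_t)X86_PTE_RW;  // AND out only the RW bit
 *  int rc = pgmShwModifyPage(pVCpu, GCPtr, HOST_PAGE_SIZE, fFlags, fMask, 0); // fOpFlags=0
 * @endcode
 */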


#ifdef IN_RING3
/**
 * Relocate any GC pointers related to shadow mode paging.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   offDelta    The relocation offset.
 */
PGM_SHW_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta)
{
    RT_NOREF(pVCpu, offDelta);
    return VINF_SUCCESS;
}
#endif
