VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllBth.h@17289

Last change on this file since 17289 was 17289, checked in by vboxsync, 16 years ago

PGMAllBth.h: Fixed wrong HCPhys mask in EPT mode (EPT_PTE_PG_MASK=X86_PTE_PAE_PG_MASK_FULL, while we must use X86_PTE_PAE_PG_MASK against PGMPAGE::HCPhys, which omits bits 48 thru 51). Could in theory cause these bits to be wrong, although I'm not so sure intel cares.
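For context, a minimal sketch of the mask difference described above. The constant values are the standard PAE definitions (assumed, not copied from this revision); the point is that PGMPAGE::HCPhys in this revision also packs page-state flags into its upper bits (see the MM_RAM_FLAGS usage later in the file), so masking it with the full 52-bit mask could leak those flag bits into a shadow EPT PTE:

    /* Physical-address bits of a PAE/EPT page table entry. */
    #define X86_PTE_PAE_PG_MASK       UINT64_C(0x0000fffffffff000)  /* bits 12..47 */
    #define X86_PTE_PAE_PG_MASK_FULL  UINT64_C(0x000ffffffffff000)  /* bits 12..51, == EPT_PTE_PG_MASK */

    /* Wrong: may carry PGMPAGE state bits 48..51 into the EPT PTE. */
    RTHCPHYS HCPhysBad  = pPage->HCPhys & X86_PTE_PAE_PG_MASK_FULL;
    /* Fixed: strips everything above bit 47. */
    RTHCPHYS HCPhysGood = pPage->HCPhys & X86_PTE_PAE_PG_MASK;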

1/* $Id: PGMAllBth.h 17289 2009-03-03 14:57:16Z vboxsync $ */
2/** @file
3 * VBox - Page Manager, Shadow+Guest Paging Template - All context code.
4 *
5 * This file is a big challenge!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
20 * Clara, CA 95054 USA or visit http://www.sun.com if you need
21 * additional information or have any questions.
22 */
23
24/*******************************************************************************
25* Internal Functions *
26*******************************************************************************/
27__BEGIN_DECLS
28PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
29PGM_BTH_DECL(int, InvalidatePage)(PVM pVM, RTGCPTR GCPtrPage);
30PGM_BTH_DECL(int, SyncPage)(PVM pVM, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
31PGM_BTH_DECL(int, CheckPageFault)(PVM pVM, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage);
32PGM_BTH_DECL(int, SyncPT)(PVM pVM, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
33PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVM pVM, RTGCPTR Addr, unsigned fPage, unsigned uErr);
34PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, RTGCPTR GCPtrPage);
35PGM_BTH_DECL(int, SyncCR3)(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
36#ifdef VBOX_STRICT
37PGM_BTH_DECL(unsigned, AssertCR3)(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
38#endif
39#ifdef PGMPOOL_WITH_USER_TRACKING
40DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVM pVM, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys);
41#endif
42PGM_BTH_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
43PGM_BTH_DECL(int, UnmapCR3)(PVM pVM);
44__END_DECLS
45
46
47/* Filter out some illegal combinations of guest and shadow paging, so we can remove redundant checks inside functions. */
48#if PGM_GST_TYPE == PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
49# error "Invalid combination; PAE guest implies PAE shadow"
50#endif
51
52#if (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
53 && !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT)
54# error "Invalid combination; real or protected mode without paging implies 32 bits or PAE shadow paging."
55#endif
56
57#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE) \
58 && !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT)
59# error "Invalid combination; 32 bits guest paging or PAE implies 32 bits or PAE shadow paging."
60#endif
61
62#if (PGM_GST_TYPE == PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT) \
63 || (PGM_SHW_TYPE == PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PROT)
64# error "Invalid combination; AMD64 guest implies AMD64 shadow and vice versa"
65#endif
66
67#ifdef IN_RING0 /* no mappings in VT-x and AMD-V mode */
68# define PGM_WITHOUT_MAPPINGS
69#endif
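/*
 * Note on how this template is consumed (an illustration; the exact macro
 * names and values are assumptions, not copied from the including files):
 * this header is never compiled on its own, it is #included once per
 * guest/shadow paging pair, roughly like
 *
 *     #define PGM_GST_TYPE PGM_TYPE_32BIT
 *     #define PGM_SHW_TYPE PGM_TYPE_PAE
 *     #define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PAE(name)
 *     #include "PGMAllBth.h"
 *
 * so PGM_BTH_DECL/PGM_BTH_NAME expand every function below into a uniquely
 * named flavor (something like pgmBth32BitPAETrap0eHandler), and the #error
 * filters above reject impossible pairings at compile time.
 */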
70
71
72#ifndef IN_RING3
73/**
74 * #PF Handler for raw-mode guest execution.
75 *
76 * @returns VBox status code (appropriate for trap handling and GC return).
77 * @param pVM VM Handle.
78 * @param uErr The trap error code.
79 * @param pRegFrame Trap register frame.
80 * @param pvFault The fault address.
81 */
82PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
83{
84# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY) && defined(VBOX_STRICT)
85 PGMDynCheckLocks(pVM);
86# endif
87
88# if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
89 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
90 && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
91
92# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE != PGM_TYPE_PAE
93 /*
94 * Hide the instruction fetch trap indicator for now.
95 */
96 /** @todo NXE will change this and we must fix NXE in the switcher too! */
97 if (uErr & X86_TRAP_PF_ID)
98 {
99 uErr &= ~X86_TRAP_PF_ID;
100 TRPMSetErrorCode(pVM, uErr);
101 }
102# endif
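/*
 * For reference, the architectural x86 #PF error code bits tested all
 * through this handler (X86_TRAP_PF_* are VBox's names for them):
 *   X86_TRAP_PF_P    (bit 0) - 0: not-present fault, 1: protection violation.
 *   X86_TRAP_PF_RW   (bit 1) - set on write access, clear on read.
 *   X86_TRAP_PF_US   (bit 2) - set on user-mode access.
 *   X86_TRAP_PF_RSVD (bit 3) - reserved bit set in a paging structure.
 *   X86_TRAP_PF_ID   (bit 4) - instruction fetch (requires NX).
 */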
103
104 /*
105 * Get PDs.
106 */
107 int rc;
108# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
109# if PGM_GST_TYPE == PGM_TYPE_32BIT
110 const unsigned iPDSrc = pvFault >> GST_PD_SHIFT;
111 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
112
113# elif PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
114
115# if PGM_GST_TYPE == PGM_TYPE_PAE
116 unsigned iPDSrc;
117# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
118 X86PDPE PdpeSrc;
119 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, pvFault, &iPDSrc, &PdpeSrc);
120# else
121 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, pvFault, &iPDSrc, NULL);
122# endif
123
124# elif PGM_GST_TYPE == PGM_TYPE_AMD64
125 unsigned iPDSrc;
126 PX86PML4E pPml4eSrc;
127 X86PDPE PdpeSrc;
128 PGSTPD pPDSrc;
129
130 pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, pvFault, &pPml4eSrc, &PdpeSrc, &iPDSrc);
131 Assert(pPml4eSrc);
132# endif
133
134 /* Quick check for a valid guest trap. (PAE & AMD64) */
135 if (!pPDSrc)
136 {
137# if PGM_GST_TYPE == PGM_TYPE_AMD64 && GC_ARCH_BITS == 64
138 LogFlow(("Trap0eHandler: guest PML4 %d not present CR3=%RGp\n", (int)((pvFault >> X86_PML4_SHIFT) & X86_PML4_MASK), CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK));
139# else
140 LogFlow(("Trap0eHandler: guest iPDSrc=%u not present CR3=%RGp\n", iPDSrc, CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK));
141# endif
142 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2GuestTrap; });
143 TRPMSetErrorCode(pVM, uErr);
144 return VINF_EM_RAW_GUEST_TRAP;
145 }
146# endif
147
148# else /* !PGM_WITH_PAGING */
149 PGSTPD pPDSrc = NULL;
150 const unsigned iPDSrc = 0;
151# endif /* !PGM_WITH_PAGING */
152
153
154# if PGM_SHW_TYPE == PGM_TYPE_32BIT
155 const unsigned iPDDst = pvFault >> SHW_PD_SHIFT;
156 PX86PD pPDDst = pgmShwGet32BitPDPtr(&pVM->pgm.s);
157
158# elif PGM_SHW_TYPE == PGM_TYPE_PAE
159 const unsigned iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK; /* pPDDst index, not used with the pool. */
160
161# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
162 PX86PDPAE pPDDst;
163# if PGM_GST_TYPE != PGM_TYPE_PAE
164 X86PDPE PdpeSrc;
165
166 /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
167 PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
168# endif
169 rc = pgmShwSyncPaePDPtr(pVM, pvFault, &PdpeSrc, &pPDDst);
170 if (rc != VINF_SUCCESS)
171 {
172 AssertRC(rc);
173 return rc;
174 }
175 Assert(pPDDst);
176
177# else
178 PX86PDPAE pPDDst = pgmShwGetPaePDPtr(&pVM->pgm.s, pvFault);
179
180 /* Did we mark the PDPT as not present in SyncCR3? */
181 unsigned iPdpt = (pvFault >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
182 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
183 if (!pPdptDst->a[iPdpt].n.u1Present)
184 pPdptDst->a[iPdpt].n.u1Present = 1;
185# endif
186
187# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
188 const unsigned iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
189 PX86PDPAE pPDDst;
190# if PGM_GST_TYPE == PGM_TYPE_PROT
191 /* AMD-V nested paging */
192 X86PML4E Pml4eSrc;
193 X86PDPE PdpeSrc;
194 PX86PML4E pPml4eSrc = &Pml4eSrc;
195
196 /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
197 Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
198 PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
199# endif
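/*
 * Illustration (from the architectural bit positions P=bit 0, RW=bit 1,
 * US=bit 2, A=bit 5): both fake entries above evaluate to 0x27, i.e.
 * present + writable + user + accessed with a zero page frame address;
 * all real access control is enforced at the page table level instead.
 */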
200
201 rc = pgmShwSyncLongModePDPtr(pVM, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
202 if (rc != VINF_SUCCESS)
203 {
204 AssertRC(rc);
205 return rc;
206 }
207 Assert(pPDDst);
208
209# elif PGM_SHW_TYPE == PGM_TYPE_EPT
210 const unsigned iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
211 PEPTPD pPDDst;
212
213 rc = pgmShwGetEPTPDPtr(pVM, pvFault, NULL, &pPDDst);
214 if (rc != VINF_SUCCESS)
215 {
216 AssertRC(rc);
217 return rc;
218 }
219 Assert(pPDDst);
220# endif
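/*
 * A worked example of the index math above (illustrative numbers, 32-bit
 * guest on PAE shadow): with GST_PD_SHIFT = 22, pvFault = 0x80501234 gives
 * iPDSrc = 0x80501234 >> 22 = 0x201, the guest PDE covering
 * 0x80400000..0x807fffff. On the shadow side SHW_PD_SHIFT = 21 and
 * SHW_PD_MASK = 0x1ff (512 entries per PAE PD), so
 * iPDDst = (0x80501234 >> 21) & 0x1ff = 0x002, within the PAE page
 * directory selected by PDPT index 0x80501234 >> 30 = 2.
 */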
221
222# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
223 /*
224 * If we successfully correct the write protection fault due to dirty bit
225 * tracking, or this page fault is a genuine one, then return immediately.
226 */
227 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
228 rc = PGM_BTH_NAME(CheckPageFault)(pVM, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
229 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
230 if ( rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
231 || rc == VINF_EM_RAW_GUEST_TRAP)
232 {
233 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
234 = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVM->pgm.s.StatRZTrap0eTime2DirtyAndAccessed : &pVM->pgm.s.StatRZTrap0eTime2GuestTrap; });
235 LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
236 return rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? VINF_SUCCESS : rc;
237 }
238
239 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0ePD[iPDSrc]);
240# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
241
242 /*
243 * A common case is the not-present error caused by lazy page table syncing.
244 *
245 * It is IMPORTANT that we weed out any access to non-present shadow PDEs here
246 * so we can safely assume that the shadow PT is present when calling SyncPage later.
247 *
248 * On failure, we ASSUME that SyncPT is out of memory or detected some kind
249 * of mapping conflict and defer to SyncCR3 in R3.
250 * (Again, we do NOT support access handlers for non-present guest pages.)
251 *
252 */
253# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
254 GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
255# else
256 GSTPDE PdeSrc;
257 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
258 PdeSrc.n.u1Present = 1;
259 PdeSrc.n.u1Write = 1;
260 PdeSrc.n.u1Accessed = 1;
261 PdeSrc.n.u1User = 1;
262# endif
263 if ( !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
264 && !pPDDst->a[iPDDst].n.u1Present
265 && PdeSrc.n.u1Present
266 )
267
268 {
269 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2SyncPT; });
270 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0eTimeSyncPT, f);
271 LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
272 rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, pvFault);
273 if (RT_SUCCESS(rc))
274 {
275 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeSyncPT, f);
276 return rc;
277 }
278 Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
279 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
280 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeSyncPT, f);
281 return VINF_PGM_SYNC_CR3;
282 }
283
284# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
285 /*
286 * Check if this address is within any of our mappings.
287 *
288 * This is *very* fast and it's gonna save us a bit of effort below and prevent
289 * us from screwing ourselves with MMIO2 pages which have a GC Mapping (VRam).
290 * (BTW, it's impossible to have physical access handlers in a mapping.)
291 */
292 if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
293 {
294 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0eTimeMapping, a);
295 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
296 for ( ; pMapping; pMapping = pMapping->CTX_SUFF(pNext))
297 {
298 if (pvFault < pMapping->GCPtr)
299 break;
300 if (pvFault - pMapping->GCPtr < pMapping->cb)
301 {
302 /*
303 * The first thing we check is if we've got an undetected conflict.
304 */
305 if (!pVM->pgm.s.fMappingsFixed)
306 {
307 unsigned iPT = pMapping->cb >> GST_PD_SHIFT;
308 while (iPT-- > 0)
309 if (pPDSrc->a[iPDSrc + iPT].n.u1Present)
310 {
311 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eConflicts);
312 Log(("Trap0e: Detected Conflict %RGv-%RGv\n", pMapping->GCPtr, pMapping->GCPtrLast));
313 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
314 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeMapping, a);
315 return VINF_PGM_SYNC_CR3;
316 }
317 }
318
319 /*
320 * Check if the fault address is in a virtual page access handler range.
321 */
322 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->HyperVirtHandlers, pvFault);
323 if ( pCur
324 && pvFault - pCur->Core.Key < pCur->cb
325 && uErr & X86_TRAP_PF_RW)
326 {
327# ifdef IN_RC
328 STAM_PROFILE_START(&pCur->Stat, h);
329 rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
330 STAM_PROFILE_STOP(&pCur->Stat, h);
331# else
332 AssertFailed();
333 rc = VINF_EM_RAW_EMULATE_INSTR; /* can't happen with VMX */
334# endif
335 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersMapping);
336 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeMapping, a);
337 return rc;
338 }
339
340 /*
341 * Pretend we're not here and let the guest handle the trap.
342 */
343 TRPMSetErrorCode(pVM, uErr & ~X86_TRAP_PF_P);
344 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eGuestPFMapping);
345 LogFlow(("PGM: Mapping access -> route trap to recompiler!\n"));
346 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeMapping, a);
347 return VINF_EM_RAW_GUEST_TRAP;
348 }
349 }
350 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeMapping, a);
351 } /* pgmAreMappingsEnabled(&pVM->pgm.s) */
352# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
353
354 /*
355 * Check if this fault address is flagged for special treatment,
356 * which means we'll have to figure out the physical address and
357 * check flags associated with it.
358 *
359 * ASSUME that we can limit any special access handling to pages
360 * in page tables which the guest believes to be present.
361 */
362 if (PdeSrc.n.u1Present)
363 {
364 RTGCPHYS GCPhys = NIL_RTGCPHYS;
365
366# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
367# if PGM_GST_TYPE == PGM_TYPE_AMD64
368 bool fBigPagesSupported = true;
369# else
370 bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
371# endif
372 if ( PdeSrc.b.u1Size
373 && fBigPagesSupported)
374 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc)
375 | ((RTGCPHYS)pvFault & (GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK));
376 else
377 {
378 PGSTPT pPTSrc;
379 rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
380 if (RT_SUCCESS(rc))
381 {
382 unsigned iPTESrc = (pvFault >> GST_PT_SHIFT) & GST_PT_MASK;
383 if (pPTSrc->a[iPTESrc].n.u1Present)
384 GCPhys = pPTSrc->a[iPTESrc].u & GST_PTE_PG_MASK;
385 }
386 }
387# else
388 /* No paging so the fault address is the physical address */
389 GCPhys = (RTGCPHYS)(pvFault & ~PAGE_OFFSET_MASK);
390# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
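/*
 * A worked example of the big-page branch above (32-bit guest with PSE,
 * illustrative numbers): GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK =
 * 0x3fffff ^ 0xfff = 0x3ff000, so for a 4MB PDE with physical base
 * 0x00800000 and pvFault = 0x00923456 we get
 * GCPhys = 0x00800000 | (0x00923456 & 0x3ff000) = 0x00923000,
 * the page-aligned physical address of the faulting 4KB page within the
 * big page.
 */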
391
392 /*
393 * If we have a GC address we'll check if it has any flags set.
394 */
395 if (GCPhys != NIL_RTGCPHYS)
396 {
397 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
398
399 PPGMPAGE pPage;
400 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
401 if (RT_SUCCESS(rc))
402 {
403 if ( PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage)
404 || PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage))
405 {
406 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
407 {
408 /*
409 * Physical page access handler.
410 */
411 const RTGCPHYS GCPhysFault = GCPhys | (pvFault & PAGE_OFFSET_MASK);
412 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysFault);
413 if (pCur)
414 {
415# ifdef PGM_SYNC_N_PAGES
416 /*
417 * If the region is write protected and we got a page not present fault, then sync
418 * the pages. If the fault was caused by a read, then restart the instruction.
419 * In case of write access continue to the GC write handler.
420 *
421 * ASSUMES that there is only one handler per page or that they have similar write properties.
422 */
423 if ( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
424 && !(uErr & X86_TRAP_PF_P))
425 {
426 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
427 if ( RT_FAILURE(rc)
428 || !(uErr & X86_TRAP_PF_RW)
429 || rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
430 {
431 AssertRC(rc);
432 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersOutOfSync);
433 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
434 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2OutOfSyncHndPhys; });
435 return rc;
436 }
437 }
438# endif
439
440 AssertMsg( pCur->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
441 || (pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE && (uErr & X86_TRAP_PF_RW)),
442 ("Unexpected trap for physical handler: %08X (phys=%08x) HCPhys=%X uErr=%X, enum=%d\n", pvFault, GCPhys, pPage->HCPhys, uErr, pCur->enmType));
443
444# if defined(IN_RC) || defined(IN_RING0)
445 if (pCur->CTX_SUFF(pfnHandler))
446 {
447 STAM_PROFILE_START(&pCur->Stat, h);
448 rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, GCPhysFault, pCur->CTX_SUFF(pvUser));
449 STAM_PROFILE_STOP(&pCur->Stat, h);
450 }
451 else
452# endif
453 rc = VINF_EM_RAW_EMULATE_INSTR;
454 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersPhysical);
455 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
456 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2HndPhys; });
457 return rc;
458 }
459 }
460# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
461 else
462 {
463# ifdef PGM_SYNC_N_PAGES
464 /*
465 * If the region is write protected and we got a page not present fault, then sync
466 * the pages. If the fault was caused by a read, then restart the instruction.
467 * In case of write access continue to the GC write handler.
468 */
469 if ( PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < PGM_PAGE_HNDL_PHYS_STATE_ALL
470 && !(uErr & X86_TRAP_PF_P))
471 {
472 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
473 if ( RT_FAILURE(rc)
474 || rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
475 || !(uErr & X86_TRAP_PF_RW))
476 {
477 AssertRC(rc);
478 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersOutOfSync);
479 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
480 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2OutOfSyncHndVirt; });
481 return rc;
482 }
483 }
484# endif
485 /*
486 * Ok, it's a virtual page access handler.
487 *
488 * Since it's faster to search by address, we'll do that first
489 * and then retry by GCPhys if that fails.
490 */
491 /** @todo r=bird: perhaps we should consider looking up by physical address directly now? */
492 /** @note r=svl: true, but lookup on virtual address should remain as a fallback as phys & virt trees might be out of sync, because the
493 * page was changed without us noticing it (not-present -> present without invlpg or mov cr3, xxx)
494 */
495 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
496 if (pCur)
497 {
498 AssertMsg(!(pvFault - pCur->Core.Key < pCur->cb)
499 || ( pCur->enmType != PGMVIRTHANDLERTYPE_WRITE
500 || !(uErr & X86_TRAP_PF_P)
501 || (pCur->enmType == PGMVIRTHANDLERTYPE_WRITE && (uErr & X86_TRAP_PF_RW))),
502 ("Unexpected trap for virtual handler: %RGv (phys=%RGp) HCPhys=%HGp uErr=%X, enum=%d\n", pvFault, GCPhys, pPage->HCPhys, uErr, pCur->enmType));
503
504 if ( pvFault - pCur->Core.Key < pCur->cb
505 && ( uErr & X86_TRAP_PF_RW
506 || pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
507 {
508# ifdef IN_RC
509 STAM_PROFILE_START(&pCur->Stat, h);
510 rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
511 STAM_PROFILE_STOP(&pCur->Stat, h);
512# else
513 rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
514# endif
515 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersVirtual);
516 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
517 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2HndVirt; });
518 return rc;
519 }
520 /* Unhandled part of a monitored page */
521 }
522 else
523 {
524 /* Check by physical address. */
525 PPGMVIRTHANDLER pCur;
526 unsigned iPage;
527 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys + (pvFault & PAGE_OFFSET_MASK),
528 &pCur, &iPage);
529 Assert(RT_SUCCESS(rc) || !pCur);
530 if ( pCur
531 && ( uErr & X86_TRAP_PF_RW
532 || pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
533 {
534 Assert((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == GCPhys);
535# ifdef IN_RC
536 RTGCPTR off = (iPage << PAGE_SHIFT) + (pvFault & PAGE_OFFSET_MASK) - (pCur->Core.Key & PAGE_OFFSET_MASK);
537 Assert(off < pCur->cb);
538 STAM_PROFILE_START(&pCur->Stat, h);
539 rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, off);
540 STAM_PROFILE_STOP(&pCur->Stat, h);
541# else
542 rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
543# endif
544 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersVirtualByPhys);
545 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
546 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2HndVirt; });
547 return rc;
548 }
549 }
550 }
551# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
552
553 /*
554 * There is a handled area of the page, but this fault doesn't belong to it.
555 * We must emulate the instruction.
556 *
557 * To avoid a (non-fatal) crash in the interpreter and a trip back to the recompiler,
558 * we first check if this was a page-not-present fault for a page with only
559 * write access handlers. Restart the instruction if it wasn't a write access.
560 */
561 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersUnhandled);
562
563 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
564 && !(uErr & X86_TRAP_PF_P))
565 {
566 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
567 if ( RT_FAILURE(rc)
568 || rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
569 || !(uErr & X86_TRAP_PF_RW))
570 {
571 AssertRC(rc);
572 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersOutOfSync);
573 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
574 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2OutOfSyncHndPhys; });
575 return rc;
576 }
577 }
578
579 /** @todo This particular case can cause quite a lot of overhead. E.g. early stage of kernel booting in Ubuntu 6.06
580 * It's writing to an unhandled part of the LDT page several million times.
581 */
582 rc = PGMInterpretInstruction(pVM, pRegFrame, pvFault);
583 LogFlow(("PGM: PGMInterpretInstruction -> rc=%d HCPhys=%RHp%s%s\n",
584 rc, pPage->HCPhys,
585 PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage) ? " phys" : "",
586 PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage) ? " virt" : ""));
587 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
588 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2HndUnhandled; });
589 return rc;
590 } /* if any kind of handler */
591
592# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
593 if (uErr & X86_TRAP_PF_P)
594 {
595 /*
596 * The page isn't marked, but it might still be monitored by a virtual page access handler.
597 * (ASSUMES no temporary disabling of virtual handlers.)
598 */
599 /** @todo r=bird: Since the purpose is to catch out of sync pages with virtual handler(s) here,
600 * we should correct both the shadow page table and physical memory flags, and not only check for
601 * accesses within the handler region but for access to pages with virtual handlers. */
602 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
603 if (pCur)
604 {
605 AssertMsg( !(pvFault - pCur->Core.Key < pCur->cb)
606 || ( pCur->enmType != PGMVIRTHANDLERTYPE_WRITE
607 || !(uErr & X86_TRAP_PF_P)
608 || (pCur->enmType == PGMVIRTHANDLERTYPE_WRITE && (uErr & X86_TRAP_PF_RW))),
609 ("Unexpected trap for virtual handler: %08X (phys=%08x) HCPhys=%X uErr=%X, enum=%d\n", pvFault, GCPhys, pPage->HCPhys, uErr, pCur->enmType));
610
611 if ( pvFault - pCur->Core.Key < pCur->cb
612 && ( uErr & X86_TRAP_PF_RW
613 || pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
614 {
615# ifdef IN_RC
616 STAM_PROFILE_START(&pCur->Stat, h);
617 rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
618 STAM_PROFILE_STOP(&pCur->Stat, h);
619# else
620 rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
621# endif
622 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersVirtualUnmarked);
623 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
624 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2HndVirt; });
625 return rc;
626 }
627 }
628 }
629# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
630 }
631 else
632 {
633 /* When the guest accesses invalid physical memory (e.g. probing of RAM or accessing a remapped MMIO range), we fall
634 * back to the recompiler to emulate the instruction.
635 */
636 LogFlow(("pgmPhysGetPageEx %RGp failed with %Rrc\n", GCPhys, rc));
637 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersInvalid);
638 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
639 return VINF_EM_RAW_EMULATE_INSTR;
640 }
641
642 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
643
644# ifdef PGM_OUT_OF_SYNC_IN_GC
645 /*
646 * We get here only if the page is present in the guest page tables and the trap is not
647 * handled by our handlers.
648 * Check for a page out-of-sync situation.
649 */
650 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0eTimeOutOfSync, c);
651
652 if (!(uErr & X86_TRAP_PF_P))
653 {
654 /*
655 * Page is not present in our page tables.
656 * Try to sync it!
657 * BTW, fPageShw is invalid in this branch!
658 */
659 if (uErr & X86_TRAP_PF_US)
660 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncUser));
661 else /* supervisor */
662 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));
663
664# if defined(LOG_ENABLED) && !defined(IN_RING0)
665 RTGCPHYS GCPhys;
666 uint64_t fPageGst;
667 PGMGstGetPage(pVM, pvFault, &fPageGst, &GCPhys);
668 Log(("Page out of sync: %RGv eip=%08x PdeSrc.n.u1User=%d fPageGst=%08llx GCPhys=%RGp scan=%d\n",
669 pvFault, pRegFrame->eip, PdeSrc.n.u1User, fPageGst, GCPhys, CSAMDoesPageNeedScanning(pVM, (RTRCPTR)pRegFrame->eip)));
670# endif /* LOG_ENABLED */
671
672# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0)
673 if (CPUMGetGuestCPL(pVM, pRegFrame) == 0)
674 {
675 uint64_t fPageGst;
676 rc = PGMGstGetPage(pVM, pvFault, &fPageGst, NULL);
677 if ( RT_SUCCESS(rc)
678 && !(fPageGst & X86_PTE_US))
679 {
680 /* Note: can't check for the X86_TRAP_PF_ID bit, because that requires execute disable support on the CPU. */
681 if ( pvFault == (RTGCPTR)pRegFrame->eip
682 || pvFault - pRegFrame->eip < 8 /* instruction crossing a page boundary */
683# ifdef CSAM_DETECT_NEW_CODE_PAGES
684 || ( !PATMIsPatchGCAddr(pVM, (RTGCPTR)pRegFrame->eip)
685 && CSAMDoesPageNeedScanning(pVM, (RTRCPTR)pRegFrame->eip)) /* any new code we encounter here */
686# endif /* CSAM_DETECT_NEW_CODE_PAGES */
687 )
688 {
689 LogFlow(("CSAMExecFault %RX32\n", pRegFrame->eip));
690 rc = CSAMExecFault(pVM, (RTRCPTR)pRegFrame->eip);
691 if (rc != VINF_SUCCESS)
692 {
693 /*
694 * CSAM needs to perform a job in ring 3.
695 *
696 * Sync the page before going to the host context; otherwise we'll end up in a loop if
697 * CSAM fails (e.g. instruction crosses a page boundary and the next page is not present)
698 */
699 LogFlow(("CSAM ring 3 job\n"));
700 int rc2 = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, 1, uErr);
701 AssertRC(rc2);
702
703 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeOutOfSync, c);
704 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2CSAM; });
705 return rc;
706 }
707 }
708# ifdef CSAM_DETECT_NEW_CODE_PAGES
709 else if ( uErr == X86_TRAP_PF_RW
710 && pRegFrame->ecx >= 0x100 /* early check for movswd count */
711 && pRegFrame->ecx < 0x10000)
712 {
713 /* In case of a write to a non-present supervisor shadow page, we'll take special precautions
714 * to detect loading of new code pages.
715 */
716
717 /*
718 * Decode the instruction.
719 */
720 RTGCPTR PC;
721 rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &PC);
722 if (rc == VINF_SUCCESS)
723 {
724 DISCPUSTATE Cpu;
725 uint32_t cbOp;
726 rc = EMInterpretDisasOneEx(pVM, PC, pRegFrame, &Cpu, &cbOp);
727
728 /* For now we'll restrict this to rep movsw/d instructions */
729 if ( rc == VINF_SUCCESS
730 && Cpu.pCurInstr->opcode == OP_MOVSWD
731 && (Cpu.prefix & PREFIX_REP))
732 {
733 CSAMMarkPossibleCodePage(pVM, pvFault);
734 }
735 }
736 }
737# endif /* CSAM_DETECT_NEW_CODE_PAGES */
738
739 /*
740 * Mark this page as safe.
741 */
742 /** @todo not correct for pages that contain both code and data!! */
743 Log2(("CSAMMarkPage %RGv; scanned=%d\n", pvFault, true));
744 CSAMMarkPage(pVM, (RTRCPTR)pvFault, true);
745 }
746 }
747# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0) */
748 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
749 if (RT_SUCCESS(rc))
750 {
751 /* The page was successfully synced, return to the guest. */
752 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeOutOfSync, c);
753 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2OutOfSync; });
754 return VINF_SUCCESS;
755 }
756 }
757 else
758 {
759 /*
760 * A side effect of not flushing global PDEs is out-of-sync pages due
761 * to physically monitored regions that are no longer valid.
762 * Assume for now that it only applies to the read/write flag.
763 */
764 if (RT_SUCCESS(rc) && (uErr & X86_TRAP_PF_RW))
765 {
766 if (uErr & X86_TRAP_PF_US)
767 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncUser));
768 else /* supervisor */
769 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));
770
771
772 /*
773 * Note: Do NOT use PGM_SYNC_NR_PAGES here. That only works if the page is not present, which is not true in this case.
774 */
775 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, 1, uErr);
776 if (RT_SUCCESS(rc))
777 {
778 /*
779 * Page was successfully synced, return to guest.
780 */
781# ifdef VBOX_STRICT
782 RTGCPHYS GCPhys;
783 uint64_t fPageGst;
784 rc = PGMGstGetPage(pVM, pvFault, &fPageGst, &GCPhys);
785 Assert(RT_SUCCESS(rc) && fPageGst & X86_PTE_RW);
786 LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, GCPhys, (uint64_t)fPageGst));
787
788 uint64_t fPageShw;
789 rc = PGMShwGetPage(pVM, pvFault, &fPageShw, NULL);
790 AssertMsg(RT_SUCCESS(rc) && fPageShw & X86_PTE_RW, ("rc=%Rrc fPageShw=%RX64\n", rc, fPageShw));
791# endif /* VBOX_STRICT */
792 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeOutOfSync, c);
793 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2OutOfSyncHndObs; });
794 return VINF_SUCCESS;
795 }
796
797 /* Check to see if we need to emulate the instruction as X86_CR0_WP has been cleared. */
798 if ( CPUMGetGuestCPL(pVM, pRegFrame) == 0
799 && ((CPUMGetGuestCR0(pVM) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG)
800 && (uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_P)) == (X86_TRAP_PF_RW | X86_TRAP_PF_P))
801 {
802 uint64_t fPageGst;
803 rc = PGMGstGetPage(pVM, pvFault, &fPageGst, NULL);
804 if ( RT_SUCCESS(rc)
805 && !(fPageGst & X86_PTE_RW))
806 {
807 rc = PGMInterpretInstruction(pVM, pRegFrame, pvFault);
808 if (RT_SUCCESS(rc))
809 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eWPEmulInRZ);
810 else
811 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eWPEmulToR3);
812 return rc;
813 }
814 AssertMsgFailed(("Unexpected r/w page %RGv flag=%x rc=%Rrc\n", pvFault, (uint32_t)fPageGst, rc));
815 }
816 }
817
818# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
819# ifdef VBOX_STRICT
820 /*
821 * Check for VMM page flags vs. Guest page flags consistency.
822 * Currently only for debug purposes.
823 */
824 if (RT_SUCCESS(rc))
825 {
826 /* Get guest page flags. */
827 uint64_t fPageGst;
828 rc = PGMGstGetPage(pVM, pvFault, &fPageGst, NULL);
829 if (RT_SUCCESS(rc))
830 {
831 uint64_t fPageShw;
832 rc = PGMShwGetPage(pVM, pvFault, &fPageShw, NULL);
833
834 /*
835 * Compare page flags.
836 * Note: we have AVL, A, D bits desynched.
837 */
838 AssertMsg((fPageShw & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)) == (fPageGst & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)),
839 ("Page flags mismatch! pvFault=%RGv GCPhys=%RGp fPageShw=%08llx fPageGst=%08llx\n", pvFault, GCPhys, fPageShw, fPageGst));
840 }
841 else
842 AssertMsgFailed(("PGMGstGetPage rc=%Rrc\n", rc));
843 }
844 else
845 AssertMsgFailed(("PGMGCGetPage rc=%Rrc\n", rc));
846# endif /* VBOX_STRICT */
847# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
848 }
849 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeOutOfSync, c);
850# endif /* PGM_OUT_OF_SYNC_IN_GC */
851 }
852 else
853 {
854 /*
855 * Page not present in Guest OS or invalid page table address.
856 * This is potential virtual page access handler food.
857 *
858 * For the present we'll say that our access handlers don't
859 * work for this case - we've already discarded the page table
860 * not present case which is identical to this.
861 *
862 * When we perchance find we need this, we will probably have AVL
863 * trees (offset based) to operate on and we can measure their speed
864 * against mapping a page table and probably rearrange this handling
865 * a bit. (Like, searching virtual ranges before checking the
866 * physical address.)
867 */
868 }
869 }
870
871
872# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
873 /*
874 * Conclusion, this is a guest trap.
875 */
876 LogFlow(("PGM: Unhandled #PF -> route trap to recompiler!\n"));
877 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eGuestPFUnh);
878 return VINF_EM_RAW_GUEST_TRAP;
879# else
880 /* present, but not a monitored page; perhaps the guest is probing physical memory */
881 return VINF_EM_RAW_EMULATE_INSTR;
882# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
883
884
885# else /* PGM_GST_TYPE != PGM_TYPE_32BIT */
886
887 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE));
888 return VERR_INTERNAL_ERROR;
889# endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
890}
891#endif /* !IN_RING3 */
892
893
894/**
895 * Emulation of the invlpg instruction.
896 *
897 *
898 * @returns VBox status code.
899 *
900 * @param pVM VM handle.
901 * @param GCPtrPage Page to invalidate.
902 *
903 * @remark ASSUMES that the guest is updating before invalidating. This order
904 * isn't required by the CPU, so this is speculative and could cause
905 * trouble.
906 *
907 * @todo Flush page or page directory only if necessary!
908 * @todo Add a #define for simply invalidating the page.
909 */
910PGM_BTH_DECL(int, InvalidatePage)(PVM pVM, RTGCPTR GCPtrPage)
911{
912#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) \
913 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
914 && PGM_SHW_TYPE != PGM_TYPE_EPT
915 int rc;
916
917 LogFlow(("InvalidatePage %RGv\n", GCPtrPage));
918 /*
919 * Get the shadow PD entry and skip out if this PD isn't present.
920 * (Guessing that it is frequent for a shadow PDE to not be present, we do this first.)
921 */
922# if PGM_SHW_TYPE == PGM_TYPE_32BIT
923 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
924 PX86PDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
925
926# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
927 /* Fetch the pgm pool shadow descriptor. */
928 PPGMPOOLPAGE pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
929 Assert(pShwPde);
930# endif
931
932# elif PGM_SHW_TYPE == PGM_TYPE_PAE
933 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT);
934 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
935
936 /* If the shadow PDPE isn't present, then skip the invalidate. */
937 if (!pPdptDst->a[iPdpt].n.u1Present)
938 {
939 Assert(!(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING));
940 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
941 return VINF_SUCCESS;
942 }
943
944# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
945 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
946 PPGMPOOLPAGE pShwPde;
947 PX86PDPAE pPDDst;
948
949 /* Fetch the pgm pool shadow descriptor. */
950 rc = pgmShwGetPaePoolPagePD(&pVM->pgm.s, GCPtrPage, &pShwPde);
951 AssertRCSuccessReturn(rc, rc);
952 Assert(pShwPde);
953
954 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
955 PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
956# else
957 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - pool index only atm! */;
958 PX86PDEPAE pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
959# endif
960
961# else /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */
962 /* PML4 */
963 AssertReturn(pVM->pgm.s.pShwRootR3, VERR_INTERNAL_ERROR);
964
965 const unsigned iPml4 = (GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK;
966 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
967 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
968 PX86PDPAE pPDDst;
969 PX86PDPT pPdptDst;
970 PX86PML4E pPml4eDst;
971 rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, &pPml4eDst, &pPdptDst, &pPDDst);
972 if (rc != VINF_SUCCESS)
973 {
974 AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT, ("Unexpected rc=%Rrc\n", rc));
975 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
976 if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
977 PGM_INVL_GUEST_TLBS();
978 return VINF_SUCCESS;
979 }
980 Assert(pPDDst);
981
982 PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
983 PX86PDPE pPdpeDst = &pPdptDst->a[iPdpt];
984
985 if (!pPdpeDst->n.u1Present)
986 {
987 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
988 if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
989 PGM_INVL_GUEST_TLBS();
990 return VINF_SUCCESS;
991 }
992
993# endif /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */
994
995 const SHWPDE PdeDst = *pPdeDst;
996 if (!PdeDst.n.u1Present)
997 {
998 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
999 return VINF_SUCCESS;
1000 }
1001
1002 /*
1003 * Get the guest PD entry and calc big page.
1004 */
1005# if PGM_GST_TYPE == PGM_TYPE_32BIT
1006 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
1007 const unsigned iPDSrc = GCPtrPage >> GST_PD_SHIFT;
1008 GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
1009# else /* PGM_GST_TYPE != PGM_TYPE_32BIT */
1010 unsigned iPDSrc;
1011# if PGM_GST_TYPE == PGM_TYPE_PAE
1012 X86PDPE PdpeSrc;
1013 PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
1014# else /* AMD64 */
1015 PX86PML4E pPml4eSrc;
1016 X86PDPE PdpeSrc;
1017 PX86PDPAE pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
1018# endif
1019 GSTPDE PdeSrc;
1020
1021 if (pPDSrc)
1022 PdeSrc = pPDSrc->a[iPDSrc];
1023 else
1024 PdeSrc.u = 0;
1025# endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
1026
1027# if PGM_GST_TYPE == PGM_TYPE_AMD64
1028 const bool fIsBigPage = PdeSrc.b.u1Size;
1029# else
1030 const bool fIsBigPage = PdeSrc.b.u1Size && (CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
1031# endif
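/*
 * Note on the distinction above: in AMD64 (long) mode big pages are always
 * architecturally available, so the PDE's PS bit alone decides; for a
 * 32-bit guest the PS bit is only honoured when CR4.PSE is set.
 * (Architecturally PAE paging ignores CR4.PSE as well; the template
 * nevertheless applies the CR4 check to all non-AMD64 guests here.)
 */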
1032
1033# ifdef IN_RING3
1034 /*
1035 * If a CR3 Sync is pending we may ignore the invalidate page operation
1036 * depending on the kind of sync and if it's a global page or not.
1037 * This doesn't make sense in GC/R0 so we'll skip it entirely there.
1038 */
1039# ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
1040 if ( VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3)
1041 || ( VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)
1042 && fIsBigPage
1043 && PdeSrc.b.u1Global
1044 )
1045 )
1046# else
1047 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL) )
1048# endif
1049 {
1050 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
1051 return VINF_SUCCESS;
1052 }
1053# endif /* IN_RING3 */
1054
1055# if PGM_GST_TYPE == PGM_TYPE_AMD64
1056 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1057
1058 /* Fetch the pgm pool shadow descriptor. */
1059 PPGMPOOLPAGE pShwPdpt = pgmPoolGetPageByHCPhys(pVM, pPml4eDst->u & X86_PML4E_PG_MASK);
1060 Assert(pShwPdpt);
1061
1062 /* Fetch the pgm pool shadow descriptor. */
1063 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpt].u & SHW_PDPE_PG_MASK);
1064 Assert(pShwPde);
1065
1066 Assert(pPml4eDst->n.u1Present && (pPml4eDst->u & SHW_PDPT_MASK));
1067 RTGCPHYS GCPhysPdpt = pPml4eSrc->u & X86_PML4E_PG_MASK;
1068
1069 if ( !pPml4eSrc->n.u1Present
1070 || pShwPdpt->GCPhys != GCPhysPdpt)
1071 {
1072 LogFlow(("InvalidatePage: Out-of-sync PML4E (P/GCPhys) at %RGv GCPhys=%RGp vs %RGp Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
1073 GCPtrPage, pShwPdpt->GCPhys, GCPhysPdpt, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
1074 pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
1075 pPml4eDst->u = 0;
1076 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
1077 PGM_INVL_GUEST_TLBS();
1078 return VINF_SUCCESS;
1079 }
1080 if ( pPml4eSrc->n.u1User != pPml4eDst->n.u1User
1081 || (!pPml4eSrc->n.u1Write && pPml4eDst->n.u1Write))
1082 {
1083 /*
1084 * Mark not present so we can resync the PML4E when it's used.
1085 */
1086 LogFlow(("InvalidatePage: Out-of-sync PML4E at %RGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
1087 GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
1088 pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
1089 pPml4eDst->u = 0;
1090 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
1091 PGM_INVL_GUEST_TLBS();
1092 }
1093 else if (!pPml4eSrc->n.u1Accessed)
1094 {
1095 /*
1096 * Mark not present so we can set the accessed bit.
1097 */
1098 LogFlow(("InvalidatePage: Out-of-sync PML4E (A) at %RGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
1099 GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
1100 pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
1101 pPml4eDst->u = 0;
1102 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
1103 PGM_INVL_GUEST_TLBS();
1104 }
1105
1106 /* Check if the PDPT entry has changed. */
1107 Assert(pPdpeDst->n.u1Present && pPdpeDst->u & SHW_PDPT_MASK);
1108 RTGCPHYS GCPhysPd = PdpeSrc.u & GST_PDPE_PG_MASK;
1109 if ( !PdpeSrc.n.u1Present
1110 || pShwPde->GCPhys != GCPhysPd)
1111 {
1112 LogFlow(("InvalidatePage: Out-of-sync PDPE (P/GCPhys) at %RGv GCPhys=%RGp vs %RGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
1113 GCPtrPage, pShwPde->GCPhys, GCPhysPd, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
1114 pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
1115 pPdpeDst->u = 0;
1116 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
1117 PGM_INVL_GUEST_TLBS();
1118 return VINF_SUCCESS;
1119 }
1120 if ( PdpeSrc.lm.u1User != pPdpeDst->lm.u1User
1121 || (!PdpeSrc.lm.u1Write && pPdpeDst->lm.u1Write))
1122 {
1123 /*
1124 * Mark not present so we can resync the PDPTE when it's used.
1125 */
1126 LogFlow(("InvalidatePage: Out-of-sync PDPE at %RGv PdpeSrc=%RX64 PdpeDst=%RX64\n",
1127 GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
1128 pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
1129 pPdpeDst->u = 0;
1130 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
1131 PGM_INVL_GUEST_TLBS();
1132 }
1133 else if (!PdpeSrc.lm.u1Accessed)
1134 {
1135 /*
1136 * Mark not present so we can set the accessed bit.
1137 */
1138 LogFlow(("InvalidatePage: Out-of-sync PDPE (A) at %RGv PdpeSrc=%RX64 PdpeDst=%RX64\n",
1139 GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
1140 pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
1141 pPdpeDst->u = 0;
1142 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
1143 PGM_INVL_GUEST_TLBS();
1144 }
1145# endif /* PGM_GST_TYPE == PGM_TYPE_AMD64 */
1146
1147# if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
1148 /*
1149 * Update the shadow PDPE and free all the shadow PD entries if the PDPE is marked not present.
1150 * Note: This shouldn't actually be necessary as we monitor the PDPT page for changes.
1151 */
1152 if (!pPDSrc)
1153 {
1154 /* Guest PDPE not present */
1155 PX86PDPAE pPDDst = pgmShwGetPaePDPtr(&pVM->pgm.s, GCPtrPage);
1156 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1157
1158 Assert(!PdpeSrc.n.u1Present);
1159 LogFlow(("InvalidatePage: guest PDPE %d not present; clear shw pdpe\n", iPdpt));
1160
1161 /* for each page directory entry */
1162 for (unsigned iPD = 0; iPD < X86_PG_PAE_ENTRIES; iPD++)
1163 {
1164 if ( pPDDst->a[iPD].n.u1Present
1165 && !(pPDDst->a[iPD].u & PGM_PDFLAGS_MAPPING))
1166 {
1167 pgmPoolFree(pVM, pPDDst->a[iPD].u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPdpt * X86_PG_PAE_ENTRIES + iPD);
1168 pPDDst->a[iPD].u = 0;
1169 }
1170 }
1171 if (!(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
1172 pPdptDst->a[iPdpt].n.u1Present = 0;
1173 PGM_INVL_GUEST_TLBS();
1174 }
1175 AssertMsg(pVM->pgm.s.fMappingsFixed || (PdpeSrc.u & X86_PDPE_PG_MASK) == pVM->pgm.s.aGCPhysGstPaePDsMonitored[iPdpt], ("%RGp vs %RGp (mon)\n", (PdpeSrc.u & X86_PDPE_PG_MASK), pVM->pgm.s.aGCPhysGstPaePDsMonitored[iPdpt]));
1176# endif
1177
1178
1179 /*
1180 * Deal with the Guest PDE.
1181 */
1182 rc = VINF_SUCCESS;
1183 if (PdeSrc.n.u1Present)
1184 {
1185 if (PdeDst.u & PGM_PDFLAGS_MAPPING)
1186 {
1187 /*
1188 * Conflict - Let SyncPT deal with it to avoid duplicate code.
1189 */
1190 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
1191 Assert(PGMGetGuestMode(pVM) <= PGMMODE_PAE);
1192 rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, GCPtrPage);
1193 }
1194 else if ( PdeSrc.n.u1User != PdeDst.n.u1User
1195 || (!PdeSrc.n.u1Write && PdeDst.n.u1Write))
1196 {
1197 /*
1198 * Mark not present so we can resync the PDE when it's used.
1199 */
1200 LogFlow(("InvalidatePage: Out-of-sync at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
1201 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
1202# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
1203 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
1204# else
1205 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
1206# endif
1207 pPdeDst->u = 0;
1208 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
1209 PGM_INVL_GUEST_TLBS();
1210 }
1211 else if (!PdeSrc.n.u1Accessed)
1212 {
1213 /*
1214 * Mark not present so we can set the accessed bit.
1215 */
1216 LogFlow(("InvalidatePage: Out-of-sync (A) at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
1217 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
1218# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
1219 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
1220# else
1221 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
1222# endif
1223 pPdeDst->u = 0;
1224 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
1225 PGM_INVL_GUEST_TLBS();
1226 }
1227 else if (!fIsBigPage)
1228 {
1229 /*
1230 * 4KB - page.
1231 */
1232 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
1233 RTGCPHYS GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
1234# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1235 /* Select the right PDE as we're emulating a 4KB page table with two shadow page tables. */
1236 GCPhys |= (iPDDst & 1) * (PAGE_SIZE/2);
1237# endif
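/*
 * Illustration of the (iPDDst & 1) * (PAGE_SIZE/2) bias above: a 32-bit
 * guest page table has 1024 4-byte entries covering 4MB, while a PAE
 * shadow page table has 512 8-byte entries covering 2MB, so each guest PT
 * is shadowed by two PAE PTs. An odd shadow PD index biases GCPhys by
 * 0x800, the byte offset of guest PTE #512, letting pShwPage->GCPhys
 * record which half of the guest table this shadow PT covers.
 */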
1238 if (pShwPage->GCPhys == GCPhys)
1239 {
1240# if 0 /* likely cause of a major performance regression; must be SyncPageWorkerTrackDeref then */
1241 const unsigned iPTEDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1242 PSHWPT pPT = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1243 if (pPT->a[iPTEDst].n.u1Present)
1244 {
1245# ifdef PGMPOOL_WITH_USER_TRACKING
1246 /* This is very unlikely with caching/monitoring enabled. */
1247 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPT->a[iPTEDst].u & SHW_PTE_PG_MASK);
1248# endif
1249 pPT->a[iPTEDst].u = 0;
1250 }
1251# else /* Syncing it here isn't 100% safe and it's probably not worth spending time syncing it. */
1252 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, GCPtrPage, 1, 0);
1253 if (RT_SUCCESS(rc))
1254 rc = VINF_SUCCESS;
1255# endif
1256 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage4KBPages));
1257 PGM_INVL_PG(GCPtrPage);
1258 }
1259 else
1260 {
1261 /*
1262 * The page table address changed.
1263 */
1264 LogFlow(("InvalidatePage: Out-of-sync at %RGp PdeSrc=%RX64 PdeDst=%RX64 ShwGCPhys=%RGp iPDDst=%#x\n",
1265 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, iPDDst));
1266# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
1267 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
1268# else
1269 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
1270# endif
1271 pPdeDst->u = 0;
1272 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
1273 PGM_INVL_GUEST_TLBS();
1274 }
1275 }
1276 else
1277 {
1278 /*
1279 * 2/4MB - page.
1280 */
1281 /* Before freeing the page, check if anything really changed. */
1282 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
1283 RTGCPHYS GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
1284# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1285 /* Select the right PDE as we're emulating a 4MB page with two 2MB shadow PDEs. */
1286 GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
1287# endif
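/*
 * Same idea for big pages: a guest 4MB page is shadowed by two 2MB PAE
 * PDEs, and bit 21 of the address (1 << X86_PD_PAE_SHIFT = 0x200000)
 * records which 2MB half this shadow page covers.
 */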
1288 if ( pShwPage->GCPhys == GCPhys
1289 && pShwPage->enmKind == BTH_PGMPOOLKIND_PT_FOR_BIG)
1290 {
1291 /* ASSUMES the given bits are identical for 4M and normal PDEs */
1292 /** @todo PAT */
1293 if ( (PdeSrc.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD))
1294 == (PdeDst.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD))
1295 && ( PdeSrc.b.u1Dirty /** @todo rainy day: What about read-only 4M pages? not very common, but still... */
1296 || (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)))
1297 {
1298 LogFlow(("Skipping flush for big page containing %RGv (PD=%X .u=%RX64) -> nothing has changed!\n", GCPtrPage, iPDSrc, PdeSrc.u));
1299 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage4MBPagesSkip));
1300 return VINF_SUCCESS;
1301 }
1302 }
1303
1304 /*
1305 * Ok, the page table is present and it's been changed in the guest.
1306 * If we're in host context, we'll just mark it as not present taking the lazy approach.
1307 * We could do this for some flushes in GC too, but we need an algorithm for
1308 * deciding which 4MB pages contain code likely to be executed very soon.
1309 */
1310 LogFlow(("InvalidatePage: Out-of-sync PD at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
1311 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
1312# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
1313 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
1314# else
1315 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
1316# endif
1317 pPdeDst->u = 0;
1318 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage4MBPages));
1319 PGM_INVL_BIG_PG(GCPtrPage);
1320 }
1321 }
1322 else
1323 {
1324 /*
1325 * Page directory is not present, mark shadow PDE not present.
1326 */
1327 if (!(PdeDst.u & PGM_PDFLAGS_MAPPING))
1328 {
1329# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
1330 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
1331# else
1332 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
1333# endif
1334 pPdeDst->u = 0;
1335 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
1336 PGM_INVL_PG(GCPtrPage);
1337 }
1338 else
1339 {
1340 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
1341 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDMappings));
1342 }
1343 }
1344
1345 return rc;
1346
1347#else /* guest real and protected mode */
1348 /* There's no such thing as InvalidatePage when paging is disabled, so just ignore. */
1349 return VINF_SUCCESS;
1350#endif
1351}
1352
1353
1354#ifdef PGMPOOL_WITH_USER_TRACKING
1355/**
1356 * Update the tracking of shadowed pages.
1357 *
1358 * @param pVM The VM handle.
1359 * @param pShwPage The shadow page.
1360 * @param HCPhys The physical page that is being dereferenced.
1361 */
1362DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVM pVM, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys)
1363{
1364# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
1365 STAM_PROFILE_START(&pVM->pgm.s.StatTrackDeref, a);
1366 LogFlow(("SyncPageWorkerTrackDeref: Damn HCPhys=%RHp pShwPage->idx=%#x!!!\n", HCPhys, pShwPage->idx));
1367
1368 /** @todo If this turns out to be a bottleneck (*very* likely) two things can be done:
1369 * 1. have a medium sized HCPhys -> GCPhys TLB (hash?)
1370 * 2. write protect all shadowed pages. I.e. implement caching.
1371 */
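/*
 * A rough sketch of idea 1 above (hypothetical; the type and names are
 * made up for illustration, nothing like this exists in this revision):
 *
 *     typedef struct { RTHCPHYS HCPhys; PPGMPAGE pPage; } PGMHC2GCTLBE;
 *     static PGMHC2GCTLBE s_aTlb[256];
 *     PGMHC2GCTLBE *pTlbe = &s_aTlb[(HCPhys >> PAGE_SHIFT) & 255];
 *     if (pTlbe->HCPhys == HCPhys)
 *         return pTlbe->pPage;        // hit: skip the linear scan below
 *     // miss: do the RAM range scan, then fill *pTlbe for next time
 *
 * Idea 2 amounts to the caching/monitoring scheme mentioned elsewhere in
 * this file.
 */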
1372 /*
1373 * Find the guest address.
1374 */
1375 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1376 pRam;
1377 pRam = pRam->CTX_SUFF(pNext))
1378 {
1379 unsigned iPage = pRam->cb >> PAGE_SHIFT;
1380 while (iPage-- > 0)
1381 {
1382 if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
1383 {
1384 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1385 pgmTrackDerefGCPhys(pPool, pShwPage, &pRam->aPages[iPage]);
1386 pShwPage->cPresent--;
1387 pPool->cPresent--;
1388 STAM_PROFILE_STOP(&pVM->pgm.s.StatTrackDeref, a);
1389 return;
1390 }
1391 }
1392 }
1393
1394 for (;;)
1395 AssertReleaseMsgFailed(("HCPhys=%RHp wasn't found!\n", HCPhys));
1396# else /* !PGMPOOL_WITH_GCPHYS_TRACKING */
1397 pShwPage->cPresent--;
1398 pVM->pgm.s.CTX_SUFF(pPool)->cPresent--;
1399# endif /* !PGMPOOL_WITH_GCPHYS_TRACKING */
1400}
1401
1402
1403/**
1404 * Update the tracking of shadowed pages.
1405 *
1406 * @param pVM The VM handle.
1407 * @param pShwPage The shadow page.
1408 * @param u16 The top 16 bits of pPage->HCPhys.
1409 * @param pPage Pointer to the guest page. This will be modified.
1410 * @param iPTDst The index into the shadow table.
1411 */
1412DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackAddref)(PVM pVM, PPGMPOOLPAGE pShwPage, uint16_t u16, PPGMPAGE pPage, const unsigned iPTDst)
1413{
1414# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
1415 /*
1416 * Just deal with the simple first-time case here.
1417 */
1418 if (!u16)
1419 {
1420 STAM_COUNTER_INC(&pVM->pgm.s.StatTrackVirgin);
1421 u16 = PGMPOOL_TD_MAKE(1, pShwPage->idx);
1422 }
1423 else
1424 u16 = pgmPoolTrackPhysExtAddref(pVM, u16, pShwPage->idx);
1425
1426 /* write back */
1427 Log2(("SyncPageWorkerTrackAddRef: u16=%#x->%#x iPTDst=%#x\n", u16, PGM_PAGE_GET_TRACKING(pPage), iPTDst));
1428 PGM_PAGE_SET_TRACKING(pPage, u16);
1429
1430# endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
1431
1432 /* update statistics. */
1433 pVM->pgm.s.CTX_SUFF(pPool)->cPresent++;
1434 pShwPage->cPresent++;
1435 if (pShwPage->iFirstPresent > iPTDst)
1436 pShwPage->iFirstPresent = iPTDst;
1437}
1438#endif /* PGMPOOL_WITH_USER_TRACKING */
1439
1440
1441/**
1442 * Creates a 4K shadow page for a guest page.
1443 *
1444 * For 4M pages the caller must convert the PDE4M to a PTE; this includes adjusting the
1445 * physical address. Of the PdeSrc argument only the flags are used. No page structures
1446 * will be mapped in this function.
1447 *
1448 * @param pVM VM handle.
1449 * @param pPteDst Destination page table entry.
1450 * @param PdeSrc Source page directory entry (i.e. Guest OS page directory entry).
1451 * Can safely assume that only the flags are being used.
1452 * @param PteSrc Source page table entry (i.e. Guest OS page table entry).
1453 * @param pShwPage Pointer to the shadow page.
1454 * @param iPTDst The index into the shadow table.
1455 *
1456 * @remark Not used for 2/4MB pages!
1457 */
1458DECLINLINE(void) PGM_BTH_NAME(SyncPageWorker)(PVM pVM, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc, PPGMPOOLPAGE pShwPage, unsigned iPTDst)
1459{
1460 if (PteSrc.n.u1Present)
1461 {
1462 /*
1463 * Find the ram range.
1464 */
1465 PPGMPAGE pPage;
1466 int rc = pgmPhysGetPageEx(&pVM->pgm.s, PteSrc.u & GST_PTE_PG_MASK, &pPage);
1467 if (RT_SUCCESS(rc))
1468 {
1469 /** @todo investigate PWT, PCD and PAT. */
1470 /*
1471 * Make page table entry.
1472 */
1473 const RTHCPHYS HCPhys = pPage->HCPhys; /** @todo FLAGS */
1474 SHWPTE PteDst;
1475 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1476 {
1477 /** @todo r=bird: Are we actually handling dirty and access bits for pages with access handlers correctly? No. */
1478 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
1479 {
1480#if PGM_SHW_TYPE == PGM_TYPE_EPT
1481 PteDst.u = PGM_PAGE_GET_HCPHYS(pPage);
1482 PteDst.n.u1Present = 1;
1483 PteDst.n.u1Execute = 1;
1484 PteDst.n.u1IgnorePAT = 1;
1485 PteDst.n.u3EMT = VMX_EPT_MEMTYPE_WB;
1486 /* PteDst.n.u1Write = 0 && PteDst.n.u1Size = 0 */
1487#else
1488 PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
1489 | (HCPhys & X86_PTE_PAE_PG_MASK);
1490#endif
1491 }
1492 else
1493 {
1494 LogFlow(("SyncPageWorker: monitored page (%RHp) -> mark not present\n", HCPhys));
1495 PteDst.u = 0;
1496 }
1497 /** @todo count these two kinds. */
1498 }
1499 else
1500 {
1501#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
1502 /*
1503 * If the page or page directory entry is not marked accessed,
1504 * we mark the page not present.
1505 */
1506 if (!PteSrc.n.u1Accessed || !PdeSrc.n.u1Accessed)
1507 {
1508 LogFlow(("SyncPageWorker: page and or page directory not accessed -> mark not present\n"));
1509 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,AccessedPage));
1510 PteDst.u = 0;
1511 }
1512 else
1513 /*
1514 * If the page is not flagged as dirty and is writable, then make it read-only, so we can set the dirty bit
1515 * when the page is modified.
1516 */
1517 if (!PteSrc.n.u1Dirty && (PdeSrc.n.u1Write & PteSrc.n.u1Write))
1518 {
1519 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPage));
1520 PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
1521 | (HCPhys & X86_PTE_PAE_PG_MASK)
1522 | PGM_PTFLAGS_TRACK_DIRTY;
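 /* The write fault this read-only mapping provokes is recognized by
 CheckPageFault via PGM_PTFLAGS_TRACK_DIRTY and handled as a dirty
 bit update instead of being reflected to the guest. */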
1523 }
1524 else
1525#endif
1526 {
1527 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageSkipped));
1528#if PGM_SHW_TYPE == PGM_TYPE_EPT
1529 PteDst.u = PGM_PAGE_GET_HCPHYS(pPage);
1530 PteDst.n.u1Present = 1;
1531 PteDst.n.u1Write = 1;
1532 PteDst.n.u1Execute = 1;
1533 PteDst.n.u1IgnorePAT = 1;
1534 PteDst.n.u3EMT = VMX_EPT_MEMTYPE_WB;
1535 /* PteDst.n.u1Size = 0 */
1536#else
1537 PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
1538 | (HCPhys & X86_PTE_PAE_PG_MASK);
1539#endif
1540 }
1541 }
1542
1543#ifdef PGMPOOL_WITH_USER_TRACKING
1544 /*
1545 * Keep user track up to date.
1546 */
1547 if (PteDst.n.u1Present)
1548 {
1549 if (!pPteDst->n.u1Present)
1550 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst);
1551 else if ((pPteDst->u & SHW_PTE_PG_MASK) != (PteDst.u & SHW_PTE_PG_MASK))
1552 {
1553 Log2(("SyncPageWorker: deref! *pPteDst=%RX64 PteDst=%RX64\n", (uint64_t)pPteDst->u, (uint64_t)PteDst.u));
1554 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
1555 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst);
1556 }
1557 }
1558 else if (pPteDst->n.u1Present)
1559 {
1560 Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u));
1561 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
1562 }
1563#endif /* PGMPOOL_WITH_USER_TRACKING */
1564
1565 /*
1566 * Update statistics and commit the entry.
1567 */
1568#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
1569 if (!PteSrc.n.u1Global)
1570 pShwPage->fSeenNonGlobal = true;
1571#endif
1572 *pPteDst = PteDst;
1573 }
1574 /* else MMIO or invalid page, we must handle them manually in the #PF handler. */
1575 /** @todo count these. */
1576 }
1577 else
1578 {
1579 /*
1580 * Page not-present.
1581 */
1582 LogFlow(("SyncPageWorker: page not present in Pte\n"));
1583#ifdef PGMPOOL_WITH_USER_TRACKING
1584 /* Keep user track up to date. */
1585 if (pPteDst->n.u1Present)
1586 {
1587 Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u));
1588 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
1589 }
1590#endif /* PGMPOOL_WITH_USER_TRACKING */
1591 pPteDst->u = 0;
1592 /** @todo count these. */
1593 }
1594}
1595
1596
1597/**
1598 * Syncs a guest OS page.
1599 *
1600 * There are no conflicts at this point, nor is there any need for
1601 * page table allocations.
1602 *
1603 * @returns VBox status code.
1604 * @returns VINF_PGM_SYNCPAGE_MODIFIED_PDE if it modifies the PDE in any way.
1605 * @param pVM VM handle.
1606 * @param PdeSrc Page directory entry of the guest.
1607 * @param GCPtrPage Guest context page address.
1608 * @param cPages Number of pages to sync (PGM_SYNC_N_PAGES) (default=1).
1609 * @param uErr Fault error (X86_TRAP_PF_*).
1610 */
1611PGM_BTH_DECL(int, SyncPage)(PVM pVM, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr)
1612{
1613 LogFlow(("SyncPage: GCPtrPage=%RGv cPages=%u uErr=%#x\n", GCPtrPage, cPages, uErr));
1614
1615#if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
1616 || PGM_GST_TYPE == PGM_TYPE_PAE \
1617 || PGM_GST_TYPE == PGM_TYPE_AMD64) \
1618 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
1619 && PGM_SHW_TYPE != PGM_TYPE_EPT
1620
1621# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
1622 bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
1623# endif
1624
1625 /*
1626 * Assert preconditions.
1627 */
1628 Assert(PdeSrc.n.u1Present);
1629 Assert(cPages);
1630 STAM_COUNTER_INC(&pVM->pgm.s.StatSyncPagePD[(GCPtrPage >> GST_PD_SHIFT) & GST_PD_MASK]);
1631
1632 /*
1633 * Get the shadow PDE, find the shadow page table in the pool.
1634 */
1635# if PGM_SHW_TYPE == PGM_TYPE_32BIT
1636 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
1637 PX86PDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
1638
1639# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1640 /* Fetch the pgm pool shadow descriptor. */
1641 PPGMPOOLPAGE pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
1642 Assert(pShwPde);
1643# endif
1644
1645# elif PGM_SHW_TYPE == PGM_TYPE_PAE
1646
1647# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1648 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
1649 PPGMPOOLPAGE pShwPde;
1650 PX86PDPAE pPDDst;
1651
1652 /* Fetch the pgm pool shadow descriptor. */
1653 int rc = pgmShwGetPaePoolPagePD(&pVM->pgm.s, GCPtrPage, &pShwPde);
1654 AssertRCSuccessReturn(rc, rc);
1655 Assert(pShwPde);
1656
1657 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
1658 PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
1659# else
1660 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - only pool index atm! */;
1661 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT);
1662 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s); NOREF(pPdptDst);
1663 PX86PDEPAE pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
1664 AssertReturn(pPdeDst, VERR_INTERNAL_ERROR);
1665# endif
1666# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
1667 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
1668 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1669 PX86PDPAE pPDDst;
1670 PX86PDPT pPdptDst;
1671
1672 int rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, NULL, &pPdptDst, &pPDDst);
1673 AssertRCSuccessReturn(rc, rc);
1674 Assert(pPDDst && pPdptDst);
1675 PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
1676# endif
1677
1678 SHWPDE PdeDst = *pPdeDst;
1679 AssertMsg(PdeDst.n.u1Present, ("%p=%llx\n", pPdeDst, (uint64_t)PdeDst.u));
1680 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
1681
1682# if PGM_GST_TYPE == PGM_TYPE_AMD64
1683 /* Fetch the pgm pool shadow descriptor. */
1684 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
1685 Assert(pShwPde);
1686# endif
1687
1688 /*
1689 * Check that the page is present and that the shadow PDE isn't out of sync.
1690 */
1691# if PGM_GST_TYPE == PGM_TYPE_AMD64
1692 const bool fBigPage = PdeSrc.b.u1Size;
1693# else
1694 const bool fBigPage = PdeSrc.b.u1Size && (CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
1695# endif
1696 RTGCPHYS GCPhys;
1697 if (!fBigPage)
1698 {
1699 GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
1700# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1701 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
1702 GCPhys |= (iPDDst & 1) * (PAGE_SIZE/2);
1703# endif
1704 }
1705 else
1706 {
1707 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
1708# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1709 /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
1710 GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
1711# endif
1712 }
1713 if ( pShwPage->GCPhys == GCPhys
1714 && PdeSrc.n.u1Present
1715 && (PdeSrc.n.u1User == PdeDst.n.u1User)
1716 && (PdeSrc.n.u1Write == PdeDst.n.u1Write || !PdeDst.n.u1Write)
1717# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
1718 && (!fNoExecuteBitValid || PdeSrc.n.u1NoExecute == PdeDst.n.u1NoExecute)
1719# endif
1720 )
1721 {
1722 /*
1723 * Check that the PDE is marked accessed already.
1724 * Since we set the accessed bit *before* getting here on a #PF, this
1725 * check is only meant for dealing with non-#PF'ing paths.
1726 */
1727 if (PdeSrc.n.u1Accessed)
1728 {
1729 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1730 if (!fBigPage)
1731 {
1732 /*
1733 * 4KB Page - Map the guest page table.
1734 */
1735 PGSTPT pPTSrc;
1736 int rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
1737 if (RT_SUCCESS(rc))
1738 {
1739# ifdef PGM_SYNC_N_PAGES
1740 Assert(cPages == 1 || !(uErr & X86_TRAP_PF_P));
1741 if (cPages > 1 && !(uErr & X86_TRAP_PF_P))
1742 {
1743 /*
1744 * This code path is currently only taken when the caller is PGMTrap0eHandler
1745 * for non-present pages!
1746 *
1747 * We're setting PGM_SYNC_NR_PAGES pages around the faulting page to sync it and
1748 * deal with locality.
1749 */
1750 unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1751# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1752 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
1753 const unsigned offPTSrc = ((GCPtrPage >> SHW_PD_SHIFT) & 1) * 512;
1754# else
1755 const unsigned offPTSrc = 0;
1756# endif
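 /* Sync a window of PGM_SYNC_NR_PAGES entries centered on the faulting
 page, clamped to the bounds of the shadow page table. */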
1757 const unsigned iPTDstEnd = RT_MIN(iPTDst + PGM_SYNC_NR_PAGES / 2, RT_ELEMENTS(pPTDst->a));
1758 if (iPTDst < PGM_SYNC_NR_PAGES / 2)
1759 iPTDst = 0;
1760 else
1761 iPTDst -= PGM_SYNC_NR_PAGES / 2;
1762 for (; iPTDst < iPTDstEnd; iPTDst++)
1763 {
1764 if (!pPTDst->a[iPTDst].n.u1Present)
1765 {
1766 GSTPTE PteSrc = pPTSrc->a[offPTSrc + iPTDst];
1767 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(GST_PT_MASK << GST_PT_SHIFT)) | ((offPTSrc + iPTDst) << PAGE_SHIFT);
1768 NOREF(GCPtrCurPage);
1769#ifndef IN_RING0
1770 /*
1771 * Assume kernel code is marked as supervisor (and not as user level code
1772 * executed using a conforming code selector) and marked as read-only.
1773 * Also assume that if we're monitoring a page, it's of no interest to CSAM.
1774 */
1775 PPGMPAGE pPage;
1776 if ( ((PdeSrc.u & PteSrc.u) & (X86_PTE_RW | X86_PTE_US))
1777 || iPTDst == ((GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK) /* always sync GCPtrPage */
1778 || !CSAMDoesPageNeedScanning(pVM, (RTRCPTR)GCPtrCurPage)
1779 || ( (pPage = pgmPhysGetPage(&pVM->pgm.s, PteSrc.u & GST_PTE_PG_MASK))
1780 && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1781 )
1782#endif /* else: CSAM not active */
1783 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
1784 Log2(("SyncPage: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx%s\n",
1785 GCPtrCurPage, PteSrc.n.u1Present,
1786 PteSrc.n.u1Write & PdeSrc.n.u1Write,
1787 PteSrc.n.u1User & PdeSrc.n.u1User,
1788 (uint64_t)PteSrc.u,
1789 (uint64_t)pPTDst->a[iPTDst].u,
1790 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1791 }
1792 }
1793 }
1794 else
1795# endif /* PGM_SYNC_N_PAGES */
1796 {
1797 const unsigned iPTSrc = (GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK;
1798 GSTPTE PteSrc = pPTSrc->a[iPTSrc];
1799 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1800 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
1801 Log2(("SyncPage: 4K %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s\n",
1802 GCPtrPage, PteSrc.n.u1Present,
1803 PteSrc.n.u1Write & PdeSrc.n.u1Write,
1804 PteSrc.n.u1User & PdeSrc.n.u1User,
1805 (uint64_t)PteSrc.u,
1806 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1807 }
1808 }
1809 else /* MMIO or invalid page: emulated in #PF handler. */
1810 {
1811 LogFlow(("PGM_GCPHYS_2_PTR %RGp failed with %Rrc\n", GCPhys, rc));
1812 Assert(!pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK].n.u1Present);
1813 }
1814 }
1815 else
1816 {
1817 /*
1818 * 4/2MB page - lazy syncing shadow 4K pages.
1819 * (There are many causes of getting here; it's no longer only CSAM.)
1820 */
1821 /* Calculate the GC physical address of this 4KB shadow page. */
1822 RTGCPHYS GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc) | (GCPtrPage & GST_BIG_PAGE_OFFSET_MASK);
1823 /* Find ram range. */
1824 PPGMPAGE pPage;
1825 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1826 if (RT_SUCCESS(rc))
1827 {
1828 /*
1829 * Make shadow PTE entry.
1830 */
1831 SHWPTE PteDst;
1832 PteDst.u = (PdeSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
1833 | PGM_PAGE_GET_HCPHYS(pPage);
1834 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1835 {
1836 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
1837 PteDst.n.u1Write = 0;
1838 else
1839 PteDst.u = 0;
1840 }
1841 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1842# ifdef PGMPOOL_WITH_USER_TRACKING
1843 if (PteDst.n.u1Present && !pPTDst->a[iPTDst].n.u1Present)
1844 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
1845# endif
1846 pPTDst->a[iPTDst] = PteDst;
1847
1848
1849 /*
1850 * If the page is not flagged as dirty and is writable, then make it read-only
1851 * at PD level, so we can set the dirty bit when the page is modified.
1852 *
1853 * ASSUMES that page access handlers are implemented on page table entry level.
1854 * Thus we will first catch the dirty access and set PDE.D and restart. If
1855 * there is an access handler, we'll trap again and let it work on the problem.
1856 */
1857 /** @todo r=bird: figure out why we need this here, SyncPT should've taken care of this already.
1858 * As for invlpg, it simply frees the whole shadow PT.
1859 * ...It's possibly because the guest clears it and the guest doesn't really tell us... */
1860 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
1861 {
1862 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageBig));
1863 PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
1864 PdeDst.n.u1Write = 0;
1865 }
1866 else
1867 {
1868 PdeDst.au32[0] &= ~PGM_PDFLAGS_TRACK_DIRTY;
1869 PdeDst.n.u1Write = PdeSrc.n.u1Write;
1870 }
1871 *pPdeDst = PdeDst;
1872 Log2(("SyncPage: BIG %RGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx} GCPhys=%RGp%s\n",
1873 GCPtrPage, PdeSrc.n.u1Present, PdeSrc.n.u1Write, PdeSrc.n.u1User, (uint64_t)PdeSrc.u, GCPhys,
1874 PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1875 }
1876 else
1877 LogFlow(("PGM_GCPHYS_2_PTR %RGp (big) failed with %Rrc\n", GCPhys, rc));
1878 }
1879 return VINF_SUCCESS;
1880 }
1881 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPagePDNAs));
1882 }
1883 else
1884 {
1885 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPagePDOutOfSync));
1886 Log2(("SyncPage: Out-Of-Sync PDE at %RGp PdeSrc=%RX64 PdeDst=%RX64 (GCPhys %RGp vs %RGp)\n",
1887 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, GCPhys));
1888 }
1889
1890 /*
1891 * Mark the PDE not present. Restart the instruction and let #PF call SyncPT.
1892 * Yea, I'm lazy.
1893 */
1894 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1895# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
1896 pgmPoolFreeByPage(pPool, pShwPage, pShwPde->idx, iPDDst);
1897# else
1898 pgmPoolFreeByPage(pPool, pShwPage, SHW_POOL_ROOT_IDX, iPDDst);
1899# endif
1900
1901 pPdeDst->u = 0;
1902 PGM_INVL_GUEST_TLBS();
1903 return VINF_PGM_SYNCPAGE_MODIFIED_PDE;
1904
1905#elif (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
1906 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
1907 && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
1908
1909# ifdef PGM_SYNC_N_PAGES
1910 /*
1911 * Get the shadow PDE, find the shadow page table in the pool.
1912 */
1913# if PGM_SHW_TYPE == PGM_TYPE_32BIT
1914 X86PDE PdeDst = pgmShwGet32BitPDE(&pVM->pgm.s, GCPtrPage);
1915
1916# elif PGM_SHW_TYPE == PGM_TYPE_PAE
1917 X86PDEPAE PdeDst = pgmShwGetPaePDE(&pVM->pgm.s, GCPtrPage);
1918
1919# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
1920 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
1921 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; NOREF(iPdpt);
1922 PX86PDPAE pPDDst;
1923 X86PDEPAE PdeDst;
1924 PX86PDPT pPdptDst;
1925
1926 int rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, NULL, &pPdptDst, &pPDDst);
1927 AssertRCSuccessReturn(rc, rc);
1928 Assert(pPDDst && pPdptDst);
1929 PdeDst = pPDDst->a[iPDDst];
1930# elif PGM_SHW_TYPE == PGM_TYPE_EPT
1931 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
1932 PEPTPD pPDDst;
1933 EPTPDE PdeDst;
1934
1935 int rc = pgmShwGetEPTPDPtr(pVM, GCPtrPage, NULL, &pPDDst);
1936 if (rc != VINF_SUCCESS)
1937 {
1938 AssertRC(rc);
1939 return rc;
1940 }
1941 Assert(pPDDst);
1942 PdeDst = pPDDst->a[iPDDst];
1943# endif
1944 AssertMsg(PdeDst.n.u1Present, ("%#llx\n", (uint64_t)PdeDst.u));
1945 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
1946 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1947
1948 Assert(cPages == 1 || !(uErr & X86_TRAP_PF_P));
1949 if (cPages > 1 && !(uErr & X86_TRAP_PF_P))
1950 {
1951 /*
1952 * This code path is currently only taken when the caller is PGMTrap0eHandler
1953 * for non-present pages!
1954 *
1955 * We're setting PGM_SYNC_NR_PAGES pages around the faulting page to sync it and
1956 * deal with locality.
1957 */
1958 unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1959 const unsigned iPTDstEnd = RT_MIN(iPTDst + PGM_SYNC_NR_PAGES / 2, RT_ELEMENTS(pPTDst->a));
1960 if (iPTDst < PGM_SYNC_NR_PAGES / 2)
1961 iPTDst = 0;
1962 else
1963 iPTDst -= PGM_SYNC_NR_PAGES / 2;
1964 for (; iPTDst < iPTDstEnd; iPTDst++)
1965 {
1966 if (!pPTDst->a[iPTDst].n.u1Present)
1967 {
1968 GSTPTE PteSrc;
1969
1970 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
1971
1972 /* Fake the page table entry */
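 /* (Without guest paging the address space is identity mapped, so the
 guest-virtual address doubles as the guest-physical address.) */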
1973 PteSrc.u = GCPtrCurPage;
1974 PteSrc.n.u1Present = 1;
1975 PteSrc.n.u1Dirty = 1;
1976 PteSrc.n.u1Accessed = 1;
1977 PteSrc.n.u1Write = 1;
1978 PteSrc.n.u1User = 1;
1979
1980 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
1981
1982 Log2(("SyncPage: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx%s\n",
1983 GCPtrCurPage, PteSrc.n.u1Present,
1984 PteSrc.n.u1Write & PdeSrc.n.u1Write,
1985 PteSrc.n.u1User & PdeSrc.n.u1User,
1986 (uint64_t)PteSrc.u,
1987 (uint64_t)pPTDst->a[iPTDst].u,
1988 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1989 }
1990 else
1991 Log4(("%RGv iPTDst=%x pPTDst->a[iPTDst] %RX64\n", (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT), iPTDst, pPTDst->a[iPTDst].u));
1992 }
1993 }
1994 else
1995# endif /* PGM_SYNC_N_PAGES */
1996 {
1997 GSTPTE PteSrc;
1998 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1999 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
2000
2001 /* Fake the page table entry */
2002 PteSrc.u = GCPtrCurPage;
2003 PteSrc.n.u1Present = 1;
2004 PteSrc.n.u1Dirty = 1;
2005 PteSrc.n.u1Accessed = 1;
2006 PteSrc.n.u1Write = 1;
2007 PteSrc.n.u1User = 1;
2008 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
2009
2010 Log2(("SyncPage: 4K %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}PteDst=%08llx%s\n",
2011 GCPtrPage, PteSrc.n.u1Present,
2012 PteSrc.n.u1Write & PdeSrc.n.u1Write,
2013 PteSrc.n.u1User & PdeSrc.n.u1User,
2014 (uint64_t)PteSrc.u,
2015 (uint64_t)pPTDst->a[iPTDst].u,
2016 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
2017 }
2018 return VINF_SUCCESS;
2019
2020#else
2021 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE));
2022 return VERR_INTERNAL_ERROR;
2023#endif
2024}
2025
2026
2027#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
2028/**
2029 * Investigate page fault and handle write protection page faults caused by
2030 * dirty bit tracking.
2031 *
2032 * @returns VBox status code.
2033 * @param pVM VM handle.
2034 * @param uErr Page fault error code.
2035 * @param pPdeDst Shadow page directory entry.
2036 * @param pPdeSrc Guest page directory entry.
2037 * @param GCPtrPage Guest context page address.
2038 */
2039PGM_BTH_DECL(int, CheckPageFault)(PVM pVM, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage)
2040{
2041 bool fWriteProtect = !!(CPUMGetGuestCR0(pVM) & X86_CR0_WP);
2042 bool fUserLevelFault = !!(uErr & X86_TRAP_PF_US);
2043 bool fWriteFault = !!(uErr & X86_TRAP_PF_RW);
2044# if PGM_GST_TYPE == PGM_TYPE_AMD64
2045 bool fBigPagesSupported = true;
2046# else
2047 bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
2048# endif
2049# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
2050 bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
2051# endif
2052 unsigned uPageFaultLevel;
2053 int rc;
2054
2055 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2056 LogFlow(("CheckPageFault: GCPtrPage=%RGv uErr=%#x PdeSrc=%08x\n", GCPtrPage, uErr, pPdeSrc->u));
2057
2058# if PGM_GST_TYPE == PGM_TYPE_PAE \
2059 || PGM_GST_TYPE == PGM_TYPE_AMD64
2060
2061# if PGM_GST_TYPE == PGM_TYPE_AMD64
2062 PX86PML4E pPml4eSrc;
2063 PX86PDPE pPdpeSrc;
2064
2065 pPdpeSrc = pgmGstGetLongModePDPTPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc);
2066 Assert(pPml4eSrc);
2067
2068 /*
2069 * Real page fault? (PML4E level)
2070 */
2071 if ( (uErr & X86_TRAP_PF_RSVD)
2072 || !pPml4eSrc->n.u1Present
2073 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && pPml4eSrc->n.u1NoExecute)
2074 || (fWriteFault && !pPml4eSrc->n.u1Write && (fUserLevelFault || fWriteProtect))
2075 || (fUserLevelFault && !pPml4eSrc->n.u1User)
2076 )
2077 {
2078 uPageFaultLevel = 0;
2079 goto l_UpperLevelPageFault;
2080 }
2081 Assert(pPdpeSrc);
2082
2083# else /* PAE */
2084 PX86PDPE pPdpeSrc = pgmGstGetPaePDPEPtr(&pVM->pgm.s, GCPtrPage);
2085# endif /* PAE */
2086
2087 /*
2088 * Real page fault? (PDPE level)
2089 */
2090 if ( (uErr & X86_TRAP_PF_RSVD)
2091 || !pPdpeSrc->n.u1Present
2092# if PGM_GST_TYPE == PGM_TYPE_AMD64 /* NX, r/w, u/s bits in the PDPE are long mode only */
2093 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && pPdpeSrc->lm.u1NoExecute)
2094 || (fWriteFault && !pPdpeSrc->lm.u1Write && (fUserLevelFault || fWriteProtect))
2095 || (fUserLevelFault && !pPdpeSrc->lm.u1User)
2096# endif
2097 )
2098 {
2099 uPageFaultLevel = 1;
2100 goto l_UpperLevelPageFault;
2101 }
2102# endif
2103
2104 /*
2105 * Real page fault? (PDE level)
2106 */
2107 if ( (uErr & X86_TRAP_PF_RSVD)
2108 || !pPdeSrc->n.u1Present
2109# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
2110 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && pPdeSrc->n.u1NoExecute)
2111# endif
2112 || (fWriteFault && !pPdeSrc->n.u1Write && (fUserLevelFault || fWriteProtect))
2113 || (fUserLevelFault && !pPdeSrc->n.u1User) )
2114 {
2115 uPageFaultLevel = 2;
2116 goto l_UpperLevelPageFault;
2117 }
2118
2119 /*
2120 * First check the easy case where the page directory has been marked read-only to track
2121 * the dirty bit of an emulated BIG page
2122 */
2123 if (pPdeSrc->b.u1Size && fBigPagesSupported)
2124 {
2125 /* Mark guest page directory as accessed */
2126# if PGM_GST_TYPE == PGM_TYPE_AMD64
2127 pPml4eSrc->n.u1Accessed = 1;
2128 pPdpeSrc->lm.u1Accessed = 1;
2129# endif
2130 pPdeSrc->b.u1Accessed = 1;
2131
2132 /*
2133 * Only write protection page faults are relevant here.
2134 */
2135 if (fWriteFault)
2136 {
2137 /* Mark guest page directory as dirty (BIG page only). */
2138 pPdeSrc->b.u1Dirty = 1;
2139
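 /* If the shadow PDE was write-protected purely for dirty bit tracking, restore
 write access now that the guest D bit is set, and drop the tracking marker. */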
2140 if (pPdeDst->n.u1Present && (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY))
2141 {
2142 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
2143
2144 Assert(pPdeSrc->b.u1Write);
2145
2146 pPdeDst->n.u1Write = 1;
2147 pPdeDst->n.u1Accessed = 1;
2148 pPdeDst->au32[0] &= ~PGM_PDFLAGS_TRACK_DIRTY;
2149 PGM_INVL_BIG_PG(GCPtrPage);
2150 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2151 return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;
2152 }
2153 }
2154 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2155 return VINF_PGM_NO_DIRTY_BIT_TRACKING;
2156 }
2157 /* else: 4KB page table */
2158
2159 /*
2160 * Map the guest page table.
2161 */
2162 PGSTPT pPTSrc;
2163 rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
2164 if (RT_SUCCESS(rc))
2165 {
2166 /*
2167 * Real page fault?
2168 */
2169 PGSTPTE pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
2170 const GSTPTE PteSrc = *pPteSrc;
2171 if ( !PteSrc.n.u1Present
2172# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
2173 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && PteSrc.n.u1NoExecute)
2174# endif
2175 || (fWriteFault && !PteSrc.n.u1Write && (fUserLevelFault || fWriteProtect))
2176 || (fUserLevelFault && !PteSrc.n.u1User)
2177 )
2178 {
2179 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF));
2180 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2181 LogFlow(("CheckPageFault: real page fault at %RGv PteSrc.u=%08x (2)\n", GCPtrPage, PteSrc.u));
2182
2183 /* Check the present bit as the shadow tables can cause different error codes by being out of sync.
2184 * See the 2nd case above as well.
2185 */
2186 if (pPdeSrc->n.u1Present && pPteSrc->n.u1Present)
2187 TRPMSetErrorCode(pVM, uErr | X86_TRAP_PF_P); /* page-level protection violation */
2188
2189 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2190 return VINF_EM_RAW_GUEST_TRAP;
2191 }
2192 LogFlow(("CheckPageFault: page fault at %RGv PteSrc.u=%08x\n", GCPtrPage, PteSrc.u));
2193
2194 /*
2195 * Set the accessed bits in the page directory and the page table.
2196 */
2197# if PGM_GST_TYPE == PGM_TYPE_AMD64
2198 pPml4eSrc->n.u1Accessed = 1;
2199 pPdpeSrc->lm.u1Accessed = 1;
2200# endif
2201 pPdeSrc->n.u1Accessed = 1;
2202 pPteSrc->n.u1Accessed = 1;
2203
2204 /*
2205 * Only write protection page faults are relevant here.
2206 */
2207 if (fWriteFault)
2208 {
2209 /* Write access, so mark guest entry as dirty. */
2210# ifdef VBOX_WITH_STATISTICS
2211 if (!pPteSrc->n.u1Dirty)
2212 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtiedPage));
2213 else
2214 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageAlreadyDirty));
2215# endif
2216
2217 pPteSrc->n.u1Dirty = 1;
2218
2219 if (pPdeDst->n.u1Present)
2220 {
2221#ifndef IN_RING0
2222 /* Bail out here as pgmPoolGetPageByHCPhys will return NULL and we'll crash below.
2223 * Our individual shadow handlers will provide more information and force a fatal exit.
2224 */
2225 if (MMHyperIsInsideArea(pVM, (RTGCPTR)GCPtrPage))
2226 {
2227 LogRel(("CheckPageFault: write to hypervisor region %RGv\n", GCPtrPage));
2228 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2229 return VINF_SUCCESS;
2230 }
2231#endif
2232 /*
2233 * Map shadow page table.
2234 */
2235 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, pPdeDst->u & SHW_PDE_PG_MASK);
2236 if (pShwPage)
2237 {
2238 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2239 PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
2240 if ( pPteDst->n.u1Present /** @todo Optimize accessed bit emulation? */
2241 && (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY))
2242 {
2243 LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
2244# ifdef VBOX_STRICT
2245 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
2246 if (pPage)
2247 AssertMsg(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage),
2248 ("Unexpected dirty bit tracking on monitored page %RGv (phys %RGp)!!!!!!\n", GCPtrPage, pPteSrc->u & X86_PTE_PAE_PG_MASK));
2249# endif
2250 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
2251
2252 Assert(pPteSrc->n.u1Write);
2253
2254 pPteDst->n.u1Write = 1;
2255 pPteDst->n.u1Dirty = 1;
2256 pPteDst->n.u1Accessed = 1;
2257 pPteDst->au32[0] &= ~PGM_PTFLAGS_TRACK_DIRTY;
2258 PGM_INVL_PG(GCPtrPage);
2259
2260 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2261 return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;
2262 }
2263 }
2264 else
2265 AssertMsgFailed(("pgmPoolGetPageByHCPhys %RGp failed!\n", pPdeDst->u & SHW_PDE_PG_MASK));
2266 }
2267 }
2268/** @todo Optimize accessed bit emulation? */
2269# ifdef VBOX_STRICT
2270 /*
2271 * Sanity check.
2272 */
2273 else if ( !pPteSrc->n.u1Dirty
2274 && (pPdeSrc->n.u1Write & pPteSrc->n.u1Write)
2275 && pPdeDst->n.u1Present)
2276 {
2277 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, pPdeDst->u & SHW_PDE_PG_MASK);
2278 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2279 PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
2280 if ( pPteDst->n.u1Present
2281 && pPteDst->n.u1Write)
2282 LogFlow(("Writable present page %RGv not marked for dirty bit tracking!!!\n", GCPtrPage));
2283 }
2284# endif /* VBOX_STRICT */
2285 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2286 return VINF_PGM_NO_DIRTY_BIT_TRACKING;
2287 }
2288 AssertRC(rc);
2289 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2290 return rc;
2291
2292
2293l_UpperLevelPageFault:
2294 /*
2295 * Pagefault detected while checking the PML4E, PDPE or PDE.
2296 * Single exit handler to get rid of duplicate code paths.
2297 */
2298 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF));
2299 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2300 Log(("CheckPageFault: real page fault at %RGv (%d)\n", GCPtrPage, uPageFaultLevel));
2301
2302 if (
2303# if PGM_GST_TYPE == PGM_TYPE_AMD64
2304 pPml4eSrc->n.u1Present &&
2305# endif
2306# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
2307 pPdpeSrc->n.u1Present &&
2308# endif
2309 pPdeSrc->n.u1Present)
2310 {
2311 /* Check the present bit as the shadow tables can cause different error codes by being out of sync. */
2312 if (pPdeSrc->b.u1Size && fBigPagesSupported)
2313 {
2314 TRPMSetErrorCode(pVM, uErr | X86_TRAP_PF_P); /* page-level protection violation */
2315 }
2316 else
2317 {
2318 /*
2319 * Map the guest page table.
2320 */
2321 PGSTPT pPTSrc;
2322 rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
2323 if (RT_SUCCESS(rc))
2324 {
2325 PGSTPTE pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
2326 const GSTPTE PteSrc = *pPteSrc;
2327 if (pPteSrc->n.u1Present)
2328 TRPMSetErrorCode(pVM, uErr | X86_TRAP_PF_P); /* page-level protection violation */
2329 }
2330 AssertRC(rc);
2331 }
2332 }
2333 return VINF_EM_RAW_GUEST_TRAP;
2334}
2335#endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
2336
2337
2338/**
2339 * Sync a shadow page table.
2340 *
2341 * The shadow page table is not present. This includes the case where
2342 * there is a conflict with a mapping.
2343 *
2344 * @returns VBox status code.
2345 * @param pVM VM handle.
2346 * @param iPDSrc Page directory index.
2347 * @param pPDSrc Source page directory (i.e. Guest OS page directory).
2348 * Assume this is a temporary mapping.
2349 * @param GCPtrPage GC pointer of the page that caused the fault.
2350 */
2351PGM_BTH_DECL(int, SyncPT)(PVM pVM, unsigned iPDSrc, PGSTPD pPDSrc, RTGCPTR GCPtrPage)
2352{
2353 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2354 STAM_COUNTER_INC(&pVM->pgm.s.StatSyncPtPD[iPDSrc]);
2355 LogFlow(("SyncPT: GCPtrPage=%RGv\n", GCPtrPage));
2356
2357#if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
2358 || PGM_GST_TYPE == PGM_TYPE_PAE \
2359 || PGM_GST_TYPE == PGM_TYPE_AMD64) \
2360 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
2361 && PGM_SHW_TYPE != PGM_TYPE_EPT
2362
2363 int rc = VINF_SUCCESS;
2364
2365 /*
2366 * Validate input a little bit.
2367 */
2368 AssertMsg(iPDSrc == ((GCPtrPage >> GST_PD_SHIFT) & GST_PD_MASK), ("iPDSrc=%x GCPtrPage=%RGv\n", iPDSrc, GCPtrPage));
2369# if PGM_SHW_TYPE == PGM_TYPE_32BIT
2370 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT;
2371 PSHWPDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
2372
2373# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2374 /* Fetch the pgm pool shadow descriptor. */
2375 PPGMPOOLPAGE pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
2376 Assert(pShwPde);
2377# endif
2378
2379# elif PGM_SHW_TYPE == PGM_TYPE_PAE
2380# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2381 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2382 PPGMPOOLPAGE pShwPde;
2383 PX86PDPAE pPDDst;
2384 PSHWPDE pPdeDst;
2385
2386 /* Fetch the pgm pool shadow descriptor. */
2387 rc = pgmShwGetPaePoolPagePD(&pVM->pgm.s, GCPtrPage, &pShwPde);
2388 AssertRCSuccessReturn(rc, rc);
2389 Assert(pShwPde);
2390
2391 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
2392 pPdeDst = &pPDDst->a[iPDDst];
2393# else
2394 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - only pool index atm! */;
2395 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT); NOREF(iPdpt);
2396 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s); NOREF(pPdptDst);
2397 PSHWPDE pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
2398# endif
2399# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
2400 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
2401 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2402 PX86PDPAE pPDDst;
2403 PX86PDPT pPdptDst;
2404 rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, NULL, &pPdptDst, &pPDDst);
2405 AssertRCSuccessReturn(rc, rc);
2406 Assert(pPDDst);
2407 PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
2408# endif
2409 SHWPDE PdeDst = *pPdeDst;
2410
2411# if PGM_GST_TYPE == PGM_TYPE_AMD64
2412 /* Fetch the pgm pool shadow descriptor. */
2413 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
2414 Assert(pShwPde);
2415# endif
2416
2417# ifndef PGM_WITHOUT_MAPPINGS
2418 /*
2419 * Check for conflicts.
2420 * GC: In case of a conflict we'll go to Ring-3 and do a full SyncCR3.
2421 * HC: Simply resolve the conflict.
2422 */
2423 if (PdeDst.u & PGM_PDFLAGS_MAPPING)
2424 {
2425 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
2426# ifndef IN_RING3
2427 Log(("SyncPT: Conflict at %RGv\n", GCPtrPage));
2428 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2429 return VERR_ADDRESS_CONFLICT;
2430# else
2431 PPGMMAPPING pMapping = pgmGetMapping(pVM, (RTGCPTR)GCPtrPage);
2432 Assert(pMapping);
2433# if PGM_GST_TYPE == PGM_TYPE_32BIT
2434 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
2435# elif PGM_GST_TYPE == PGM_TYPE_PAE
2436 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
2437# else
2438 AssertFailed(); /* can't happen for amd64 */
2439# endif
2440 if (RT_FAILURE(rc))
2441 {
2442 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2443 return rc;
2444 }
2445 PdeDst = *pPdeDst;
2446# endif
2447 }
2448# else /* PGM_WITHOUT_MAPPINGS */
2449 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
2450# endif /* PGM_WITHOUT_MAPPINGS */
2451 Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
2452
2453# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2454 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
2455 PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
2456# endif
2457
2458 /*
2459 * Sync page directory entry.
2460 */
2461 GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
2462 if (PdeSrc.n.u1Present)
2463 {
2464 /*
2465 * Allocate & map the page table.
2466 */
2467 PSHWPT pPTDst;
2468# if PGM_GST_TYPE == PGM_TYPE_AMD64
2469 const bool fPageTable = !PdeSrc.b.u1Size;
2470# else
2471 const bool fPageTable = !PdeSrc.b.u1Size || !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
2472# endif
2473 PPGMPOOLPAGE pShwPage;
2474 RTGCPHYS GCPhys;
2475 if (fPageTable)
2476 {
2477 GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
2478# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
2479 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
2480 GCPhys |= (iPDDst & 1) * (PAGE_SIZE / 2);
2481# endif
2482# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2483 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage);
2484# else
2485 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
2486# endif
2487 }
2488 else
2489 {
2490 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
2491# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
2492 /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
2493 GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
2494# endif
2495# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2496 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, pShwPde->idx, iPDDst, &pShwPage);
2497# else
2498 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
2499# endif
2500 }
2501 if (rc == VINF_SUCCESS)
2502 pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2503 else if (rc == VINF_PGM_CACHED_PAGE)
2504 {
2505 /*
2506 * The PT was cached, just hook it up.
2507 */
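 /* (VINF_PGM_CACHED_PAGE: the pool already has a shadow PT for this GCPhys
 and kind, so all that is left to do is point our PDE at it.) */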
2508 if (fPageTable)
2509 PdeDst.u = pShwPage->Core.Key
2510 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2511 else
2512 {
2513 PdeDst.u = pShwPage->Core.Key
2514 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2515 /* (see explanation and assumptions further down.) */
2516 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
2517 {
2518 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageBig));
2519 PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
2520 PdeDst.b.u1Write = 0;
2521 }
2522 }
2523 *pPdeDst = PdeDst;
2524# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2525 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
2526# endif
2527 return VINF_SUCCESS;
2528 }
2529 else if (rc == VERR_PGM_POOL_FLUSHED)
2530 {
2531 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
2532# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2533 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
2534# endif
2535 return VINF_PGM_SYNC_CR3;
2536 }
2537 else
2538 AssertMsgFailedReturn(("rc=%Rrc\n", rc), VERR_INTERNAL_ERROR);
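 /* pShwPage->Core.Key is the host physical address of the new shadow page
 table; seed the PDE with it, the attribute bits are merged in below. */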
2539 PdeDst.u &= X86_PDE_AVL_MASK;
2540 PdeDst.u |= pShwPage->Core.Key;
2541
2542 /*
2543 * Page directory has been accessed (this is a fault situation, remember).
2544 */
2545 pPDSrc->a[iPDSrc].n.u1Accessed = 1;
2546 if (fPageTable)
2547 {
2548 /*
2549 * Page table - 4KB.
2550 *
2551 * Sync all or just a few entries depending on PGM_SYNC_N_PAGES.
2552 */
2553 Log2(("SyncPT: 4K %RGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx}\n",
2554 GCPtrPage, PdeSrc.b.u1Present, PdeSrc.b.u1Write, PdeSrc.b.u1User, (uint64_t)PdeSrc.u));
2555 PGSTPT pPTSrc;
2556 rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
2557 if (RT_SUCCESS(rc))
2558 {
2559 /*
2560 * Start by syncing the page directory entry so CSAM's TLB trick works.
2561 */
2562 PdeDst.u = (PdeDst.u & (SHW_PDE_PG_MASK | X86_PDE_AVL_MASK))
2563 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2564 *pPdeDst = PdeDst;
2565# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2566 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
2567# endif
2568
2569 /*
2570 * Directory/page user or supervisor privilege: (same goes for read/write)
2571 *
2572 * Directory Page Combined
2573 * U/S U/S U/S
2574 * 0 0 0
2575 * 0 1 0
2576 * 1 0 0
2577 * 1 1 1
2578 *
2579 * Simple AND operation. Table listed for completeness.
2580 *
2581 */
2582 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT4K));
2583# ifdef PGM_SYNC_N_PAGES
2584 unsigned iPTBase = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
2585 unsigned iPTDst = iPTBase;
2586 const unsigned iPTDstEnd = RT_MIN(iPTDst + PGM_SYNC_NR_PAGES / 2, RT_ELEMENTS(pPTDst->a));
2587 if (iPTDst <= PGM_SYNC_NR_PAGES / 2)
2588 iPTDst = 0;
2589 else
2590 iPTDst -= PGM_SYNC_NR_PAGES / 2;
2591# else /* !PGM_SYNC_N_PAGES */
2592 unsigned iPTDst = 0;
2593 const unsigned iPTDstEnd = RT_ELEMENTS(pPTDst->a);
2594# endif /* !PGM_SYNC_N_PAGES */
2595# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
2596 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
2597 const unsigned offPTSrc = ((GCPtrPage >> SHW_PD_SHIFT) & 1) * 512;
2598# else
2599 const unsigned offPTSrc = 0;
2600# endif
2601 for (; iPTDst < iPTDstEnd; iPTDst++)
2602 {
2603 const unsigned iPTSrc = iPTDst + offPTSrc;
2604 const GSTPTE PteSrc = pPTSrc->a[iPTSrc];
2605
2606 if (PteSrc.n.u1Present) /* we've already cleared it above */
2607 {
2608# ifndef IN_RING0
2609 /*
2610 * Assume kernel code is marked as supervisor (and not as user level code
2611 * executed using a conforming code selector) and marked as read-only.
2612 * Also assume that if we're monitoring a page, it's of no interest to CSAM.
2613 */
2614 PPGMPAGE pPage;
2615 if ( ((PdeSrc.u & pPTSrc->a[iPTSrc].u) & (X86_PTE_RW | X86_PTE_US))
2616 || !CSAMDoesPageNeedScanning(pVM, (RTRCPTR)((iPDSrc << GST_PD_SHIFT) | (iPTSrc << PAGE_SHIFT)))
2617 || ( (pPage = pgmPhysGetPage(&pVM->pgm.s, PteSrc.u & GST_PTE_PG_MASK))
2618 && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2619 )
2620# endif
2621 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
2622 Log2(("SyncPT: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s dst.raw=%08llx iPTSrc=%x PdeSrc.u=%x physpte=%RGp\n",
2623 (RTGCPTR)((iPDSrc << GST_PD_SHIFT) | (iPTSrc << PAGE_SHIFT)),
2624 PteSrc.n.u1Present,
2625 PteSrc.n.u1Write & PdeSrc.n.u1Write,
2626 PteSrc.n.u1User & PdeSrc.n.u1User,
2627 (uint64_t)PteSrc.u,
2628 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : "", pPTDst->a[iPTDst].u, iPTSrc, PdeSrc.au32[0],
2629 (RTGCPHYS)((PdeSrc.u & GST_PDE_PG_MASK) + iPTSrc*sizeof(PteSrc)) ));
2630 }
2631 } /* for PTEs */
2632 }
2633 }
2634 else
2635 {
2636 /*
2637 * Big page - 2/4MB.
2638 *
2639 * We'll walk the ram range list in parallel and optimize lookups.
2640 * We will only sync one shadow page table at a time.
2641 */
2642 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT4M));
2643
2644 /**
2645 * @todo It might be more efficient to sync only a part of the 4MB page (similar to what we do for 4KB page tables).
2646 */
2647
2648 /*
2649 * Start by syncing the page directory entry.
2650 */
2651 PdeDst.u = (PdeDst.u & (SHW_PDE_PG_MASK | (X86_PDE_AVL_MASK & ~PGM_PDFLAGS_TRACK_DIRTY)))
2652 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2653
2654 /*
2655 * If the page is not flagged as dirty and is writable, then make it read-only
2656 * at PD level, so we can set the dirty bit when the page is modified.
2657 *
2658 * ASSUMES that page access handlers are implemented on page table entry level.
2659 * Thus we will first catch the dirty access and set PDE.D and restart. If
2660 * there is an access handler, we'll trap again and let it work on the problem.
2661 */
2662 /** @todo move the above stuff to a section in the PGM documentation. */
2663 Assert(!(PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY));
2664 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
2665 {
2666 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageBig));
2667 PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
2668 PdeDst.b.u1Write = 0;
2669 }
2670 *pPdeDst = PdeDst;
2671# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2672 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
2673# endif
2674
2675 /*
2676 * Fill the shadow page table.
2677 */
2678 /* Get address and flags from the source PDE. */
2679 SHWPTE PteDstBase;
2680 PteDstBase.u = PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT);
2681
2682 /* Loop thru the entries in the shadow PT. */
2683 const RTGCPTR GCPtr = (GCPtrPage >> SHW_PD_SHIFT) << SHW_PD_SHIFT; NOREF(GCPtr);
2684 Log2(("SyncPT: BIG %RGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx} Shw=%RGv GCPhys=%RGp %s\n",
2685 GCPtrPage, PdeSrc.b.u1Present, PdeSrc.b.u1Write, PdeSrc.b.u1User, (uint64_t)PdeSrc.u, GCPtr,
2686 GCPhys, PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
2687 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2688 unsigned iPTDst = 0;
2689 while (iPTDst < RT_ELEMENTS(pPTDst->a))
2690 {
2691 /* Advance ram range list. */
2692 while (pRam && GCPhys > pRam->GCPhysLast)
2693 pRam = pRam->CTX_SUFF(pNext);
2694 if (pRam && GCPhys >= pRam->GCPhys)
2695 {
2696 unsigned iHCPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2697 do
2698 {
2699 /* Make shadow PTE. */
2700 PPGMPAGE pPage = &pRam->aPages[iHCPage];
2701 SHWPTE PteDst;
2702
2703 /* Make sure the RAM has already been allocated. */
2704 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) /** @todo PAGE FLAGS */
2705 {
2706 if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(pPage)))
2707 {
2708# ifdef IN_RING3
2709 int rc = pgmr3PhysGrowRange(pVM, GCPhys);
2710# else
2711 int rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2712# endif
2713 if (rc != VINF_SUCCESS)
2714 return rc;
2715 }
2716 }
2717
2718 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2719 {
2720 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
2721 {
2722 PteDst.u = PGM_PAGE_GET_HCPHYS(pPage) | PteDstBase.u;
2723 PteDst.n.u1Write = 0;
2724 }
2725 else
2726 PteDst.u = 0;
2727 }
2728# ifndef IN_RING0
2729 /*
2730 * Assuming kernel code will be marked as supervisor and not as user level and executed
2731 * using a conforming code selector. Don't check for readonly, as that implies the whole
2732 * 4MB can be code or readonly data. Linux enables write access for its large pages.
2733 */
2734 else if ( !PdeSrc.n.u1User
2735 && CSAMDoesPageNeedScanning(pVM, (RTRCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT))))
2736 PteDst.u = 0;
2737# endif
2738 else
2739 PteDst.u = PGM_PAGE_GET_HCPHYS(pPage) | PteDstBase.u;
2740# ifdef PGMPOOL_WITH_USER_TRACKING
2741 if (PteDst.n.u1Present)
2742 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
2743# endif
2744 /* commit it */
2745 pPTDst->a[iPTDst] = PteDst;
2746 Log4(("SyncPT: BIG %RGv PteDst:{P=%d RW=%d U=%d raw=%08llx}%s\n",
2747 (RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT)), PteDst.n.u1Present, PteDst.n.u1Write, PteDst.n.u1User, (uint64_t)PteDst.u,
2748 PteDst.u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
2749
2750 /* advance */
2751 GCPhys += PAGE_SIZE;
2752 iHCPage++;
2753 iPTDst++;
2754 } while ( iPTDst < RT_ELEMENTS(pPTDst->a)
2755 && GCPhys <= pRam->GCPhysLast);
2756 }
2757 else if (pRam)
2758 {
2759 Log(("Invalid pages at %RGp\n", GCPhys));
2760 do
2761 {
2762 pPTDst->a[iPTDst].u = 0; /* MMIO or invalid page, we must handle them manually. */
2763 GCPhys += PAGE_SIZE;
2764 iPTDst++;
2765 } while ( iPTDst < RT_ELEMENTS(pPTDst->a)
2766 && GCPhys < pRam->GCPhys);
2767 }
2768 else
2769 {
2770 Log(("Invalid pages at %RGp (2)\n", GCPhys));
2771 for ( ; iPTDst < RT_ELEMENTS(pPTDst->a); iPTDst++)
2772 pPTDst->a[iPTDst].u = 0; /* MMIO or invalid page, we must handle them manually. */
2773 }
2774 } /* while more PTEs */
2775 } /* 4KB / 4MB */
2776 }
2777 else
2778 AssertRelease(!PdeDst.n.u1Present);
2779
2780 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2781 if (RT_FAILURE(rc))
2782 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPTFailed));
2783 return rc;
2784
2785#elif (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
2786 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
2787 && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
2788
2789
2790 /*
2791 * Validate input a little bit.
2792 */
2793 int rc = VINF_SUCCESS;
2794# if PGM_SHW_TYPE == PGM_TYPE_32BIT
2795 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2796 PSHWPDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
2797
2798# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2799 /* Fetch the pgm pool shadow descriptor. */
2800 PPGMPOOLPAGE pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
2801 Assert(pShwPde);
2802# endif
2803
2804# elif PGM_SHW_TYPE == PGM_TYPE_PAE
2805# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2806 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2807 PPGMPOOLPAGE pShwPde;
2808 PX86PDPAE pPDDst;
2809 PSHWPDE pPdeDst;
2810
2811 /* Fetch the pgm pool shadow descriptor. */
2812 rc = pgmShwGetPaePoolPagePD(&pVM->pgm.s, GCPtrPage, &pShwPde);
2813 AssertRCSuccessReturn(rc, rc);
2814 Assert(pShwPde);
2815
2816 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
2817 pPdeDst = &pPDDst->a[iPDDst];
2818# else
2819 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - only pool index atm!*/;
2820 PX86PDEPAE pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
2821# endif
2822
2823# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
2824 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
2825 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2826 PX86PDPAE pPDDst;
2827 PX86PDPT pPdptDst;
2828 rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, NULL, &pPdptDst, &pPDDst);
2829 AssertRCSuccessReturn(rc, rc);
2830 Assert(pPDDst);
2831 PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
2832
2833 /* Fetch the pgm pool shadow descriptor. */
2834 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
2835 Assert(pShwPde);
2836
2837# elif PGM_SHW_TYPE == PGM_TYPE_EPT
2838 const unsigned iPdpt = (GCPtrPage >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
2839 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
2840 PEPTPD pPDDst;
2841 PEPTPDPT pPdptDst;
2842
2843 rc = pgmShwGetEPTPDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
2844 if (rc != VINF_SUCCESS)
2845 {
2846 AssertRC(rc);
2847 return rc;
2848 }
2849 Assert(pPDDst);
2850 PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
2851
2852 /* Fetch the pgm pool shadow descriptor. */
2853 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpt].u & EPT_PDPTE_PG_MASK);
2854 Assert(pShwPde);
2855# endif
2856 SHWPDE PdeDst = *pPdeDst;
2857
2858 Assert(!(PdeDst.u & PGM_PDFLAGS_MAPPING));
2859 Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
2860
2861 GSTPDE PdeSrc;
2862 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
2863 PdeSrc.n.u1Present = 1;
2864 PdeSrc.n.u1Write = 1;
2865 PdeSrc.n.u1Accessed = 1;
2866 PdeSrc.n.u1User = 1;
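 /* This faked, fully permissive guest PDE lets the non-paging modes reuse the
 paging-mode SyncPage code below. */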
2867
2868 /*
2869 * Allocate & map the page table.
2870 */
2871 PSHWPT pPTDst;
2872 PPGMPOOLPAGE pShwPage;
2873 RTGCPHYS GCPhys;
2874
2875 /* Virtual address = physical address */
2876 GCPhys = GCPtrPage & X86_PAGE_4K_BASE_MASK;
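 /* Note: the pool page is keyed on the PD-entry-aligned physical base below,
 so all pages in the same 2/4 MB region share one shadow page table. */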
2877# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_EPT || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2878 rc = pgmPoolAlloc(pVM, GCPhys & ~(RT_BIT_64(SHW_PD_SHIFT) - 1), BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage);
2879# else
2880 rc = pgmPoolAlloc(pVM, GCPhys & ~(RT_BIT_64(SHW_PD_SHIFT) - 1), BTH_PGMPOOLKIND_PT_FOR_PT, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
2881# endif
2882
2883 if ( rc == VINF_SUCCESS
2884 || rc == VINF_PGM_CACHED_PAGE)
2885 pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2886 else
2887 AssertMsgFailedReturn(("rc=%Rrc\n", rc), VERR_INTERNAL_ERROR);
2888
2889 PdeDst.u &= X86_PDE_AVL_MASK;
2890 PdeDst.u |= pShwPage->Core.Key;
2891 PdeDst.n.u1Present = 1;
2892 PdeDst.n.u1Write = 1;
2893# if PGM_SHW_TYPE == PGM_TYPE_EPT
2894 PdeDst.n.u1Execute = 1;
2895# else
2896 PdeDst.n.u1User = 1;
2897 PdeDst.n.u1Accessed = 1;
2898# endif
2899 *pPdeDst = PdeDst;
2900
2901 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, GCPtrPage, PGM_SYNC_NR_PAGES, 0 /* page not present */);
2902 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2903 return rc;
2904
2905#else
2906 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE));
2907 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2908 return VERR_INTERNAL_ERROR;
2909#endif
2910}
2911
2912
2913
2914/**
2915 * Prefetch a page/set of pages.
2916 *
2917 * Typically used to sync commonly used pages before entering raw mode
2918 * after a CR3 reload.
2919 *
2920 * @returns VBox status code.
2921 * @param pVM VM handle.
2922 * @param GCPtrPage Page to prefetch.
2923 */
2924PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, RTGCPTR GCPtrPage)
2925{
2926#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
2927 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
2928 /*
2929 * Check that all Guest levels thru the PDE are present, getting the
2930 * PD and PDE in the processes.
2931 */
2932 int rc = VINF_SUCCESS;
2933# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
2934# if PGM_GST_TYPE == PGM_TYPE_32BIT
2935 const unsigned iPDSrc = GCPtrPage >> GST_PD_SHIFT;
2936 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
2937# elif PGM_GST_TYPE == PGM_TYPE_PAE
2938 unsigned iPDSrc;
2939# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2940 X86PDPE PdpeSrc;
2941 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
2942# else
2943 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, NULL);
2944# endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
2945 if (!pPDSrc)
2946 return VINF_SUCCESS; /* not present */
2947# elif PGM_GST_TYPE == PGM_TYPE_AMD64
2948 unsigned iPDSrc;
2949 PX86PML4E pPml4eSrc;
2950 X86PDPE PdpeSrc;
2951 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
2952 if (!pPDSrc)
2953 return VINF_SUCCESS; /* not present */
2954# endif
2955 const GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
2956# else
2957 PGSTPD pPDSrc = NULL;
2958 const unsigned iPDSrc = 0;
2959 GSTPDE PdeSrc;
2960
2961 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
2962 PdeSrc.n.u1Present = 1;
2963 PdeSrc.n.u1Write = 1;
2964 PdeSrc.n.u1Accessed = 1;
2965 PdeSrc.n.u1User = 1;
2966# endif
2967
2968 if (PdeSrc.n.u1Present && PdeSrc.n.u1Accessed)
2969 {
2970# if PGM_SHW_TYPE == PGM_TYPE_32BIT
2971 const X86PDE PdeDst = pgmShwGet32BitPDE(&pVM->pgm.s, GCPtrPage);
2972# elif PGM_SHW_TYPE == PGM_TYPE_PAE
2973# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2974 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
2975 PX86PDPAE pPDDst;
2976 X86PDEPAE PdeDst;
2977# if PGM_GST_TYPE != PGM_TYPE_PAE
2978 X86PDPE PdpeSrc;
2979
2980 /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
2981 PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
2982# endif
2983 int rc = pgmShwSyncPaePDPtr(pVM, GCPtrPage, &PdpeSrc, &pPDDst);
2984 if (rc != VINF_SUCCESS)
2985 {
2986 AssertRC(rc);
2987 return rc;
2988 }
2989 Assert(pPDDst);
2990 PdeDst = pPDDst->a[iPDDst];
2991# else
2992 const X86PDEPAE PdeDst = pgmShwGetPaePDE(&pVM->pgm.s, GCPtrPage);
2993# endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
2994
2995# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
2996 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
2997 PX86PDPAE pPDDst;
2998 X86PDEPAE PdeDst;
2999
3000# if PGM_GST_TYPE == PGM_TYPE_PROT
3001 /* AMD-V nested paging */
3002 X86PML4E Pml4eSrc;
3003 X86PDPE PdpeSrc;
3004 PX86PML4E pPml4eSrc = &Pml4eSrc;
3005
3006 /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
3007 Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
3008 PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
3009# endif
3010
3011 int rc = pgmShwSyncLongModePDPtr(pVM, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
3012 if (rc != VINF_SUCCESS)
3013 {
3014 AssertRC(rc);
3015 return rc;
3016 }
3017 Assert(pPDDst);
3018 PdeDst = pPDDst->a[iPDDst];
3019# endif
3020 if (!(PdeDst.u & PGM_PDFLAGS_MAPPING))
3021 {
3022 if (!PdeDst.n.u1Present)
3023 /** r=bird: This guy will set the A bit on the PDE, probably harmless. */
3024 rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, GCPtrPage);
3025 else
3026 {
3027 /** @note We used to sync PGM_SYNC_NR_PAGES pages, which triggered assertions in CSAM, because
3028 * R/W attributes of nearby pages were reset. Not sure how that could happen. Anyway, it
3029 * makes no sense to prefetch more than one page.
3030 */
3031 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, GCPtrPage, 1, 0);
3032 if (RT_SUCCESS(rc))
3033 rc = VINF_SUCCESS;
3034 }
3035 }
3036 }
3037 return rc;
3038
3039#elif PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
3040 return VINF_SUCCESS; /* ignore */
3041#endif
3042}
3043
3044
3045
3046
3047/**
3048 * Syncs a page during a PGMVerifyAccess() call.
3049 *
3050 * @returns VBox status code (informational included).
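 * @param   pVM         The VM handle.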
3051 * @param GCPtrPage The address of the page to sync.
3052 * @param fPage The effective guest page flags.
3053 * @param uErr The trap error code.
3054 */
3055PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVM pVM, RTGCPTR GCPtrPage, unsigned fPage, unsigned uErr)
3056{
3057 LogFlow(("VerifyAccessSyncPage: GCPtrPage=%RGv fPage=%#x uErr=%#x\n", GCPtrPage, fPage, uErr));
3058
3059 Assert(!HWACCMIsNestedPagingActive(pVM));
3060#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
3061 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
3062
3063# ifndef IN_RING0
3064 if (!(fPage & X86_PTE_US))
3065 {
3066 /*
3067 * Mark this page as safe.
3068 */
3069 /** @todo not correct for pages that contain both code and data!! */
3070 Log(("CSAMMarkPage %RGv; scanned=%d\n", GCPtrPage, true));
3071 CSAMMarkPage(pVM, (RTRCPTR)GCPtrPage, true);
3072 }
3073# endif
3074
3075 /*
3076 * Get guest PD and index.
3077 */
3078# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
3079# if PGM_GST_TYPE == PGM_TYPE_32BIT
3080 const unsigned iPDSrc = GCPtrPage >> GST_PD_SHIFT;
3081 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
3082# elif PGM_GST_TYPE == PGM_TYPE_PAE
3083 unsigned iPDSrc;
3084# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
3085 X86PDPE PdpeSrc;
3086 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
3087# else
3088 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, NULL);
3089# endif
3090
3091    if (!pPDSrc)
3092 {
3093 Log(("PGMVerifyAccess: access violation for %RGv due to non-present PDPTR\n", GCPtrPage));
3094 return VINF_EM_RAW_GUEST_TRAP;
3095 }
3096# elif PGM_GST_TYPE == PGM_TYPE_AMD64
3097 unsigned iPDSrc;
3098 PX86PML4E pPml4eSrc;
3099 X86PDPE PdpeSrc;
3100 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
3101 if (!pPDSrc)
3102 {
3103 Log(("PGMVerifyAccess: access violation for %RGv due to non-present PDPTR\n", GCPtrPage));
3104 return VINF_EM_RAW_GUEST_TRAP;
3105 }
3106# endif
3107# else
3108 PGSTPD pPDSrc = NULL;
3109 const unsigned iPDSrc = 0;
3110# endif
3111 int rc = VINF_SUCCESS;
3112
3113 /*
3114 * First check if the shadow pd is present.
3115 */
3116# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3117 PX86PDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
3118# elif PGM_SHW_TYPE == PGM_TYPE_PAE
3119 PX86PDEPAE pPdeDst;
3120# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
3121 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
3122 PX86PDPAE pPDDst;
3123# if PGM_GST_TYPE != PGM_TYPE_PAE
3124 X86PDPE PdpeSrc;
3125
3126 /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
3127 PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
3128# endif
3129 rc = pgmShwSyncPaePDPtr(pVM, GCPtrPage, &PdpeSrc, &pPDDst);
3130 if (rc != VINF_SUCCESS)
3131 {
3132 AssertRC(rc);
3133 return rc;
3134 }
3135 Assert(pPDDst);
3136 pPdeDst = &pPDDst->a[iPDDst];
3137# else
3138 pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
3139# endif
3140# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
3141 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
3142 PX86PDPAE pPDDst;
3143 PX86PDEPAE pPdeDst;
3144
3145# if PGM_GST_TYPE == PGM_TYPE_PROT
3146 /* AMD-V nested paging */
3147 X86PML4E Pml4eSrc;
3148 X86PDPE PdpeSrc;
3149 PX86PML4E pPml4eSrc = &Pml4eSrc;
3150
3151 /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
3152 Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
3153 PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
3154# endif
3155
3156 rc = pgmShwSyncLongModePDPtr(pVM, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
3157 if (rc != VINF_SUCCESS)
3158 {
3159 AssertRC(rc);
3160 return rc;
3161 }
3162 Assert(pPDDst);
3163 pPdeDst = &pPDDst->a[iPDDst];
3164# endif
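    /* If the shadow PDE isn't present yet, sync the whole page table first. */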
3165 if (!pPdeDst->n.u1Present)
3166 {
3167 rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, GCPtrPage);
3168 AssertRC(rc);
3169 if (rc != VINF_SUCCESS)
3170 return rc;
3171 }
3172
3173# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
3174 /* Check for dirty bit fault */
3175 rc = PGM_BTH_NAME(CheckPageFault)(pVM, uErr, pPdeDst, &pPDSrc->a[iPDSrc], GCPtrPage);
3176 if (rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT)
3177 Log(("PGMVerifyAccess: success (dirty)\n"));
3178 else
3179 {
3180 GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
3181#else
3182 {
3183 GSTPDE PdeSrc;
3184 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
3185 PdeSrc.n.u1Present = 1;
3186 PdeSrc.n.u1Write = 1;
3187 PdeSrc.n.u1Accessed = 1;
3188 PdeSrc.n.u1User = 1;
3189
3190#endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
3191 Assert(rc != VINF_EM_RAW_GUEST_TRAP);
3192 if (uErr & X86_TRAP_PF_US)
3193 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncUser));
3194 else /* supervisor */
3195 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));
3196
3197 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, GCPtrPage, 1, 0);
3198 if (RT_SUCCESS(rc))
3199 {
3200 /* Page was successfully synced */
3201 Log2(("PGMVerifyAccess: success (sync)\n"));
3202 rc = VINF_SUCCESS;
3203 }
3204 else
3205 {
3206 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", GCPtrPage, rc));
3207 return VINF_EM_RAW_GUEST_TRAP;
3208 }
3209 }
3210 return rc;
3211
3212#else  /* PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT */
3213
3214    AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE));
3215    return VERR_INTERNAL_ERROR;
3216#endif /* PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT */
3217}
3218
3219
3220#if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
3221# if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
3222/**
3223 * Figures out which kind of shadow page this guest PDE warrants.
3224 *
3225 * @returns Shadow page kind.
3226 * @param pPdeSrc The guest PDE in question.
3227 * @param cr4 The current guest cr4 value.
3228 */
3229DECLINLINE(PGMPOOLKIND) PGM_BTH_NAME(CalcPageKind)(const GSTPDE *pPdeSrc, uint32_t cr4)
3230{
3231# if PGM_GST_TYPE == PGM_TYPE_AMD64
3232 if (!pPdeSrc->n.u1Size)
3233# else
3234 if (!pPdeSrc->n.u1Size || !(cr4 & X86_CR4_PSE))
3235# endif
3236 return BTH_PGMPOOLKIND_PT_FOR_PT;
3237 //switch (pPdeSrc->u & (X86_PDE4M_RW | X86_PDE4M_US /*| X86_PDE4M_PAE_NX*/))
3238 //{
3239 // case 0:
3240 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RO;
3241 // case X86_PDE4M_RW:
3242 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RW;
3243 // case X86_PDE4M_US:
3244 // return BTH_PGMPOOLKIND_PT_FOR_BIG_US;
3245 // case X86_PDE4M_RW | X86_PDE4M_US:
3246 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RW_US;
3247# if 0
3248 // case X86_PDE4M_PAE_NX:
3249 // return BTH_PGMPOOLKIND_PT_FOR_BIG_NX;
3250 // case X86_PDE4M_RW | X86_PDE4M_PAE_NX:
3251 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RW_NX;
3252 // case X86_PDE4M_US | X86_PDE4M_PAE_NX:
3253 // return BTH_PGMPOOLKIND_PT_FOR_BIG_US_NX;
3254 // case X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PAE_NX:
3255 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RW_US_NX;
3256# endif
3257 return BTH_PGMPOOLKIND_PT_FOR_BIG;
3258 //}
3259}
3260# endif
3261#endif
3262
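/* The statistics in the SyncCR3 code below are currently compiled out; point
   MY_STAM_COUNTER_INC back at STAM_COUNTER_INC to re-enable them. */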
3263#undef MY_STAM_COUNTER_INC
3264#define MY_STAM_COUNTER_INC(a) do { } while (0)
3265
3266
3267/**
3268 * Syncs the paging hierarchy starting at CR3.
3269 *
3270 * @returns VBox status code, no specials.
3271 * @param pVM The virtual machine.
3272 * @param cr0 Guest context CR0 register
3273 * @param cr3 Guest context CR3 register
3274 * @param cr4 Guest context CR4 register
3275 * @param fGlobal Including global page directories or not
3276 */
3277PGM_BTH_DECL(int, SyncCR3)(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
3278{
3279 if (VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
3280 fGlobal = true; /* Change this CR3 reload to be a global one. */
3281
3282 LogFlow(("SyncCR3 %d\n", fGlobal));
3283
3284#if PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
3285 /*
3286 * Update page access handlers.
3287 * The virtual handlers are always flushed, while the physical ones are only flushed on demand.
3288 * WARNING: We are incorrectly not doing global flushing on Virtual Handler updates. We'll
3289 * have to look into that later because it has a bad influence on performance.
3290 * @note SvL: There's no need for that. Just invalidate the virtual range(s).
3291 * bird: Yes, but that won't work for aliases.
3292 */
3293 /** @todo this MUST go away. See #1557. */
3294 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3Handlers), h);
3295 PGM_GST_NAME(HandlerVirtualUpdate)(pVM, cr4);
3296 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3Handlers), h);
3297#endif
3298
3299#if PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
3300 /*
3301 * Nested / EPT - almost no work.
3302 */
3303 /** @todo check if this is really necessary; the call does it as well... */
3304 HWACCMFlushTLB(pVM);
3305 return VINF_SUCCESS;
3306
3307#elif PGM_SHW_TYPE == PGM_TYPE_AMD64
3308 /*
3309 * AMD64 (Shw & Gst) - No need to check all paging levels; we zero
3310 * out the shadow parts when the guest modifies its tables.
3311 */
3312 return VINF_SUCCESS;
3313
3314#else /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */
3315
3316# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
3317# ifdef PGM_WITHOUT_MAPPINGS
3318 Assert(pVM->pgm.s.fMappingsFixed);
3319 return VINF_SUCCESS;
3320# else
3321 /* Nothing to do when mappings are fixed. */
3322 if (pVM->pgm.s.fMappingsFixed)
3323 return VINF_SUCCESS;
3324
3325 int rc = PGMMapResolveConflicts(pVM);
3326 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);
3327 if (rc == VINF_PGM_SYNC_CR3)
3328 {
3329 LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
3330 return VINF_PGM_SYNC_CR3;
3331 }
3332# endif
3333 return VINF_SUCCESS;
3334# else
3335 /*
3336 * PAE and 32-bit legacy mode (shadow).
3337 * (Guest PAE, 32-bit legacy, protected and real modes.)
3338 */
3339 Assert(fGlobal || (cr4 & X86_CR4_PGE));
3340 MY_STAM_COUNTER_INC(fGlobal ? &pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3Global) : &pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3NotGlobal));
3341
3342# if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
3343 bool const fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
3344
3345 /*
3346 * Get page directory addresses.
3347 */
3348# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3349 PX86PDE pPDEDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, 0);
3350# else /* PGM_SHW_TYPE == PGM_TYPE_PAE */
3351# if PGM_GST_TYPE == PGM_TYPE_32BIT
3352 PX86PDEPAE pPDEDst = NULL;
3353# endif
3354# endif
3355
3356# if PGM_GST_TYPE == PGM_TYPE_32BIT
3357 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
3358 Assert(pPDSrc);
3359# if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
3360 Assert(PGMPhysGCPhys2R3PtrAssert(pVM, (RTGCPHYS)(cr3 & GST_CR3_PAGE_MASK), sizeof(*pPDSrc)) == (RTR3PTR)pPDSrc);
3361# endif
3362# endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */
3363
3364 /*
3365 * Iterate the CR3 page.
3366 */
3367 PPGMMAPPING pMapping;
3368 unsigned iPdNoMapping;
3369 const bool fRawR0Enabled = EMIsRawRing0Enabled(pVM);
3370 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3371
3372 /* Only check mappings if they are supposed to be put into the shadow page table. */
3373 if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
3374 {
3375 pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
3376 iPdNoMapping = (pMapping) ? (pMapping->GCPtr >> GST_PD_SHIFT) : ~0U;
3377 }
3378 else
3379 {
3380 pMapping = 0;
3381 iPdNoMapping = ~0U;
3382 }
3383
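    /* A PAE guest has four PDPTEs, so each guest page directory is walked in turn;
       32-bit guests have just the one page directory. */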
3384# if PGM_GST_TYPE == PGM_TYPE_PAE
3385 for (uint64_t iPdpt = 0; iPdpt < GST_PDPE_ENTRIES; iPdpt++)
3386 {
3387 unsigned iPDSrc;
3388 X86PDPE PdpeSrc;
3389 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, iPdpt << X86_PDPT_SHIFT, &iPDSrc, &PdpeSrc);
3390 PX86PDEPAE pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, iPdpt << X86_PDPT_SHIFT);
3391 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
3392
3393 if (pPDSrc == NULL)
3394 {
3395 /* PDPE not present */
3396 if (pPdptDst->a[iPdpt].n.u1Present)
3397 {
3398 LogFlow(("SyncCR3: guest PDPE %lld not present; clear shw pdpe\n", iPdpt));
3399 /* for each page directory entry */
3400 for (unsigned iPD = 0; iPD < RT_ELEMENTS(pPDSrc->a); iPD++)
3401 {
3402 if ( pPDEDst[iPD].n.u1Present
3403 && !(pPDEDst[iPD].u & PGM_PDFLAGS_MAPPING))
3404 {
3405 pgmPoolFree(pVM, pPDEDst[iPD].u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPdpt * X86_PG_PAE_ENTRIES + iPD);
3406 pPDEDst[iPD].u = 0;
3407 }
3408 }
3409 }
3410 if (!(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
3411 pPdptDst->a[iPdpt].n.u1Present = 0;
3412 continue;
3413 }
3414# else /* PGM_GST_TYPE != PGM_TYPE_PAE */
3415 {
3416# endif /* PGM_GST_TYPE != PGM_TYPE_PAE */
3417 for (unsigned iPD = 0; iPD < RT_ELEMENTS(pPDSrc->a); iPD++)
3418 {
3419# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3420 if ((iPD & 255) == 0) /* Start of new PD. */
3421 pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, (uint32_t)iPD << GST_PD_SHIFT);
3422# endif
3423# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3424 Assert(pgmShwGet32BitPDEPtr(&pVM->pgm.s, (uint32_t)iPD << SHW_PD_SHIFT) == pPDEDst);
3425# elif PGM_SHW_TYPE == PGM_TYPE_PAE
3426# if defined(VBOX_STRICT) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) /* Unfortunately not reliable with PGMR0DynMap and multiple VMs. */
3427 RTGCPTR GCPtrStrict = (uint32_t)iPD << GST_PD_SHIFT;
3428# if PGM_GST_TYPE == PGM_TYPE_PAE
3429 GCPtrStrict |= iPdpt << X86_PDPT_SHIFT;
3430# endif
3431 AssertMsg(pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrStrict) == pPDEDst, ("%p vs %p (%RGv)\n", pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrStrict), pPDEDst, GCPtrStrict));
3432# endif /* VBOX_STRICT */
3433# endif
3434 GSTPDE PdeSrc = pPDSrc->a[iPD];
3435 if ( PdeSrc.n.u1Present
3436 && (PdeSrc.n.u1User || fRawR0Enabled))
3437 {
3438# if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
3439 || PGM_GST_TYPE == PGM_TYPE_PAE) \
3440 && !defined(PGM_WITHOUT_MAPPINGS)
3441
3442 /*
3443 * Check for conflicts with GC mappings.
3444 */
3445# if PGM_GST_TYPE == PGM_TYPE_PAE
3446 if (iPD + iPdpt * X86_PG_PAE_ENTRIES == iPdNoMapping)
3447# else
3448 if (iPD == iPdNoMapping)
3449# endif
3450 {
3451 if (pVM->pgm.s.fMappingsFixed)
3452 {
3453 /* It's fixed, just skip the mapping. */
3454 const unsigned cPTs = pMapping->cb >> GST_PD_SHIFT;
3455 Assert(PGM_GST_TYPE == PGM_TYPE_32BIT || (iPD + cPTs - 1) / X86_PG_PAE_ENTRIES == iPD / X86_PG_PAE_ENTRIES);
3456 iPD += cPTs - 1;
3457# if PGM_SHW_TYPE != PGM_GST_TYPE /* SHW==PAE && GST==32BIT */
3458 pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, (uint32_t)(iPD + 1) << GST_PD_SHIFT);
3459# else
3460 pPDEDst += cPTs;
3461# endif
3462 pMapping = pMapping->CTX_SUFF(pNext);
3463 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
3464 continue;
3465 }
3466# ifdef IN_RING3
3467# if PGM_GST_TYPE == PGM_TYPE_32BIT
3468 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT);
3469# elif PGM_GST_TYPE == PGM_TYPE_PAE
3470 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPdpt << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));
3471# endif
3472 if (RT_FAILURE(rc))
3473 return rc;
3474
3475 /*
3476 * Update iPdNoMapping and pMapping.
3477 */
3478 pMapping = pVM->pgm.s.pMappingsR3;
3479 while (pMapping && pMapping->GCPtr < (iPD << GST_PD_SHIFT))
3480 pMapping = pMapping->pNextR3;
3481 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
3482# else /* !IN_RING3 */
3483 LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
3484 return VINF_PGM_SYNC_CR3;
3485# endif /* !IN_RING3 */
3486 }
3487# else /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
3488 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
3489# endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
3490
3491 /*
3492 * Sync page directory entry.
3493 *
3494 * The current approach is to allocate the page table but to set
3495 * the entry to not-present and postpone the page table syncing till
3496 * it's actually used.
3497 */
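            /* With a 32-bit guest and a PAE shadow, one guest PDE covers 4 MB while a
               shadow PDE covers only 2 MB, so each guest PDE maps to two shadow PDEs. */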
3498# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3499 for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
3500# elif PGM_GST_TYPE == PGM_TYPE_PAE
3501 const unsigned iPdShw = iPD + iPdpt * X86_PG_PAE_ENTRIES; NOREF(iPdShw);
3502# else
3503 const unsigned iPdShw = iPD; NOREF(iPdShw);
3504# endif
3505 {
3506 SHWPDE PdeDst = *pPDEDst;
3507 if (PdeDst.n.u1Present)
3508 {
3509 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK);
3510 RTGCPHYS GCPhys;
3511 if ( !PdeSrc.b.u1Size
3512 || !fBigPagesSupported)
3513 {
3514 GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
3515# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3516                        /* Select the right half of the guest page table, as we're emulating one 32-bit page table with two PAE shadow page tables. */
3517 GCPhys |= i * (PAGE_SIZE / 2);
3518# endif
3519 }
3520 else
3521 {
3522 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
3523# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3524                        /* Select the right half of the 4 MB guest page, as we're emulating it with two 2 MB shadow PDEs. */
3525 GCPhys |= i * X86_PAGE_2M_SIZE;
3526# endif
3527 }
3528
3529 if ( pShwPage->GCPhys == GCPhys
3530 && pShwPage->enmKind == PGM_BTH_NAME(CalcPageKind)(&PdeSrc, cr4)
3531 && ( pShwPage->fCached
3532 || ( !fGlobal
3533 && ( false
3534# ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
3535 || ( (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
3536 && (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE)) /* global 2/4MB page. */
3537 || ( !pShwPage->fSeenNonGlobal
3538 && (cr4 & X86_CR4_PGE))
3539# endif
3540 )
3541 )
3542 )
3543 && ( (PdeSrc.u & (X86_PDE_US | X86_PDE_RW)) == (PdeDst.u & (X86_PDE_US | X86_PDE_RW))
3544 || ( fBigPagesSupported
3545 && ((PdeSrc.u & (X86_PDE_US | X86_PDE4M_PS | X86_PDE4M_D)) | PGM_PDFLAGS_TRACK_DIRTY)
3546 == ((PdeDst.u & (X86_PDE_US | X86_PDE_RW | PGM_PDFLAGS_TRACK_DIRTY)) | X86_PDE4M_PS))
3547 )
3548 )
3549 {
3550# ifdef VBOX_WITH_STATISTICS
3551 if ( !fGlobal
3552 && (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
3553 && (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE))
3554 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstSkippedGlobalPD));
3555 else if (!fGlobal && !pShwPage->fSeenNonGlobal && (cr4 & X86_CR4_PGE))
3556 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstSkippedGlobalPT));
3557 else
3558 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstCacheHit));
3559# endif /* VBOX_WITH_STATISTICS */
3560 /** @todo a replacement strategy isn't really needed unless we're using a very small pool < 512 pages.
3561 * The whole ageing stuff should be put in yet another set of #ifdefs. For now, let's just skip it. */
3562 //# ifdef PGMPOOL_WITH_CACHE
3563 // pgmPoolCacheUsed(pPool, pShwPage);
3564 //# endif
3565 }
3566 else
3567 {
3568 pgmPoolFreeByPage(pPool, pShwPage, SHW_POOL_ROOT_IDX, iPdShw);
3569 pPDEDst->u = 0;
3570 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstFreed));
3571 }
3572 }
3573 else
3574 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstNotPresent));
3575
3576 /* advance */
3577 pPDEDst++;
3578 } /* foreach 2MB PAE PDE in 4MB guest PDE */
3579 }
3580# if PGM_GST_TYPE == PGM_TYPE_PAE
3581 else if (iPD + iPdpt * X86_PG_PAE_ENTRIES != iPdNoMapping)
3582# else
3583 else if (iPD != iPdNoMapping)
3584# endif
3585 {
3586 /*
3587 * Check if there is any page directory to mark not present here.
3588 */
3589# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3590 for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
3591# elif PGM_GST_TYPE == PGM_TYPE_PAE
3592 const unsigned iPdShw = iPD + iPdpt * X86_PG_PAE_ENTRIES;
3593# else
3594 const unsigned iPdShw = iPD;
3595# endif
3596 {
3597 if (pPDEDst->n.u1Present)
3598 {
3599 pgmPoolFree(pVM, pPDEDst->u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPdShw);
3600 pPDEDst->u = 0;
3601 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstFreedSrcNP));
3602 }
3603 pPDEDst++;
3604 }
3605 }
3606 else
3607 {
3608# if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
3609 || PGM_GST_TYPE == PGM_TYPE_PAE) \
3610 && !defined(PGM_WITHOUT_MAPPINGS)
3611
3612 const unsigned cPTs = pMapping->cb >> GST_PD_SHIFT;
3613
3614 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
3615 if (pVM->pgm.s.fMappingsFixed)
3616 {
3617 /* It's fixed, just skip the mapping. */
3618 pMapping = pMapping->CTX_SUFF(pNext);
3619 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
3620 }
3621 else
3622 {
3623 /*
3624 * Check for conflicts for subsequent pagetables
3625 * and advance to the next mapping.
3626 */
3627 iPdNoMapping = ~0U;
3628 unsigned iPT = cPTs;
3629 while (iPT-- > 1)
3630 {
3631 if ( pPDSrc->a[iPD + iPT].n.u1Present
3632 && (pPDSrc->a[iPD + iPT].n.u1User || fRawR0Enabled))
3633 {
3634# ifdef IN_RING3
3635# if PGM_GST_TYPE == PGM_TYPE_32BIT
3636 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT);
3637# elif PGM_GST_TYPE == PGM_TYPE_PAE
3638 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPdpt << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));
3639# endif
3640 if (RT_FAILURE(rc))
3641 return rc;
3642
3643 /*
3644 * Update iPdNoMapping and pMapping.
3645 */
3646 pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
3647 while (pMapping && pMapping->GCPtr < (iPD << GST_PD_SHIFT))
3648 pMapping = pMapping->CTX_SUFF(pNext);
3649 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
3650 break;
3651# else
3652 LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
3653 return VINF_PGM_SYNC_CR3;
3654# endif
3655 }
3656 }
3657 if (iPdNoMapping == ~0U && pMapping)
3658 {
3659 pMapping = pMapping->CTX_SUFF(pNext);
3660 if (pMapping)
3661 iPdNoMapping = pMapping->GCPtr >> GST_PD_SHIFT;
3662 }
3663 }
3664
3665 /* advance. */
3666 Assert(PGM_GST_TYPE == PGM_TYPE_32BIT || (iPD + cPTs - 1) / X86_PG_PAE_ENTRIES == iPD / X86_PG_PAE_ENTRIES);
3667 iPD += cPTs - 1;
3668# if PGM_SHW_TYPE != PGM_GST_TYPE /* SHW==PAE && GST==32BIT */
3669 pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, (uint32_t)(iPD + 1) << GST_PD_SHIFT);
3670# else
3671 pPDEDst += cPTs;
3672# endif
3673# if PGM_GST_TYPE != PGM_SHW_TYPE
3674 AssertCompile(PGM_GST_TYPE == PGM_TYPE_32BIT && PGM_SHW_TYPE == PGM_TYPE_PAE);
3675# endif
3676# else /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
3677 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
3678# endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
3679 }
3680
3681 } /* for iPD */
3682 } /* for each PDPTE (PAE) */
3683 return VINF_SUCCESS;
3684
3685# else /* guest real and protected mode */
3686 return VINF_SUCCESS;
3687# endif
3688#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
3689#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */
3690}
3691
3692
3693
3694
3695#ifdef VBOX_STRICT
3696#ifdef IN_RC
3697# undef AssertMsgFailed
3698# define AssertMsgFailed Log
3699#endif
3700#ifdef IN_RING3
3701# include <VBox/dbgf.h>
3702
3703/**
3704 * Dumps a page table hierarchy using only physical addresses and cr4/lm flags.
3705 *
3706 * @returns VBox status code (VINF_SUCCESS).
3707 * @param pVM The VM handle.
3708 * @param cr3 The root of the hierarchy.
3709 * @param   cr4         The cr4; only the PAE and PSE flags are currently used.
3710 * @param fLongMode Set if long mode, false if not long mode.
3711 * @param cMaxDepth Number of levels to dump.
3712 * @param pHlp Pointer to the output functions.
3713 */
3714__BEGIN_DECLS
3715VMMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint32_t cr3, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp);
3716__END_DECLS
3717
3718#endif
3719
3720/**
3721 * Checks that the shadow page table is in sync with the guest one.
3722 *
3723 * @returns The number of errors.
3724 * @param pVM The virtual machine.
3725 * @param cr3 Guest context CR3 register
3726 * @param cr4 Guest context CR4 register
3727 * @param GCPtr Where to start. Defaults to 0.
3728 * @param cb How much to check. Defaults to everything.
3729 */
3730PGM_BTH_DECL(unsigned, AssertCR3)(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb)
3731{
3732#if PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
3733 return 0;
3734#else
3735 unsigned cErrors = 0;
3736
3737#if PGM_GST_TYPE == PGM_TYPE_PAE
3738 /** @todo currently broken; crashes below somewhere */
3739 AssertFailed();
3740#endif
3741
3742#if PGM_GST_TYPE == PGM_TYPE_32BIT \
3743 || PGM_GST_TYPE == PGM_TYPE_PAE \
3744 || PGM_GST_TYPE == PGM_TYPE_AMD64
3745
3746# if PGM_GST_TYPE == PGM_TYPE_AMD64
3747 bool fBigPagesSupported = true;
3748# else
3749 bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
3750# endif
3751 PPGM pPGM = &pVM->pgm.s;
3752 RTGCPHYS GCPhysGst; /* page address derived from the guest page tables. */
3753 RTHCPHYS HCPhysShw; /* page address derived from the shadow page tables. */
3754# ifndef IN_RING0
3755 RTHCPHYS HCPhys; /* general usage. */
3756# endif
3757 int rc;
3758
3759 /*
3760 * Check that the Guest CR3 and all its mappings are correct.
3761 */
3762 AssertMsgReturn(pPGM->GCPhysCR3 == (cr3 & GST_CR3_PAGE_MASK),
3763 ("Invalid GCPhysCR3=%RGp cr3=%RGp\n", pPGM->GCPhysCR3, (RTGCPHYS)cr3),
3764 false);
3765# if !defined(IN_RING0) && PGM_GST_TYPE != PGM_TYPE_AMD64
3766# if PGM_GST_TYPE == PGM_TYPE_32BIT
3767 rc = PGMShwGetPage(pVM, (RTGCPTR)pPGM->pGst32BitPdRC, NULL, &HCPhysShw);
3768# else
3769 rc = PGMShwGetPage(pVM, (RTGCPTR)pPGM->pGstPaePdptRC, NULL, &HCPhysShw);
3770# endif
3771 AssertRCReturn(rc, 1);
3772 HCPhys = NIL_RTHCPHYS;
3773 rc = pgmRamGCPhys2HCPhys(pPGM, cr3 & GST_CR3_PAGE_MASK, &HCPhys);
3774    AssertMsgReturn(HCPhys == HCPhysShw, ("HCPhys=%RHp HCPhysShw=%RHp (cr3)\n", HCPhys, HCPhysShw), false);
3775# if PGM_GST_TYPE == PGM_TYPE_32BIT && defined(IN_RING3)
3776 RTGCPHYS GCPhys;
3777 rc = PGMR3DbgR3Ptr2GCPhys(pVM, pPGM->pGst32BitPdR3, &GCPhys);
3778 AssertRCReturn(rc, 1);
3779 AssertMsgReturn((cr3 & GST_CR3_PAGE_MASK) == GCPhys, ("GCPhys=%RGp cr3=%RGp\n", GCPhys, (RTGCPHYS)cr3), false);
3780# endif
3781# endif /* !IN_RING0 */
3782
3783 /*
3784 * Get and check the Shadow CR3.
3785 */
3786# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3787 unsigned cPDEs = X86_PG_ENTRIES;
3788 unsigned cIncrement = X86_PG_ENTRIES * PAGE_SIZE;
3789# elif PGM_SHW_TYPE == PGM_TYPE_PAE
3790# if PGM_GST_TYPE == PGM_TYPE_32BIT
3791 unsigned cPDEs = X86_PG_PAE_ENTRIES * 4; /* treat it as a 2048 entry table. */
3792# else
3793 unsigned cPDEs = X86_PG_PAE_ENTRIES;
3794# endif
3795 unsigned cIncrement = X86_PG_PAE_ENTRIES * PAGE_SIZE;
3796# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
3797 unsigned cPDEs = X86_PG_PAE_ENTRIES;
3798 unsigned cIncrement = X86_PG_PAE_ENTRIES * PAGE_SIZE;
3799# endif
3800 if (cb != ~(RTGCPTR)0)
3801 cPDEs = RT_MIN(cb >> SHW_PD_SHIFT, 1);
3802
3803/** @todo call the other two PGMAssert*() functions. */
3804
3805# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
3806 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3807# endif
3808
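    /* Walk the hierarchy top down: PML4Es (AMD64 guests only), then PDPTEs (AMD64/PAE), then the PDs and PTs. */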
3809# if PGM_GST_TYPE == PGM_TYPE_AMD64
3810 unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3811
3812 for (; iPml4 < X86_PG_PAE_ENTRIES; iPml4++)
3813 {
3814 PPGMPOOLPAGE pShwPdpt = NULL;
3815 PX86PML4E pPml4eSrc;
3816 PX86PML4E pPml4eDst;
3817 RTGCPHYS GCPhysPdptSrc;
3818
3819 pPml4eSrc = pgmGstGetLongModePML4EPtr(&pVM->pgm.s, iPml4);
3820 pPml4eDst = pgmShwGetLongModePML4EPtr(&pVM->pgm.s, iPml4);
3821
3822 /* Fetch the pgm pool shadow descriptor if the shadow pml4e is present. */
3823 if (!pPml4eDst->n.u1Present)
3824 {
3825 GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
3826 continue;
3827 }
3828
3829 pShwPdpt = pgmPoolGetPage(pPool, pPml4eDst->u & X86_PML4E_PG_MASK);
3830 GCPhysPdptSrc = pPml4eSrc->u & X86_PML4E_PG_MASK_FULL;
3831
3832 if (pPml4eSrc->n.u1Present != pPml4eDst->n.u1Present)
3833 {
3834 AssertMsgFailed(("Present bit doesn't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
3835 GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
3836 cErrors++;
3837 continue;
3838 }
3839
3840 if (GCPhysPdptSrc != pShwPdpt->GCPhys)
3841 {
3842 AssertMsgFailed(("Physical address doesn't match! iPml4 %d pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4, pPml4eDst->u, pPml4eSrc->u, pShwPdpt->GCPhys, GCPhysPdptSrc));
3843 GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
3844 cErrors++;
3845 continue;
3846 }
3847
3848 if ( pPml4eDst->n.u1User != pPml4eSrc->n.u1User
3849 || pPml4eDst->n.u1Write != pPml4eSrc->n.u1Write
3850 || pPml4eDst->n.u1NoExecute != pPml4eSrc->n.u1NoExecute)
3851 {
3852 AssertMsgFailed(("User/Write/NoExec bits don't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
3853 GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
3854 cErrors++;
3855 continue;
3856 }
3857# else /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
3858 {
3859# endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
3860
3861# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
3862 /*
3863 * Check the PDPTEs too.
3864 */
3865 unsigned iPdpt = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
3866
3867    for (; iPdpt <= SHW_PDPT_MASK; iPdpt++)
3868 {
3869 unsigned iPDSrc;
3870 PPGMPOOLPAGE pShwPde = NULL;
3871 PX86PDPE pPdpeDst;
3872 RTGCPHYS GCPhysPdeSrc;
3873# if PGM_GST_TYPE == PGM_TYPE_PAE
3874 X86PDPE PdpeSrc;
3875 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtr, &iPDSrc, &PdpeSrc);
3876 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
3877# else
3878 PX86PML4E pPml4eSrc;
3879 X86PDPE PdpeSrc;
3880 PX86PDPT pPdptDst;
3881 PX86PDPAE pPDDst;
3882 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtr, &pPml4eSrc, &PdpeSrc, &iPDSrc);
3883
3884 rc = pgmShwGetLongModePDPtr(pVM, GCPtr, NULL, &pPdptDst, &pPDDst);
3885 if (rc != VINF_SUCCESS)
3886 {
3887 AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT, ("Unexpected rc=%Rrc\n", rc));
3888 GCPtr += 512 * _2M;
3889 continue; /* next PDPTE */
3890 }
3891 Assert(pPDDst);
3892# endif
3893 Assert(iPDSrc == 0);
3894
3895 pPdpeDst = &pPdptDst->a[iPdpt];
3896
3897 if (!pPdpeDst->n.u1Present)
3898 {
3899 GCPtr += 512 * _2M;
3900 continue; /* next PDPTE */
3901 }
3902
3903 pShwPde = pgmPoolGetPage(pPool, pPdpeDst->u & X86_PDPE_PG_MASK);
3904 GCPhysPdeSrc = PdpeSrc.u & X86_PDPE_PG_MASK;
3905
3906 if (pPdpeDst->n.u1Present != PdpeSrc.n.u1Present)
3907 {
3908 AssertMsgFailed(("Present bit doesn't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));
3909 GCPtr += 512 * _2M;
3910 cErrors++;
3911 continue;
3912 }
3913
3914 if (GCPhysPdeSrc != pShwPde->GCPhys)
3915 {
3916# if PGM_GST_TYPE == PGM_TYPE_AMD64
3917 AssertMsgFailed(("Physical address doesn't match! iPml4 %d iPdpt %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4, iPdpt, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
3918# else
3919 AssertMsgFailed(("Physical address doesn't match! iPdpt %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPdpt, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
3920# endif
3921 GCPtr += 512 * _2M;
3922 cErrors++;
3923 continue;
3924 }
3925
3926# if PGM_GST_TYPE == PGM_TYPE_AMD64
3927 if ( pPdpeDst->lm.u1User != PdpeSrc.lm.u1User
3928 || pPdpeDst->lm.u1Write != PdpeSrc.lm.u1Write
3929 || pPdpeDst->lm.u1NoExecute != PdpeSrc.lm.u1NoExecute)
3930 {
3931 AssertMsgFailed(("User/Write/NoExec bits don't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));
3932 GCPtr += 512 * _2M;
3933 cErrors++;
3934 continue;
3935 }
3936# endif
3937
3938# else /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
3939 {
3940# endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
3941# if PGM_GST_TYPE == PGM_TYPE_32BIT
3942 GSTPD const *pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
3943# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3944 PCX86PD pPDDst = pgmShwGet32BitPDPtr(&pVM->pgm.s);
3945# endif
3946# endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */
3947 /*
3948 * Iterate the shadow page directory.
3949 */
3950 GCPtr = (GCPtr >> SHW_PD_SHIFT) << SHW_PD_SHIFT;
3951 unsigned iPDDst = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
3952
3953 for (;
3954 iPDDst < cPDEs;
3955 iPDDst++, GCPtr += cIncrement)
3956 {
3957# if PGM_SHW_TYPE == PGM_TYPE_PAE
3958 const SHWPDE PdeDst = *pgmShwGetPaePDEPtr(pPGM, GCPtr);
3959# else
3960 const SHWPDE PdeDst = pPDDst->a[iPDDst];
3961# endif
3962 if (PdeDst.u & PGM_PDFLAGS_MAPPING)
3963 {
3964 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
3965 if ((PdeDst.u & X86_PDE_AVL_MASK) != PGM_PDFLAGS_MAPPING)
3966 {
3967 AssertMsgFailed(("Mapping shall only have PGM_PDFLAGS_MAPPING set! PdeDst.u=%#RX64\n", (uint64_t)PdeDst.u));
3968 cErrors++;
3969 continue;
3970 }
3971 }
3972 else if ( (PdeDst.u & X86_PDE_P)
3973 || ((PdeDst.u & (X86_PDE_P | PGM_PDFLAGS_TRACK_DIRTY)) == (X86_PDE_P | PGM_PDFLAGS_TRACK_DIRTY))
3974 )
3975 {
3976 HCPhysShw = PdeDst.u & SHW_PDE_PG_MASK;
3977 PPGMPOOLPAGE pPoolPage = pgmPoolGetPageByHCPhys(pVM, HCPhysShw);
3978 if (!pPoolPage)
3979 {
3980 AssertMsgFailed(("Invalid page table address %RHp at %RGv! PdeDst=%#RX64\n",
3981 HCPhysShw, GCPtr, (uint64_t)PdeDst.u));
3982 cErrors++;
3983 continue;
3984 }
3985 const SHWPT *pPTDst = (const SHWPT *)PGMPOOL_PAGE_2_PTR(pVM, pPoolPage);
3986
3987 if (PdeDst.u & (X86_PDE4M_PWT | X86_PDE4M_PCD))
3988 {
3989 AssertMsgFailed(("PDE flags PWT and/or PCD is set at %RGv! These flags are not virtualized! PdeDst=%#RX64\n",
3990 GCPtr, (uint64_t)PdeDst.u));
3991 cErrors++;
3992 }
3993
3994 if (PdeDst.u & (X86_PDE4M_G | X86_PDE4M_D))
3995 {
3996 AssertMsgFailed(("4K PDE reserved flags at %RGv! PdeDst=%#RX64\n",
3997 GCPtr, (uint64_t)PdeDst.u));
3998 cErrors++;
3999 }
4000
4001 const GSTPDE PdeSrc = pPDSrc->a[(iPDDst >> (GST_PD_SHIFT - SHW_PD_SHIFT)) & GST_PD_MASK];
4002 if (!PdeSrc.n.u1Present)
4003 {
4004 AssertMsgFailed(("Guest PDE at %RGv is not present! PdeDst=%#RX64 PdeSrc=%#RX64\n",
4005 GCPtr, (uint64_t)PdeDst.u, (uint64_t)PdeSrc.u));
4006 cErrors++;
4007 continue;
4008 }
4009
4010 if ( !PdeSrc.b.u1Size
4011 || !fBigPagesSupported)
4012 {
4013 GCPhysGst = PdeSrc.u & GST_PDE_PG_MASK;
4014# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
4015 GCPhysGst |= (iPDDst & 1) * (PAGE_SIZE / 2);
4016# endif
4017 }
4018 else
4019 {
4020# if PGM_GST_TYPE == PGM_TYPE_32BIT
4021 if (PdeSrc.u & X86_PDE4M_PG_HIGH_MASK)
4022 {
4023 AssertMsgFailed(("Guest PDE at %RGv is using PSE36 or similar! PdeSrc=%#RX64\n",
4024 GCPtr, (uint64_t)PdeSrc.u));
4025 cErrors++;
4026 continue;
4027 }
4028# endif
4029 GCPhysGst = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
4030# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
4031 GCPhysGst |= GCPtr & RT_BIT(X86_PAGE_2M_SHIFT);
4032# endif
4033 }
4034
4035 if ( pPoolPage->enmKind
4036 != (!PdeSrc.b.u1Size || !fBigPagesSupported ? BTH_PGMPOOLKIND_PT_FOR_PT : BTH_PGMPOOLKIND_PT_FOR_BIG))
4037 {
4038 AssertMsgFailed(("Invalid shadow page table kind %d at %RGv! PdeSrc=%#RX64\n",
4039 pPoolPage->enmKind, GCPtr, (uint64_t)PdeSrc.u));
4040 cErrors++;
4041 }
4042
4043 PPGMPAGE pPhysPage = pgmPhysGetPage(pPGM, GCPhysGst);
4044 if (!pPhysPage)
4045 {
4046 AssertMsgFailed(("Cannot find guest physical address %RGp in the PDE at %RGv! PdeSrc=%#RX64\n",
4047 GCPhysGst, GCPtr, (uint64_t)PdeSrc.u));
4048 cErrors++;
4049 continue;
4050 }
4051
4052 if (GCPhysGst != pPoolPage->GCPhys)
4053 {
4054 AssertMsgFailed(("GCPhysGst=%RGp != pPage->GCPhys=%RGp at %RGv\n",
4055 GCPhysGst, pPoolPage->GCPhys, GCPtr));
4056 cErrors++;
4057 continue;
4058 }
4059
4060 if ( !PdeSrc.b.u1Size
4061 || !fBigPagesSupported)
4062 {
4063 /*
4064 * Page Table.
4065 */
4066 const GSTPT *pPTSrc;
4067 rc = PGM_GCPHYS_2_PTR(pVM, GCPhysGst & ~(RTGCPHYS)(PAGE_SIZE - 1), &pPTSrc);
4068 if (RT_FAILURE(rc))
4069 {
4070 AssertMsgFailed(("Cannot map/convert guest physical address %RGp in the PDE at %RGv! PdeSrc=%#RX64\n",
4071 GCPhysGst, GCPtr, (uint64_t)PdeSrc.u));
4072 cErrors++;
4073 continue;
4074 }
4075 if ( (PdeSrc.u & (X86_PDE_P | X86_PDE_US | X86_PDE_RW/* | X86_PDE_A*/))
4076 != (PdeDst.u & (X86_PDE_P | X86_PDE_US | X86_PDE_RW/* | X86_PDE_A*/)))
4077 {
4078 /// @todo We get here a lot on out-of-sync CR3 entries. The access handler should zap them to avoid false alarms here!
4079 // (This problem will go away when/if we shadow multiple CR3s.)
4080 AssertMsgFailed(("4K PDE flags mismatch at %RGv! PdeSrc=%#RX64 PdeDst=%#RX64\n",
4081 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4082 cErrors++;
4083 continue;
4084 }
4085 if (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)
4086 {
4087 AssertMsgFailed(("4K PDEs cannot have PGM_PDFLAGS_TRACK_DIRTY set! GCPtr=%RGv PdeDst=%#RX64\n",
4088 GCPtr, (uint64_t)PdeDst.u));
4089 cErrors++;
4090 continue;
4091 }
4092
4093 /* iterate the page table. */
4094# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
4095                    /* Select the right half of the guest page table, as we're emulating one 32-bit page table with two PAE shadow page tables. */
4096 const unsigned offPTSrc = ((GCPtr >> SHW_PD_SHIFT) & 1) * 512;
4097# else
4098 const unsigned offPTSrc = 0;
4099# endif
4100 for (unsigned iPT = 0, off = 0;
4101 iPT < RT_ELEMENTS(pPTDst->a);
4102 iPT++, off += PAGE_SIZE)
4103 {
4104 const SHWPTE PteDst = pPTDst->a[iPT];
4105
4106 /* skip not-present entries. */
4107 if (!(PteDst.u & (X86_PTE_P | PGM_PTFLAGS_TRACK_DIRTY))) /** @todo deal with ALL handlers and CSAM !P pages! */
4108 continue;
4109 Assert(PteDst.n.u1Present);
4110
4111 const GSTPTE PteSrc = pPTSrc->a[iPT + offPTSrc];
4112 if (!PteSrc.n.u1Present)
4113 {
4114# ifdef IN_RING3
4115 PGMAssertHandlerAndFlagsInSync(pVM);
4116 PGMR3DumpHierarchyGC(pVM, cr3, cr4, (PdeSrc.u & GST_PDE_PG_MASK));
4117# endif
4118 AssertMsgFailed(("Out of sync (!P) PTE at %RGv! PteSrc=%#RX64 PteDst=%#RX64 pPTSrc=%RGv iPTSrc=%x PdeSrc=%x physpte=%RGp\n",
4119 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u, pPTSrc, iPT + offPTSrc, PdeSrc.au32[0],
4120 (PdeSrc.u & GST_PDE_PG_MASK) + (iPT + offPTSrc)*sizeof(PteSrc)));
4121 cErrors++;
4122 continue;
4123 }
4124
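                    /* Bits deliberately ignored when comparing guest and shadow PTEs: the physical
                       address bits plus flags PGM doesn't virtualize (G, D, PWT, PCD, PAT); the A
                       bit is added just below until it is synced properly. */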
4125 uint64_t fIgnoreFlags = GST_PTE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_G | X86_PTE_D | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT;
4126# if 1 /** @todo sync accessed bit properly... */
4127 fIgnoreFlags |= X86_PTE_A;
4128# endif
4129
4130 /* match the physical addresses */
4131 HCPhysShw = PteDst.u & SHW_PTE_PG_MASK;
4132 GCPhysGst = PteSrc.u & GST_PTE_PG_MASK;
4133
4134# ifdef IN_RING3
4135 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysGst, &HCPhys);
4136 if (RT_FAILURE(rc))
4137 {
4138 if (HCPhysShw != MMR3PageDummyHCPhys(pVM))
4139 {
4140 AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PteSrc=%#RX64 PteDst=%#RX64\n",
4141 GCPhysGst, GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4142 cErrors++;
4143 continue;
4144 }
4145 }
4146 else if (HCPhysShw != (HCPhys & SHW_PTE_PG_MASK))
4147 {
4148 AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
4149 GCPtr + off, HCPhysShw, HCPhys, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4150 cErrors++;
4151 continue;
4152 }
4153# endif
4154
4155 pPhysPage = pgmPhysGetPage(pPGM, GCPhysGst);
4156 if (!pPhysPage)
4157 {
4158# ifdef IN_RING3 /** @todo make MMR3PageDummyHCPhys an 'All' function! */
4159 if (HCPhysShw != MMR3PageDummyHCPhys(pVM))
4160 {
4161 AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PteSrc=%#RX64 PteDst=%#RX64\n",
4162 GCPhysGst, GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4163 cErrors++;
4164 continue;
4165 }
4166# endif
4167 if (PteDst.n.u1Write)
4168 {
4169 AssertMsgFailed(("Invalid guest page at %RGv is writable! GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
4170 GCPtr + off, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4171 cErrors++;
4172 }
4173 fIgnoreFlags |= X86_PTE_RW;
4174 }
4175 else if (HCPhysShw != (PGM_PAGE_GET_HCPHYS(pPhysPage) & SHW_PTE_PG_MASK))
4176 {
4177 AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
4178 GCPtr + off, HCPhysShw, pPhysPage->HCPhys, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4179 cErrors++;
4180 continue;
4181 }
4182
4183 /* flags */
4184 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPhysPage))
4185 {
4186 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPhysPage))
4187 {
4188 if (PteDst.n.u1Write)
4189 {
4190 AssertMsgFailed(("WRITE access flagged at %RGv but the page is writable! HCPhys=%RHp PteSrc=%#RX64 PteDst=%#RX64\n",
4191 GCPtr + off, pPhysPage->HCPhys, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4192 cErrors++;
4193 continue;
4194 }
4195 fIgnoreFlags |= X86_PTE_RW;
4196 }
4197 else
4198 {
4199 if (PteDst.n.u1Present)
4200 {
4201 AssertMsgFailed(("ALL access flagged at %RGv but the page is present! HCPhys=%RHp PteSrc=%#RX64 PteDst=%#RX64\n",
4202 GCPtr + off, pPhysPage->HCPhys, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4203 cErrors++;
4204 continue;
4205 }
4206 fIgnoreFlags |= X86_PTE_P;
4207 }
4208 }
4209 else
4210 {
4211 if (!PteSrc.n.u1Dirty && PteSrc.n.u1Write)
4212 {
4213 if (PteDst.n.u1Write)
4214 {
4215 AssertMsgFailed(("!DIRTY page at %RGv is writable! PteSrc=%#RX64 PteDst=%#RX64\n",
4216 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4217 cErrors++;
4218 continue;
4219 }
4220 if (!(PteDst.u & PGM_PTFLAGS_TRACK_DIRTY))
4221 {
4222 AssertMsgFailed(("!DIRTY page at %RGv is not marked TRACK_DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
4223 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4224 cErrors++;
4225 continue;
4226 }
4227 if (PteDst.n.u1Dirty)
4228 {
4229 AssertMsgFailed(("!DIRTY page at %RGv is marked DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
4230 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4231 cErrors++;
4232 }
4233# if 0 /** @todo sync access bit properly... */
4234 if (PteDst.n.u1Accessed != PteSrc.n.u1Accessed)
4235 {
4236 AssertMsgFailed(("!DIRTY page at %RGv is has mismatching accessed bit! PteSrc=%#RX64 PteDst=%#RX64\n",
4237 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4238 cErrors++;
4239 }
4240 fIgnoreFlags |= X86_PTE_RW;
4241# else
4242 fIgnoreFlags |= X86_PTE_RW | X86_PTE_A;
4243# endif
4244 }
4245 else if (PteDst.u & PGM_PTFLAGS_TRACK_DIRTY)
4246 {
4247 /* access bit emulation (not implemented). */
4248 if (PteSrc.n.u1Accessed || PteDst.n.u1Present)
4249 {
4250 AssertMsgFailed(("PGM_PTFLAGS_TRACK_DIRTY set at %RGv but no accessed bit emulation! PteSrc=%#RX64 PteDst=%#RX64\n",
4251 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4252 cErrors++;
4253 continue;
4254 }
4255 if (!PteDst.n.u1Accessed)
4256 {
4257 AssertMsgFailed(("!ACCESSED page at %RGv is has the accessed bit set! PteSrc=%#RX64 PteDst=%#RX64\n",
4258 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4259 cErrors++;
4260 }
4261 fIgnoreFlags |= X86_PTE_P;
4262 }
4263# ifdef DEBUG_sandervl
4264 fIgnoreFlags |= X86_PTE_D | X86_PTE_A;
4265# endif
4266 }
4267
4268 if ( (PteSrc.u & ~fIgnoreFlags) != (PteDst.u & ~fIgnoreFlags)
4269 && (PteSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (PteDst.u & ~fIgnoreFlags)
4270 )
4271 {
4272 AssertMsgFailed(("Flags mismatch at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PteSrc=%#RX64 PteDst=%#RX64\n",
4273 GCPtr + off, (uint64_t)PteSrc.u & ~fIgnoreFlags, (uint64_t)PteDst.u & ~fIgnoreFlags,
4274 fIgnoreFlags, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4275 cErrors++;
4276 continue;
4277 }
4278 } /* foreach PTE */
4279 }
4280 else
4281 {
4282 /*
4283 * Big Page.
4284 */
4285 uint64_t fIgnoreFlags = X86_PDE_AVL_MASK | GST_PDE_PG_MASK | X86_PDE4M_G | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_PWT | X86_PDE4M_PCD;
4286 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
4287 {
4288 if (PdeDst.n.u1Write)
4289 {
4290 AssertMsgFailed(("!DIRTY page at %RGv is writable! PdeSrc=%#RX64 PdeDst=%#RX64\n",
4291 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4292 cErrors++;
4293 continue;
4294 }
4295 if (!(PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY))
4296 {
4297 AssertMsgFailed(("!DIRTY page at %RGv is not marked TRACK_DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
4298 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4299 cErrors++;
4300 continue;
4301 }
4302# if 0 /** @todo sync access bit properly... */
4303 if (PdeDst.n.u1Accessed != PdeSrc.b.u1Accessed)
4304 {
4305 AssertMsgFailed(("!DIRTY page at %RGv is has mismatching accessed bit! PteSrc=%#RX64 PteDst=%#RX64\n",
4306 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4307 cErrors++;
4308 }
4309 fIgnoreFlags |= X86_PTE_RW;
4310# else
4311 fIgnoreFlags |= X86_PTE_RW | X86_PTE_A;
4312# endif
4313 }
4314 else if (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)
4315 {
4316 /* access bit emulation (not implemented). */
4317 if (PdeSrc.b.u1Accessed || PdeDst.n.u1Present)
4318 {
4319 AssertMsgFailed(("PGM_PDFLAGS_TRACK_DIRTY set at %RGv but no accessed bit emulation! PdeSrc=%#RX64 PdeDst=%#RX64\n",
4320 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4321 cErrors++;
4322 continue;
4323 }
4324 if (!PdeDst.n.u1Accessed)
4325 {
4326 AssertMsgFailed(("!ACCESSED page at %RGv is has the accessed bit set! PdeSrc=%#RX64 PdeDst=%#RX64\n",
4327 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4328 cErrors++;
4329 }
4330 fIgnoreFlags |= X86_PTE_P;
4331 }
4332
4333 if ((PdeSrc.u & ~fIgnoreFlags) != (PdeDst.u & ~fIgnoreFlags))
4334 {
4335 AssertMsgFailed(("Flags mismatch (B) at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PdeDst=%#RX64\n",
4336 GCPtr, (uint64_t)PdeSrc.u & ~fIgnoreFlags, (uint64_t)PdeDst.u & ~fIgnoreFlags,
4337 fIgnoreFlags, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4338 cErrors++;
4339 }
4340
4341 /* iterate the page table. */
4342 for (unsigned iPT = 0, off = 0;
4343 iPT < RT_ELEMENTS(pPTDst->a);
4344 iPT++, off += PAGE_SIZE, GCPhysGst += PAGE_SIZE)
4345 {
4346 const SHWPTE PteDst = pPTDst->a[iPT];
4347
4348 if (PteDst.u & PGM_PTFLAGS_TRACK_DIRTY)
4349 {
4350 AssertMsgFailed(("The PTE at %RGv emulating a 2/4M page is marked TRACK_DIRTY! PdeSrc=%#RX64 PteDst=%#RX64\n",
4351 GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4352 cErrors++;
4353 }
4354
4355 /* skip not-present entries. */
4356 if (!PteDst.n.u1Present) /** @todo deal with ALL handlers and CSAM !P pages! */
4357 continue;
4358
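                        /* For 2/4 MB mappings only P, RW and US are compared below; A, D, G, NX and the caching bits are all ignored. */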
4359 fIgnoreFlags = X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT | X86_PTE_D | X86_PTE_A | X86_PTE_G | X86_PTE_PAE_NX;
4360
4361 /* match the physical addresses */
4362 HCPhysShw = PteDst.u & X86_PTE_PAE_PG_MASK;
4363
4364# ifdef IN_RING3
4365 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysGst, &HCPhys);
4366 if (RT_FAILURE(rc))
4367 {
4368 if (HCPhysShw != MMR3PageDummyHCPhys(pVM))
4369 {
4370 AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PdeSrc=%#RX64 PteDst=%#RX64\n",
4371 GCPhysGst, GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4372 cErrors++;
4373 }
4374 }
4375 else if (HCPhysShw != (HCPhys & X86_PTE_PAE_PG_MASK))
4376 {
4377 AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
4378 GCPtr + off, HCPhysShw, HCPhys, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4379 cErrors++;
4380 continue;
4381 }
4382# endif
4383 pPhysPage = pgmPhysGetPage(pPGM, GCPhysGst);
4384 if (!pPhysPage)
4385 {
4386# ifdef IN_RING3 /** @todo make MMR3PageDummyHCPhys an 'All' function! */
4387 if (HCPhysShw != MMR3PageDummyHCPhys(pVM))
4388 {
4389 AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PdeSrc=%#RX64 PteDst=%#RX64\n",
4390 GCPhysGst, GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4391 cErrors++;
4392 continue;
4393 }
4394# endif
4395 if (PteDst.n.u1Write)
4396 {
4397 AssertMsgFailed(("Invalid guest page at %RGv is writable! GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
4398 GCPtr + off, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4399 cErrors++;
4400 }
4401 fIgnoreFlags |= X86_PTE_RW;
4402 }
4403 else if (HCPhysShw != (pPhysPage->HCPhys & X86_PTE_PAE_PG_MASK))
4404 {
4405 AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
4406 GCPtr + off, HCPhysShw, pPhysPage->HCPhys, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4407 cErrors++;
4408 continue;
4409 }
4410
4411 /* flags */
4412 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPhysPage))
4413 {
4414 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPhysPage))
4415 {
4416 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPhysPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
4417 {
4418 if (PteDst.n.u1Write)
4419 {
4420 AssertMsgFailed(("WRITE access flagged at %RGv but the page is writable! HCPhys=%RHp PdeSrc=%#RX64 PteDst=%#RX64\n",
4421 GCPtr + off, pPhysPage->HCPhys, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4422 cErrors++;
4423 continue;
4424 }
4425 fIgnoreFlags |= X86_PTE_RW;
4426 }
4427 }
4428 else
4429 {
4430 if (PteDst.n.u1Present)
4431 {
4432 AssertMsgFailed(("ALL access flagged at %RGv but the page is present! HCPhys=%RHp PdeSrc=%#RX64 PteDst=%#RX64\n",
4433 GCPtr + off, pPhysPage->HCPhys, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4434 cErrors++;
4435 continue;
4436 }
4437 fIgnoreFlags |= X86_PTE_P;
4438 }
4439 }
4440
4441 if ( (PdeSrc.u & ~fIgnoreFlags) != (PteDst.u & ~fIgnoreFlags)
4442 && (PdeSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (PteDst.u & ~fIgnoreFlags) /* lazy phys handler dereg. */
4443 )
4444 {
4445 AssertMsgFailed(("Flags mismatch (BT) at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PteDst=%#RX64\n",
4446 GCPtr + off, (uint64_t)PdeSrc.u & ~fIgnoreFlags, (uint64_t)PteDst.u & ~fIgnoreFlags,
4447 fIgnoreFlags, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4448 cErrors++;
4449 continue;
4450 }
4451 } /* for each PTE */
4452 }
4453 }
4454 /* not present */
4455
4456 } /* for each PDE */
4457
4458 } /* for each PDPTE */
4459
4460 } /* for each PML4E */
4461
4462# ifdef DEBUG
4463 if (cErrors)
4464 LogFlow(("AssertCR3: cErrors=%d\n", cErrors));
4465# endif
4466
4467#endif /* GST == 32BIT, PAE or AMD64 */
4468 return cErrors;
4469
4470#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT */
4471}
4472#endif /* VBOX_STRICT */
4473
4474
4475/**
4476 * Sets up the CR3 for shadow paging
4477 *
4478 * @returns Strict VBox status code.
4479 * @retval VINF_SUCCESS.
4480 *
4481 * @param pVM VM handle.
4482 * @param GCPhysCR3 The physical address in the CR3 register.
4483 */
4484PGM_BTH_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
4485{
4486 /* Update guest paging info. */
4487#if PGM_GST_TYPE == PGM_TYPE_32BIT \
4488 || PGM_GST_TYPE == PGM_TYPE_PAE \
4489 || PGM_GST_TYPE == PGM_TYPE_AMD64
4490
4491 LogFlow(("MapCR3: %RGp\n", GCPhysCR3));
4492
4493 /*
4494 * Map the page CR3 points at.
4495 */
4496 RTHCPHYS HCPhysGuestCR3;
4497 RTHCPTR HCPtrGuestCR3;
4498 int rc = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhysCR3 & GST_CR3_PAGE_MASK, &HCPtrGuestCR3, &HCPhysGuestCR3);
4499 if (RT_SUCCESS(rc))
4500 {
4501 rc = PGMMap(pVM, (RTGCPTR)pVM->pgm.s.GCPtrCR3Mapping, HCPhysGuestCR3, PAGE_SIZE, 0);
4502 if (RT_SUCCESS(rc))
4503 {
4504# ifdef IN_RC
4505 PGM_INVL_PG(pVM->pgm.s.GCPtrCR3Mapping);
4506# endif
4507# if PGM_GST_TYPE == PGM_TYPE_32BIT
4508 pVM->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
4509# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4510 pVM->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
4511# endif
4512 pVM->pgm.s.pGst32BitPdRC = (RCPTRTYPE(PX86PD))pVM->pgm.s.GCPtrCR3Mapping;
4513
4514# elif PGM_GST_TYPE == PGM_TYPE_PAE
4515 unsigned off = GCPhysCR3 & GST_CR3_PAGE_MASK & PAGE_OFFSET_MASK;
4516 pVM->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
4517# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4518 pVM->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
4519# endif
4520 pVM->pgm.s.pGstPaePdptRC = (RCPTRTYPE(PX86PDPT))((RCPTRTYPE(uint8_t *))pVM->pgm.s.GCPtrCR3Mapping + off);
4521 Log(("Cached mapping %RRv\n", pVM->pgm.s.pGstPaePdptRC));
4522
4523 /*
4524 * Map the 4 PDs too.
4525 */
4526 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
4527 RTGCPTR GCPtr = pVM->pgm.s.GCPtrCR3Mapping + PAGE_SIZE;
4528 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++, GCPtr += PAGE_SIZE)
4529 {
4530 if (pGuestPDPT->a[i].n.u1Present)
4531 {
4532 RTHCPTR HCPtr;
4533 RTHCPHYS HCPhys;
4534 RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;
4535 int rc2 = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhys, &HCPtr, &HCPhys);
4536 if (RT_SUCCESS(rc2))
4537 {
4538 rc = PGMMap(pVM, GCPtr, HCPhys & X86_PTE_PAE_PG_MASK, PAGE_SIZE, 0);
4539 AssertRCReturn(rc, rc);
4540
4541 pVM->pgm.s.apGstPaePDsR3[i] = (R3PTRTYPE(PX86PDPAE))HCPtr;
4542# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4543 pVM->pgm.s.apGstPaePDsR0[i] = (R0PTRTYPE(PX86PDPAE))HCPtr;
4544# endif
4545 pVM->pgm.s.apGstPaePDsRC[i] = (RCPTRTYPE(PX86PDPAE))GCPtr;
4546 pVM->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
4547 PGM_INVL_PG(GCPtr); /** @todo This ends up calling HWACCMInvalidatePage, is that correct? */
4548 continue;
4549 }
4550 AssertMsgFailed(("pgmR3Gst32BitMapCR3: rc2=%d GCPhys=%RGp i=%d\n", rc2, GCPhys, i));
4551 }
4552
4553 pVM->pgm.s.apGstPaePDsR3[i] = 0;
4554# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4555 pVM->pgm.s.apGstPaePDsR0[i] = 0;
4556# endif
4557 pVM->pgm.s.apGstPaePDsRC[i] = 0;
4558 pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
4559 PGM_INVL_PG(GCPtr); /** @todo this shouldn't be necessary? */
4560 }
4561
4562# elif PGM_GST_TYPE == PGM_TYPE_AMD64
4563 pVM->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
4564# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4565 pVM->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
4566# endif
4567# ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
4568 if (!HWACCMIsNestedPagingActive(pVM))
4569 {
4570 /*
4571 * Update the shadow root page as well since that's not fixed.
4572 */
4573 /** @todo Move this into PGMAllBth.h. */
4574 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
4575 if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
4576 {
4577 /* It might have been freed already by a pool flush (see e.g. PGMR3MappingsUnfix). */
4578 /** @todo Coordinate this better with the pool. */
4579 if (pVM->pgm.s.CTX_SUFF(pShwPageCR3)->enmKind != PGMPOOLKIND_FREE)
4580 pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->GCPhys >> PAGE_SHIFT);
4581 pVM->pgm.s.pShwPageCR3R3 = 0;
4582 pVM->pgm.s.pShwPageCR3R0 = 0;
4583 pVM->pgm.s.pShwRootR3 = 0;
4584# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4585 pVM->pgm.s.pShwRootR0 = 0;
4586# endif
4587 pVM->pgm.s.HCPhysShwCR3 = 0;
4588 }
4589
4590 Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
4591 rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_64BIT_PML4, PGMPOOL_IDX_AMD64_CR3, GCPhysCR3 >> PAGE_SHIFT, &pVM->pgm.s.CTX_SUFF(pShwPageCR3));
4592 if (rc == VERR_PGM_POOL_FLUSHED)
4593 {
4594 Log(("MapCR3: PGM pool flushed -> signal sync cr3\n"));
4595 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
4596 return VINF_PGM_SYNC_CR3;
4597 }
4598 AssertRCReturn(rc, rc);
4599# ifdef IN_RING0
4600 pVM->pgm.s.pShwPageCR3R3 = MMHyperCCToR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
4601# else
4602 pVM->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
4603# endif
4604 pVM->pgm.s.pShwRootR3 = (R3PTRTYPE(void *))pVM->pgm.s.CTX_SUFF(pShwPageCR3)->pvPageR3;
4605 Assert(pVM->pgm.s.pShwRootR3);
4606# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4607 pVM->pgm.s.pShwRootR0 = (R0PTRTYPE(void *))PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.CTX_SUFF(pShwPageCR3));
4608# endif
4609 pVM->pgm.s.HCPhysShwCR3 = pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
4610 rc = VINF_SUCCESS; /* clear it - pgmPoolAlloc returns hints. */
4611 }
4612# endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
4613# endif
4614 }
4615 else
4616 AssertMsgFailed(("rc=%Rrc GCPhysGuestPD=%RGp\n", rc, GCPhysCR3));
4617 }
4618 else
4619 AssertMsgFailed(("rc=%Rrc GCPhysGuestPD=%RGp\n", rc, GCPhysCR3));
4620
#else /* prot/real stub */
    int rc = VINF_SUCCESS;
#endif

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    /* Update the shadow paging info for guest modes with paging (32-bit, PAE and AMD64). */
# if (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
          || PGM_SHW_TYPE == PGM_TYPE_PAE \
          || PGM_SHW_TYPE == PGM_TYPE_AMD64) \
      && (   PGM_GST_TYPE != PGM_TYPE_REAL \
          && PGM_GST_TYPE != PGM_TYPE_PROT))

    Assert(!HWACCMIsNestedPagingActive(pVM));

    /*
     * Update the shadow root page as well since that's not fixed.
     */
    PPGMPOOL     pPool            = pVM->pgm.s.CTX_SUFF(pPool);
    PPGMPOOLPAGE pOldShwPageCR3   = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
    uint32_t     iOldShwUserTable = pVM->pgm.s.iShwUserTable;
    uint32_t     iOldShwUser      = pVM->pgm.s.iShwUser;
    PPGMPOOLPAGE pNewShwPageCR3;

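    /* Allocate the new shadow root page; BTH_PGMPOOLKIND_ROOT expands to the pool page kind (PD/PDPT/PML4) matching the current shadow mode. */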
    Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
    rc = pgmPoolAlloc(pVM, GCPhysCR3 & GST_CR3_PAGE_MASK, BTH_PGMPOOLKIND_ROOT, SHW_POOL_ROOT_IDX, GCPhysCR3 >> PAGE_SHIFT, &pNewShwPageCR3);
    if (rc == VERR_PGM_POOL_FLUSHED)
    {
        Log(("MapCR3: PGM pool flushed -> signal sync cr3\n"));
        Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
        return VINF_PGM_SYNC_CR3;
    }
    AssertRCReturn(rc, rc);
    rc = VINF_SUCCESS; /* clear it - pgmPoolAlloc returns hints. */

#  ifdef IN_RC
    /** @note We can't deal with jumps to ring 3 here as we're now in an inconsistent state! */
    VMMGCLogDisable(pVM);
#  endif
    /* Mark the page as locked; disallow flushing. */
    pgmPoolLockPage(pPool, pNewShwPageCR3);

    pVM->pgm.s.iShwUser      = SHW_POOL_ROOT_IDX;
    pVM->pgm.s.iShwUserTable = GCPhysCR3 >> PAGE_SHIFT;
    pVM->pgm.s.CTX_SUFF(pShwPageCR3) = pNewShwPageCR3;
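    /* The pointer for the current context was set above; now derive the root pointers for the other two contexts. */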
#  ifdef IN_RING0
    pVM->pgm.s.pShwPageCR3R3 = MMHyperCCToR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    pVM->pgm.s.pShwPageCR3RC = MMHyperCCToRC(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
#  elif defined(IN_RC)
    pVM->pgm.s.pShwPageCR3R3 = MMHyperCCToR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    pVM->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
#  else
    pVM->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    pVM->pgm.s.pShwPageCR3RC = MMHyperCCToRC(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
#  endif
    pVM->pgm.s.pShwRootR3 = (R3PTRTYPE(void *))pVM->pgm.s.CTX_SUFF(pShwPageCR3)->pvPageR3;
    Assert(pVM->pgm.s.pShwRootR3);
#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->pgm.s.pShwRootR0 = (R0PTRTYPE(void *))PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.CTX_SUFF(pShwPageCR3));
#  endif
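    /* The pool page's AVL tree key is its host physical address, which is exactly what the shadow CR3 must point to. */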
    pVM->pgm.s.HCPhysShwCR3 = pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;

#  ifndef PGM_WITHOUT_MAPPINGS
    /*
     * Apply all hypervisor mappings to the new CR3.
     * Note that SyncCR3 is executed whenever CR3 changes in a paged guest mode, which
     * makes sure we check for conflicts in the new CR3 root.
     */
#   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL));
#   endif
    rc = pgmMapActivateCR3(pVM, pNewShwPageCR3);
    AssertRCReturn(rc, rc);
#  endif

    /* Set the current hypervisor CR3. */
    CPUMSetHyperCR3(pVM, PGMGetHyperCR3(pVM));

#  ifdef IN_RC
    VMMGCLogEnable(pVM);
#  endif

    /* Clean up the old CR3 root. */
    if (pOldShwPageCR3)
    {
        Assert(pOldShwPageCR3->enmKind != PGMPOOLKIND_FREE);
#  ifndef PGM_WITHOUT_MAPPINGS
        /* Remove the hypervisor mappings from the shadow page table. */
        pgmMapDeactivateCR3(pVM, pOldShwPageCR3);
#  endif
        /* Mark the page as unlocked; allow flushing again. */
        pgmPoolUnlockPage(pPool, pOldShwPageCR3);

        pgmPoolFreeByPage(pPool, pOldShwPageCR3, iOldShwUser, iOldShwUserTable);
    }

# endif
#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */

    return rc;
}

/**
 * Unmaps the shadow CR3.
 *
 * @returns VBox status, no specials.
 * @param   pVM     VM handle.
 */
PGM_BTH_DECL(int, UnmapCR3)(PVM pVM)
{
    LogFlow(("UnmapCR3\n"));

    int rc = VINF_SUCCESS;

    /* Update guest paging info. */
#if PGM_GST_TYPE == PGM_TYPE_32BIT
    pVM->pgm.s.pGst32BitPdR3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->pgm.s.pGst32BitPdR0 = 0;
# endif
    pVM->pgm.s.pGst32BitPdRC = 0;

#elif PGM_GST_TYPE == PGM_TYPE_PAE
    pVM->pgm.s.pGstPaePdptR3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->pgm.s.pGstPaePdptR0 = 0;
# endif
    pVM->pgm.s.pGstPaePdptRC = 0;
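    /* Invalidate all four cached guest PAE page directory mappings. */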
    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    {
        pVM->pgm.s.apGstPaePDsR3[i]    = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
        pVM->pgm.s.apGstPaePDsR0[i]    = 0;
# endif
        pVM->pgm.s.apGstPaePDsRC[i]    = 0;
        pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
    }

#elif PGM_GST_TYPE == PGM_TYPE_AMD64
    pVM->pgm.s.pGstAmd64Pml4R3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->pgm.s.pGstAmd64Pml4R0 = 0;
# endif
# ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    if (!HWACCMIsNestedPagingActive(pVM))
    {
        pVM->pgm.s.pShwRootR3 = 0;
#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
        pVM->pgm.s.pShwRootR0 = 0;
#  endif
        pVM->pgm.s.HCPhysShwCR3 = 0;
        if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        {
            PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
            pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->GCPhys >> PAGE_SHIFT);
            pVM->pgm.s.pShwPageCR3R3 = 0;
            pVM->pgm.s.pShwPageCR3R0 = 0;
        }
    }
# endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */

#else /* prot/real mode stub */
    /* nothing to do */
#endif

#if defined(VBOX_WITH_PGMPOOL_PAGING_ONLY) && !defined(IN_RC) /* In RC we rely on MapCR3 to do the shadow part for us at a safe time. */
    /* Update shadow paging info. */
# if (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
      || PGM_SHW_TYPE == PGM_TYPE_PAE \
      || PGM_SHW_TYPE == PGM_TYPE_AMD64)

#  if PGM_GST_TYPE != PGM_TYPE_REAL
    Assert(!HWACCMIsNestedPagingActive(pVM));
#  endif

#  ifndef PGM_WITHOUT_MAPPINGS
    if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        /* Remove the hypervisor mappings from the shadow page table. */
        pgmMapDeactivateCR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
#  endif

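    /* Clear the shadow root pointers and hand the shadow CR3 page back to the pool. */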
    pVM->pgm.s.pShwRootR3 = 0;
#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->pgm.s.pShwRootR0 = 0;
#  endif
    pVM->pgm.s.HCPhysShwCR3 = 0;
    if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

        /* Mark the page as unlocked; allow flushing again. */
        pgmPoolUnlockPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3));

        pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pVM->pgm.s.iShwUser, pVM->pgm.s.iShwUserTable);
        pVM->pgm.s.pShwPageCR3R3 = 0;
        pVM->pgm.s.pShwPageCR3R0 = 0;
        pVM->pgm.s.iShwUser      = 0;
        pVM->pgm.s.iShwUserTable = 0;
    }
# endif
#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY && !IN_RC */

    return rc;
}
