VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllBth.h@ 12997

Last change on this file since 12997 was 12997, checked in by vboxsync, 17 years ago

Fixed regressions in return code checking

1/* $Id: PGMAllBth.h 12997 2008-10-06 09:31:20Z vboxsync $ */
2/** @file
3 * VBox - Page Manager, Shadow+Guest Paging Template - All context code.
4 *
5 * This file is a big challenge!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
20 * Clara, CA 95054 USA or visit http://www.sun.com if you need
21 * additional information or have any questions.
22 */
23
24/*******************************************************************************
25* Internal Functions *
26*******************************************************************************/
27__BEGIN_DECLS
28PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
29PGM_BTH_DECL(int, InvalidatePage)(PVM pVM, RTGCUINTPTR GCPtrPage);
30PGM_BTH_DECL(int, SyncPage)(PVM pVM, GSTPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uErr);
31PGM_BTH_DECL(int, CheckPageFault)(PVM pVM, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCUINTPTR GCPtrPage);
32PGM_BTH_DECL(int, SyncPT)(PVM pVM, unsigned iPD, PGSTPD pPDSrc, RTGCUINTPTR GCPtrPage);
33PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVM pVM, RTGCUINTPTR Addr, unsigned fPage, unsigned uErr);
34PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, RTGCUINTPTR GCPtrPage);
35PGM_BTH_DECL(int, SyncCR3)(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
36#ifdef VBOX_STRICT
37PGM_BTH_DECL(unsigned, AssertCR3)(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCUINTPTR GCPtr = 0, RTGCUINTPTR cb = ~(RTGCUINTPTR)0);
38#endif
39#ifdef PGMPOOL_WITH_USER_TRACKING
40DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVM pVM, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys);
41#endif
42__END_DECLS
43
44
45/* Filter out some illegal combinations of guest and shadow paging, so we can remove redundant checks inside functions. */
46#if PGM_GST_TYPE == PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
47# error "Invalid combination; PAE guest implies PAE shadow"
48#endif
49
50#if (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
51 && !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT)
52# error "Invalid combination; real or protected mode without paging implies 32 bits or PAE shadow paging."
53#endif
54
55#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE) \
56 && !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT)
57# error "Invalid combination; 32 bits guest paging or PAE implies 32 bits or PAE shadow paging."
58#endif
59
60#if (PGM_GST_TYPE == PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT) \
61 || (PGM_SHW_TYPE == PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PROT)
62# error "Invalid combination; AMD64 guest implies AMD64 shadow and vice versa"
63#endif
64
65#ifdef IN_RING0 /* no mappings in VT-x and AMD-V mode */
66# define PGM_WITHOUT_MAPPINGS
67#endif
68
69/**
70 * #PF Handler for raw-mode guest execution.
71 *
72 * @returns VBox status code (appropriate for trap handling and GC return).
73 * @param pVM VM Handle.
74 * @param uErr The trap error code.
75 * @param pRegFrame Trap register frame.
76 * @param pvFault The fault address.
77 */
78PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
79{
80#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
81 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
82 && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
83
84# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE != PGM_TYPE_PAE
85 /*
86 * Hide the instruction fetch trap indicator for now.
87 */
88 /** @todo NXE will change this and we must fix NXE in the switcher too! */
89 if (uErr & X86_TRAP_PF_ID)
90 {
91 uErr &= ~X86_TRAP_PF_ID;
92 TRPMSetErrorCode(pVM, uErr);
93 }
94# endif
95
96 /*
97 * Get PDs.
98 */
99 int rc;
100# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
101# if PGM_GST_TYPE == PGM_TYPE_32BIT
102 const unsigned iPDSrc = (RTGCUINTPTR)pvFault >> GST_PD_SHIFT;
103 PGSTPD pPDSrc = CTXSUFF(pVM->pgm.s.pGuestPD);
104
105# elif PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
106
107# if PGM_GST_TYPE == PGM_TYPE_PAE
108 unsigned iPDSrc;
109 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, (RTGCUINTPTR)pvFault, &iPDSrc);
110
111# elif PGM_GST_TYPE == PGM_TYPE_AMD64
112 unsigned iPDSrc;
113 PX86PML4E pPml4eSrc;
114 X86PDPE PdpeSrc;
115 PGSTPD pPDSrc;
116
117 pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, pvFault, &pPml4eSrc, &PdpeSrc, &iPDSrc);
118 Assert(pPml4eSrc);
119# endif
120 /* Quick check for a valid guest trap. */
121 if (!pPDSrc)
122 {
123 LogFlow(("Trap0eHandler: guest PDPTR %d not present CR3=%VGp\n", (pvFault >> X86_PML4_SHIFT) & X86_PML4_MASK, (CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK)));
124 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eGuestTrap; });
125 TRPMSetErrorCode(pVM, uErr);
126 return VINF_EM_RAW_GUEST_TRAP;
127 }
128# endif
129# else
130 PGSTPD pPDSrc = NULL;
131 const unsigned iPDSrc = 0;
132# endif
133
134# if PGM_SHW_TYPE == PGM_TYPE_32BIT
135 const unsigned iPDDst = (RTGCUINTPTR)pvFault >> SHW_PD_SHIFT;
136 PX86PD pPDDst = pVM->pgm.s.CTXMID(p,32BitPD);
137# elif PGM_SHW_TYPE == PGM_TYPE_PAE
138 const unsigned iPDDst = (RTGCUINTPTR)pvFault >> SHW_PD_SHIFT;
139 PX86PDPAE pPDDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0]; /* We treat this as a PD with 2048 entries, so no need to and with SHW_PD_MASK to get iPDDst */
140
141# if PGM_GST_TYPE == PGM_TYPE_PAE
142 /* Did we mark the PDPT as not present in SyncCR3? */
143 unsigned iPdpte = ((RTGCUINTPTR)pvFault >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
144 if (!pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPdpte].n.u1Present)
145 pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPdpte].n.u1Present = 1;
146
147# endif
148
149# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
150 const unsigned iPDDst = (((RTGCUINTPTR)pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
151 PX86PDPAE pPDDst;
152# if PGM_GST_TYPE == PGM_TYPE_PROT
153 /* AMD-V nested paging */
154 X86PML4E Pml4eSrc;
155 X86PDPE PdpeSrc;
156 PX86PML4E pPml4eSrc = &Pml4eSrc;
157
158 /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
159 Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
160 PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
161# endif
162
163 rc = PGMShwSyncLongModePDPtr(pVM, (RTGCUINTPTR)pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
164 AssertReturn(rc == VINF_SUCCESS /* *must* test for VINF_SUCCESS!! */, rc);
165 Assert(pPDDst);
166# elif PGM_SHW_TYPE == PGM_TYPE_EPT
167 const unsigned iPDDst = (((RTGCUINTPTR)pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
168 PEPTPD pPDDst;
169
170 rc = PGMShwGetEPTPDPtr(pVM, (RTGCUINTPTR)pvFault, NULL, &pPDDst);
171 AssertRCReturn(rc, rc);
172 Assert(pPDDst);
173# endif
174
175# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
176 /*
177 * If we successfully correct the write protection fault due to dirty bit
178 * tracking, or this page fault is a genuine one, then return immediately.
179 */
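/*
 * (Sketch of the dirty-bit trick, going by the PGM_PTFLAGS_TRACK_DIRTY
 * convention visible in SyncPageWorker further down: a guest-writable page
 * whose guest D bit is clear gets a read-only shadow PTE tagged
 * PGM_PTFLAGS_TRACK_DIRTY; the first write then faults, CheckPageFault sets
 * the A/D bits in the guest entry, restores write access in the shadow PTE
 * and the instruction is restarted.)
 */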
180 STAM_PROFILE_START(&pVM->pgm.s.StatCheckPageFault, e);
181 rc = PGM_BTH_NAME(CheckPageFault)(pVM, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], (RTGCUINTPTR)pvFault);
182 STAM_PROFILE_STOP(&pVM->pgm.s.StatCheckPageFault, e);
183 if ( rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
184 || rc == VINF_EM_RAW_GUEST_TRAP)
185 {
186 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution)
187 = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVM->pgm.s.StatTrap0eDirtyAndAccessedBits : &pVM->pgm.s.StatTrap0eGuestTrap; });
188 LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
189 return rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? VINF_SUCCESS : rc;
190 }
191
192 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0ePD[iPDSrc]);
193# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
194
195 /*
196 * A common case is the not-present error caused by lazy page table syncing.
197 *
198 * It is IMPORTANT that we weed out any access to non-present shadow PDEs here
199 * so we can safely assume that the shadow PT is present when calling SyncPage later.
200 *
201 * On failure, we ASSUME that SyncPT is out of memory or detected some kind
202 * of mapping conflict and defer to SyncCR3 in R3.
203 * (Again, we do NOT support access handlers for non-present guest pages.)
204 *
205 */
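/*
 * For reference (architecture defined, not VBox specific), the #PF error
 * code bits tested throughout this handler are:
 *     X86_TRAP_PF_P  (bit 0) - set: protection violation, clear: not present.
 *     X86_TRAP_PF_RW (bit 1) - set: write access.
 *     X86_TRAP_PF_US (bit 2) - set: user-mode (CPL=3) access.
 *     X86_TRAP_PF_ID (bit 4) - set: instruction fetch (NX-capable CPUs).
 * So the check below only fires for a fault on a page the CPU considers
 * not present while the guest PDE claims it is - the lazy-sync case.
 */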
206# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
207 GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
208# else
209 GSTPDE PdeSrc;
210 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
211 PdeSrc.n.u1Present = 1;
212 PdeSrc.n.u1Write = 1;
213 PdeSrc.n.u1Accessed = 1;
214 PdeSrc.n.u1User = 1;
215# endif
216 if ( !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
217 && !pPDDst->a[iPDDst].n.u1Present
218 && PdeSrc.n.u1Present
219 )
220
221 {
222 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eSyncPT; });
223 STAM_PROFILE_START(&pVM->pgm.s.StatLazySyncPT, f);
224 LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
225 rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, (RTGCUINTPTR)pvFault);
226 if (VBOX_SUCCESS(rc))
227 {
228 STAM_PROFILE_STOP(&pVM->pgm.s.StatLazySyncPT, f);
229 return rc;
230 }
231 Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
232 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
233 STAM_PROFILE_STOP(&pVM->pgm.s.StatLazySyncPT, f);
234 return VINF_PGM_SYNC_CR3;
235 }
236
237# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
238 /*
239 * Check if this address is within any of our mappings.
240 *
241 * This is *very* fast and it's gonna save us a bit of effort below and prevent
 242 * us from screwing ourselves with MMIO2 pages which have a GC Mapping (VRam).
243 * (BTW, it's impossible to have physical access handlers in a mapping.)
244 */
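/*
 * (The mapping list is sorted by address - the early break below relies on
 * that - and each PPGMMAPPING node covers the guest-context range
 * [GCPtr, GCPtr + cb - 1] occupied by a hypervisor structure.)
 */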
245 if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
246 {
247 STAM_PROFILE_START(&pVM->pgm.s.StatMapping, a);
248 PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
249 for ( ; pMapping; pMapping = CTXALLSUFF(pMapping->pNext))
250 {
251 if ((RTGCUINTPTR)pvFault < (RTGCUINTPTR)pMapping->GCPtr)
252 break;
253 if ((RTGCUINTPTR)pvFault - (RTGCUINTPTR)pMapping->GCPtr < pMapping->cb)
254 {
255 /*
256 * The first thing we check is if we've got an undetected conflict.
257 */
258 if (!pVM->pgm.s.fMappingsFixed)
259 {
260 unsigned iPT = pMapping->cb >> GST_PD_SHIFT;
261 while (iPT-- > 0)
262 if (pPDSrc->a[iPDSrc + iPT].n.u1Present)
263 {
264 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eConflicts);
265 Log(("Trap0e: Detected Conflict %VGv-%VGv\n", pMapping->GCPtr, pMapping->GCPtrLast));
 266 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
267 STAM_PROFILE_STOP(&pVM->pgm.s.StatMapping, a);
268 return VINF_PGM_SYNC_CR3;
269 }
270 }
271
272 /*
273 * Check if the fault address is in a virtual page access handler range.
274 */
275 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->HyperVirtHandlers, pvFault);
276 if ( pCur
277 && (RTGCUINTPTR)pvFault - (RTGCUINTPTR)pCur->GCPtr < pCur->cb
278 && uErr & X86_TRAP_PF_RW)
279 {
280# ifdef IN_GC
281 STAM_PROFILE_START(&pCur->Stat, h);
282 rc = CTXSUFF(pCur->pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->GCPtr, (RTGCUINTPTR)pvFault - (RTGCUINTPTR)pCur->GCPtr);
283 STAM_PROFILE_STOP(&pCur->Stat, h);
284# else
285 AssertFailed();
286 rc = VINF_EM_RAW_EMULATE_INSTR; /* can't happen with VMX */
287# endif
288 STAM_COUNTER_INC(&pVM->pgm.s.StatTrap0eMapHandler);
289 STAM_PROFILE_STOP(&pVM->pgm.s.StatMapping, a);
290 return rc;
291 }
292
293 /*
294 * Pretend we're not here and let the guest handle the trap.
295 */
296 TRPMSetErrorCode(pVM, uErr & ~X86_TRAP_PF_P);
297 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eMap);
298 LogFlow(("PGM: Mapping access -> route trap to recompiler!\n"));
299 STAM_PROFILE_STOP(&pVM->pgm.s.StatMapping, a);
300 return VINF_EM_RAW_GUEST_TRAP;
301 }
302 }
303 STAM_PROFILE_STOP(&pVM->pgm.s.StatMapping, a);
 304 } /* pgmMapAreMappingsEnabled(&pVM->pgm.s) */
305# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
306
307 /*
308 * Check if this fault address is flagged for special treatment,
309 * which means we'll have to figure out the physical address and
310 * check flags associated with it.
311 *
312 * ASSUME that we can limit any special access handling to pages
313 * in page tables which the guest believes to be present.
314 */
315 if (PdeSrc.n.u1Present)
316 {
317 RTGCPHYS GCPhys = NIL_RTGCPHYS;
318
319# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
320# if PGM_GST_TYPE == PGM_TYPE_AMD64
321 bool fBigPagesSupported = true;
322# else
323 bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
324# endif
325 if ( PdeSrc.b.u1Size
326 && fBigPagesSupported)
327 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc)
328 | ((RTGCPHYS)pvFault & (GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK));
329 else
330 {
331 PGSTPT pPTSrc;
332 rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
333 if (VBOX_SUCCESS(rc))
334 {
335 unsigned iPTESrc = ((RTGCUINTPTR)pvFault >> GST_PT_SHIFT) & GST_PT_MASK;
336 if (pPTSrc->a[iPTESrc].n.u1Present)
337 GCPhys = pPTSrc->a[iPTESrc].u & GST_PTE_PG_MASK;
338 }
339 }
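/*
 * Worked example for the big-page branch above (32-bit guest, made-up
 * numbers): for a 4MB PDE with base 0x00c00000 and pvFault = 0x00d23456,
 * GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK = 0x3fffff ^ 0xfff = 0x3ff000,
 * so GCPhys = 0x00c00000 | (0x00d23456 & 0x3ff000) = 0x00d23000, i.e. the
 * 4KB page within the big page containing the fault address.
 */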
340# else
341 /* No paging so the fault address is the physical address */
342 GCPhys = (RTGCPHYS)((RTGCUINTPTR)pvFault & ~PAGE_OFFSET_MASK);
343# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
344
345 /*
346 * If we have a GC address we'll check if it has any flags set.
347 */
348 if (GCPhys != NIL_RTGCPHYS)
349 {
350 STAM_PROFILE_START(&pVM->pgm.s.StatHandlers, b);
351
352 PPGMPAGE pPage;
353 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
354 if (VBOX_SUCCESS(rc))
355 {
356 if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
357 {
358 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
359 {
360 /*
361 * Physical page access handler.
362 */
363 const RTGCPHYS GCPhysFault = GCPhys | ((RTGCUINTPTR)pvFault & PAGE_OFFSET_MASK);
364 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->PhysHandlers, GCPhysFault);
365 if (pCur)
366 {
367# ifdef PGM_SYNC_N_PAGES
368 /*
369 * If the region is write protected and we got a page not present fault, then sync
370 * the pages. If the fault was caused by a read, then restart the instruction.
371 * In case of write access continue to the GC write handler.
372 *
373 * ASSUMES that there is only one handler per page or that they have similar write properties.
374 */
375 if ( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
376 && !(uErr & X86_TRAP_PF_P))
377 {
378 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, (RTGCUINTPTR)pvFault, PGM_SYNC_NR_PAGES, uErr);
379 if ( VBOX_FAILURE(rc)
380 || !(uErr & X86_TRAP_PF_RW)
381 || rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
382 {
383 AssertRC(rc);
384 STAM_COUNTER_INC(&pVM->pgm.s.StatHandlersOutOfSync);
385 STAM_PROFILE_STOP(&pVM->pgm.s.StatHandlers, b);
386 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eOutOfSyncHndPhys; });
387 return rc;
388 }
389 }
390# endif
391
392 AssertMsg( pCur->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
393 || (pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE && (uErr & X86_TRAP_PF_RW)),
394 ("Unexpected trap for physical handler: %08X (phys=%08x) HCPhys=%X uErr=%X, enum=%d\n", pvFault, GCPhys, pPage->HCPhys, uErr, pCur->enmType));
395
396#if defined(IN_GC) || defined(IN_RING0)
397 if (CTXALLSUFF(pCur->pfnHandler))
398 {
399 STAM_PROFILE_START(&pCur->Stat, h);
400 rc = pCur->CTXALLSUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, GCPhysFault, CTXALLSUFF(pCur->pvUser));
401 STAM_PROFILE_STOP(&pCur->Stat, h);
402 }
403 else
404#endif
405 rc = VINF_EM_RAW_EMULATE_INSTR;
406 STAM_COUNTER_INC(&pVM->pgm.s.StatHandlersPhysical);
407 STAM_PROFILE_STOP(&pVM->pgm.s.StatHandlers, b);
408 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eHndPhys; });
409 return rc;
410 }
411 }
412# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
413 else
414 {
415# ifdef PGM_SYNC_N_PAGES
416 /*
417 * If the region is write protected and we got a page not present fault, then sync
418 * the pages. If the fault was caused by a read, then restart the instruction.
419 * In case of write access continue to the GC write handler.
420 */
421 if ( PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < PGM_PAGE_HNDL_PHYS_STATE_ALL
422 && !(uErr & X86_TRAP_PF_P))
423 {
424 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, (RTGCUINTPTR)pvFault, PGM_SYNC_NR_PAGES, uErr);
425 if ( VBOX_FAILURE(rc)
426 || rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
427 || !(uErr & X86_TRAP_PF_RW))
428 {
429 AssertRC(rc);
430 STAM_COUNTER_INC(&pVM->pgm.s.StatHandlersOutOfSync);
431 STAM_PROFILE_STOP(&pVM->pgm.s.StatHandlers, b);
432 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eOutOfSyncHndVirt; });
433 return rc;
434 }
435 }
436# endif
437 /*
 438 * Ok, it's a virtual page access handler.
439 *
440 * Since it's faster to search by address, we'll do that first
441 * and then retry by GCPhys if that fails.
442 */
443 /** @todo r=bird: perhaps we should consider looking up by physical address directly now? */
444 /** @note r=svl: true, but lookup on virtual address should remain as a fallback as phys & virt trees might be out of sync, because the
445 * page was changed without us noticing it (not-present -> present without invlpg or mov cr3, xxx)
446 */
447 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->VirtHandlers, pvFault);
448 if (pCur)
449 {
450 AssertMsg(!((RTGCUINTPTR)pvFault - (RTGCUINTPTR)pCur->GCPtr < pCur->cb)
451 || ( pCur->enmType != PGMVIRTHANDLERTYPE_WRITE
452 || !(uErr & X86_TRAP_PF_P)
453 || (pCur->enmType == PGMVIRTHANDLERTYPE_WRITE && (uErr & X86_TRAP_PF_RW))),
454 ("Unexpected trap for virtual handler: %VGv (phys=%VGp) HCPhys=%HGp uErr=%X, enum=%d\n", pvFault, GCPhys, pPage->HCPhys, uErr, pCur->enmType));
455
456 if ( (RTGCUINTPTR)pvFault - (RTGCUINTPTR)pCur->GCPtr < pCur->cb
457 && ( uErr & X86_TRAP_PF_RW
458 || pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
459 {
460# ifdef IN_GC
461 STAM_PROFILE_START(&pCur->Stat, h);
462 rc = CTXSUFF(pCur->pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->GCPtr, (RTGCUINTPTR)pvFault - (RTGCUINTPTR)pCur->GCPtr);
463 STAM_PROFILE_STOP(&pCur->Stat, h);
464# else
465 rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
466# endif
467 STAM_COUNTER_INC(&pVM->pgm.s.StatHandlersVirtual);
468 STAM_PROFILE_STOP(&pVM->pgm.s.StatHandlers, b);
469 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eHndVirt; });
470 return rc;
471 }
472 /* Unhandled part of a monitored page */
473 }
474 else
475 {
476 /* Check by physical address. */
477 PPGMVIRTHANDLER pCur;
478 unsigned iPage;
479 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys + ((RTGCUINTPTR)pvFault & PAGE_OFFSET_MASK),
480 &pCur, &iPage);
481 Assert(VBOX_SUCCESS(rc) || !pCur);
482 if ( pCur
483 && ( uErr & X86_TRAP_PF_RW
484 || pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
485 {
486 Assert((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == GCPhys);
487# ifdef IN_GC
488 RTGCUINTPTR off = (iPage << PAGE_SHIFT) + ((RTGCUINTPTR)pvFault & PAGE_OFFSET_MASK) - ((RTGCUINTPTR)pCur->GCPtr & PAGE_OFFSET_MASK);
489 Assert(off < pCur->cb);
490 STAM_PROFILE_START(&pCur->Stat, h);
491 rc = CTXSUFF(pCur->pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->GCPtr, off);
492 STAM_PROFILE_STOP(&pCur->Stat, h);
493# else
494 rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
495# endif
496 STAM_COUNTER_INC(&pVM->pgm.s.StatHandlersVirtualByPhys);
497 STAM_PROFILE_STOP(&pVM->pgm.s.StatHandlers, b);
498 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eHndVirt; });
499 return rc;
500 }
501 }
502 }
503# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
504
505 /*
506 * There is a handled area of the page, but this fault doesn't belong to it.
507 * We must emulate the instruction.
508 *
509 * To avoid crashing (non-fatal) in the interpreter and go back to the recompiler
510 * we first check if this was a page-not-present fault for a page with only
511 * write access handlers. Restart the instruction if it wasn't a write access.
512 */
513 STAM_COUNTER_INC(&pVM->pgm.s.StatHandlersUnhandled);
514
515 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
516 && !(uErr & X86_TRAP_PF_P))
517 {
518 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, (RTGCUINTPTR)pvFault, PGM_SYNC_NR_PAGES, uErr);
519 if ( VBOX_FAILURE(rc)
520 || rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
521 || !(uErr & X86_TRAP_PF_RW))
522 {
523 AssertRC(rc);
524 STAM_COUNTER_INC(&pVM->pgm.s.StatHandlersOutOfSync);
525 STAM_PROFILE_STOP(&pVM->pgm.s.StatHandlers, b);
526 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eOutOfSyncHndPhys; });
527 return rc;
528 }
529 }
530
531 /** @todo This particular case can cause quite a lot of overhead. E.g. early stage of kernel booting in Ubuntu 6.06
532 * It's writing to an unhandled part of the LDT page several million times.
533 */
534 rc = PGMInterpretInstruction(pVM, pRegFrame, pvFault);
535 LogFlow(("PGM: PGMInterpretInstruction -> rc=%d HCPhys=%RHp%s%s\n",
536 rc, pPage->HCPhys,
537 PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage) ? " phys" : "",
538 PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage) ? " virt" : ""));
539 STAM_PROFILE_STOP(&pVM->pgm.s.StatHandlers, b);
540 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eHndUnhandled; });
541 return rc;
542 } /* if any kind of handler */
543
544# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
545 if (uErr & X86_TRAP_PF_P)
546 {
547 /*
548 * The page isn't marked, but it might still be monitored by a virtual page access handler.
549 * (ASSUMES no temporary disabling of virtual handlers.)
550 */
551 /** @todo r=bird: Since the purpose is to catch out of sync pages with virtual handler(s) here,
552 * we should correct both the shadow page table and physical memory flags, and not only check for
553 * accesses within the handler region but for access to pages with virtual handlers. */
554 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->VirtHandlers, pvFault);
555 if (pCur)
556 {
557 AssertMsg( !((RTGCUINTPTR)pvFault - (RTGCUINTPTR)pCur->GCPtr < pCur->cb)
558 || ( pCur->enmType != PGMVIRTHANDLERTYPE_WRITE
559 || !(uErr & X86_TRAP_PF_P)
560 || (pCur->enmType == PGMVIRTHANDLERTYPE_WRITE && (uErr & X86_TRAP_PF_RW))),
561 ("Unexpected trap for virtual handler: %08X (phys=%08x) HCPhys=%X uErr=%X, enum=%d\n", pvFault, GCPhys, pPage->HCPhys, uErr, pCur->enmType));
562
563 if ( (RTGCUINTPTR)pvFault - (RTGCUINTPTR)pCur->GCPtr < pCur->cb
564 && ( uErr & X86_TRAP_PF_RW
565 || pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
566 {
567# ifdef IN_GC
568 STAM_PROFILE_START(&pCur->Stat, h);
569 rc = CTXSUFF(pCur->pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->GCPtr, (RTGCUINTPTR)pvFault - (RTGCUINTPTR)pCur->GCPtr);
570 STAM_PROFILE_STOP(&pCur->Stat, h);
571# else
572 rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
573# endif
574 STAM_COUNTER_INC(&pVM->pgm.s.StatHandlersVirtualUnmarked);
575 STAM_PROFILE_STOP(&pVM->pgm.s.StatHandlers, b);
576 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eHndVirt; });
577 return rc;
578 }
579 }
580 }
581# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
582 }
583 else
584 {
 585 /* If the guest accesses invalid physical memory (e.g. probing of RAM or accessing a remapped MMIO range), we'll fall
586 * back to the recompiler to emulate the instruction.
587 */
588 LogFlow(("pgmPhysGetPageEx %VGp failed with %Vrc\n", GCPhys, rc));
589 STAM_COUNTER_INC(&pVM->pgm.s.StatHandlersInvalid);
590 STAM_PROFILE_STOP(&pVM->pgm.s.StatHandlers, b);
591 return VINF_EM_RAW_EMULATE_INSTR;
592 }
593
594 STAM_PROFILE_STOP(&pVM->pgm.s.StatHandlers, b);
595
596# ifdef PGM_OUT_OF_SYNC_IN_GC
597 /*
 598 * We get here only if the page is present in the guest page tables and
 599 * the trap was not handled by our handlers.
 600 * Check for a page out-of-sync situation.
601 */
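/*
 * ("Out of sync" covers the cases where the guest PTE is valid but the
 * shadow PTE does not reflect it yet: lazily synced pages, pages write
 * protected for dirty-bit tracking, and pages belonging to physically
 * monitored regions that are no longer valid.)
 */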
602 STAM_PROFILE_START(&pVM->pgm.s.StatOutOfSync, c);
603
604 if (!(uErr & X86_TRAP_PF_P))
605 {
606 /*
607 * Page is not present in our page tables.
608 * Try to sync it!
609 * BTW, fPageShw is invalid in this branch!
610 */
611 if (uErr & X86_TRAP_PF_US)
612 STAM_COUNTER_INC(&pVM->pgm.s.StatGCPageOutOfSyncUser);
613 else /* supervisor */
614 STAM_COUNTER_INC(&pVM->pgm.s.StatGCPageOutOfSyncSupervisor);
615
616# if defined(LOG_ENABLED) && !defined(IN_RING0)
617 RTGCPHYS GCPhys;
618 uint64_t fPageGst;
619 PGMGstGetPage(pVM, pvFault, &fPageGst, &GCPhys);
620 Log(("Page out of sync: %VGv eip=%08x PdeSrc.n.u1User=%d fPageGst=%08llx GCPhys=%VGp scan=%d\n",
621 pvFault, pRegFrame->eip, PdeSrc.n.u1User, fPageGst, GCPhys, CSAMDoesPageNeedScanning(pVM, (RTRCPTR)pRegFrame->eip)));
622# endif /* LOG_ENABLED */
623
624# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0)
625 if (CPUMGetGuestCPL(pVM, pRegFrame) == 0)
626 {
627 uint64_t fPageGst;
628 rc = PGMGstGetPage(pVM, pvFault, &fPageGst, NULL);
629 if ( VBOX_SUCCESS(rc)
630 && !(fPageGst & X86_PTE_US))
631 {
632 /* Note: can't check for X86_TRAP_ID bit, because that requires execute disable support on the CPU */
633 if ( pvFault == (RTGCPTR)pRegFrame->eip
634 || (RTGCUINTPTR)pvFault - pRegFrame->eip < 8 /* instruction crossing a page boundary */
635# ifdef CSAM_DETECT_NEW_CODE_PAGES
636 || ( !PATMIsPatchGCAddr(pVM, (RTGCPTR)pRegFrame->eip)
637 && CSAMDoesPageNeedScanning(pVM, (RTRCPTR)pRegFrame->eip)) /* any new code we encounter here */
638# endif /* CSAM_DETECT_NEW_CODE_PAGES */
639 )
640 {
641 LogFlow(("CSAMExecFault %VGv\n", pRegFrame->eip));
642 rc = CSAMExecFault(pVM, (RTRCPTR)pRegFrame->eip);
643 if (rc != VINF_SUCCESS)
644 {
645 /*
646 * CSAM needs to perform a job in ring 3.
647 *
648 * Sync the page before going to the host context; otherwise we'll end up in a loop if
649 * CSAM fails (e.g. instruction crosses a page boundary and the next page is not present)
650 */
651 LogFlow(("CSAM ring 3 job\n"));
652 int rc2 = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, (RTGCUINTPTR)pvFault, 1, uErr);
653 AssertRC(rc2);
654
655 STAM_PROFILE_STOP(&pVM->pgm.s.StatOutOfSync, c);
656 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eCSAM; });
657 return rc;
658 }
659 }
660# ifdef CSAM_DETECT_NEW_CODE_PAGES
661 else
662 if ( uErr == X86_TRAP_PF_RW
663 && pRegFrame->ecx >= 0x100 /* early check for movswd count */
664 && pRegFrame->ecx < 0x10000
665 )
666 {
667 /* In case of a write to a non-present supervisor shadow page, we'll take special precautions
668 * to detect loading of new code pages.
669 */
670
671 /*
672 * Decode the instruction.
673 */
674 RTGCPTR PC;
675 rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &PC);
676 if (rc == VINF_SUCCESS)
677 {
678 DISCPUSTATE Cpu;
679 uint32_t cbOp;
680 rc = EMInterpretDisasOneEx(pVM, (RTGCUINTPTR)PC, pRegFrame, &Cpu, &cbOp);
681
682 /* For now we'll restrict this to rep movsw/d instructions */
683 if ( rc == VINF_SUCCESS
684 && Cpu.pCurInstr->opcode == OP_MOVSWD
685 && (Cpu.prefix & PREFIX_REP))
686 {
687 CSAMMarkPossibleCodePage(pVM, pvFault);
688 }
689 }
690 }
691# endif /* CSAM_DETECT_NEW_CODE_PAGES */
692
693 /*
694 * Mark this page as safe.
695 */
696 /** @todo not correct for pages that contain both code and data!! */
697 Log2(("CSAMMarkPage %VGv; scanned=%d\n", pvFault, true));
698 CSAMMarkPage(pVM, (RTRCPTR)pvFault, true);
699 }
700 }
701# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0) */
702 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, (RTGCUINTPTR)pvFault, PGM_SYNC_NR_PAGES, uErr);
703 if (VBOX_SUCCESS(rc))
704 {
705 /* The page was successfully synced, return to the guest. */
706 STAM_PROFILE_STOP(&pVM->pgm.s.StatOutOfSync, c);
707 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eOutOfSync; });
708 return VINF_SUCCESS;
709 }
710 }
711 else
712 {
713 /*
 714 * A side effect of not flushing global PDEs is out-of-sync pages due
 715 * to physically monitored regions that are no longer valid.
 716 * Assume for now it only applies to the read/write flag.
717 */
718 if (VBOX_SUCCESS(rc) && (uErr & X86_TRAP_PF_RW))
719 {
720 if (uErr & X86_TRAP_PF_US)
721 STAM_COUNTER_INC(&pVM->pgm.s.StatGCPageOutOfSyncUser);
722 else /* supervisor */
723 STAM_COUNTER_INC(&pVM->pgm.s.StatGCPageOutOfSyncSupervisor);
724
725
726 /*
727 * Note: Do NOT use PGM_SYNC_NR_PAGES here. That only works if the page is not present, which is not true in this case.
728 */
729 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, (RTGCUINTPTR)pvFault, 1, uErr);
730 if (VBOX_SUCCESS(rc))
731 {
732 /*
733 * Page was successfully synced, return to guest.
734 */
735# ifdef VBOX_STRICT
736 RTGCPHYS GCPhys;
737 uint64_t fPageGst;
738 rc = PGMGstGetPage(pVM, pvFault, &fPageGst, &GCPhys);
739 Assert(VBOX_SUCCESS(rc) && fPageGst & X86_PTE_RW);
740 LogFlow(("Obsolete physical monitor page out of sync %VGv - phys %VGp flags=%08llx\n", pvFault, GCPhys, (uint64_t)fPageGst));
741
742 uint64_t fPageShw;
743 rc = PGMShwGetPage(pVM, pvFault, &fPageShw, NULL);
744 AssertMsg(VBOX_SUCCESS(rc) && fPageShw & X86_PTE_RW, ("rc=%Vrc fPageShw=%VX64\n", rc, fPageShw));
745# endif /* VBOX_STRICT */
746 STAM_PROFILE_STOP(&pVM->pgm.s.StatOutOfSync, c);
747 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eOutOfSyncObsHnd; });
748 return VINF_SUCCESS;
749 }
750
751 /* Check to see if we need to emulate the instruction as X86_CR0_WP has been cleared. */
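/* (Rationale, as far as the check suggests: with guest CR0.WP clear,
 * supervisor writes are allowed to ignore read-only PTEs, but our shadow
 * tables run with WP set, so such a write faults here and the instruction
 * has to be interpreted instead.) */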
752 if ( CPUMGetGuestCPL(pVM, pRegFrame) == 0
753 && ((CPUMGetGuestCR0(pVM) & (X86_CR0_WP|X86_CR0_PG)) == X86_CR0_PG)
754 && (uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_P)) == (X86_TRAP_PF_RW | X86_TRAP_PF_P))
755 {
756 uint64_t fPageGst;
757 rc = PGMGstGetPage(pVM, pvFault, &fPageGst, NULL);
758 if ( VBOX_SUCCESS(rc)
759 && !(fPageGst & X86_PTE_RW))
760 {
761 rc = PGMInterpretInstruction(pVM, pRegFrame, pvFault);
762 if (VBOX_SUCCESS(rc))
763 STAM_COUNTER_INC(&pVM->pgm.s.StatTrap0eWPEmulGC);
764 else
765 STAM_COUNTER_INC(&pVM->pgm.s.StatTrap0eWPEmulR3);
766 return rc;
767 }
768 else
769 AssertMsgFailed(("Unexpected r/w page %x flag=%x\n", pvFault, (uint32_t)fPageGst));
770 }
771
772 }
773
774# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
775# ifdef VBOX_STRICT
776 /*
777 * Check for VMM page flags vs. Guest page flags consistency.
778 * Currently only for debug purposes.
779 */
780 if (VBOX_SUCCESS(rc))
781 {
782 /* Get guest page flags. */
783 uint64_t fPageGst;
784 rc = PGMGstGetPage(pVM, pvFault, &fPageGst, NULL);
785 if (VBOX_SUCCESS(rc))
786 {
787 uint64_t fPageShw;
788 rc = PGMShwGetPage(pVM, pvFault, &fPageShw, NULL);
789
790 /*
791 * Compare page flags.
792 * Note: we have AVL, A, D bits desynched.
793 */
794 AssertMsg((fPageShw & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)) == (fPageGst & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)),
795 ("Page flags mismatch! pvFault=%VGv GCPhys=%VGp fPageShw=%08llx fPageGst=%08llx\n", pvFault, GCPhys, fPageShw, fPageGst));
796 }
797 else
798 AssertMsgFailed(("PGMGstGetPage rc=%Vrc\n", rc));
799 }
800 else
801 AssertMsgFailed(("PGMGCGetPage rc=%Vrc\n", rc));
802# endif /* VBOX_STRICT */
803# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
804 }
805 STAM_PROFILE_STOP(&pVM->pgm.s.StatOutOfSync, c);
806# endif /* PGM_OUT_OF_SYNC_IN_GC */
807 }
808 else
809 {
810 /*
811 * Page not present in Guest OS or invalid page table address.
812 * This is potential virtual page access handler food.
813 *
814 * For the present we'll say that our access handlers don't
815 * work for this case - we've already discarded the page table
816 * not present case which is identical to this.
817 *
818 * When we perchance find we need this, we will probably have AVL
819 * trees (offset based) to operate on and we can measure their speed
 820 * against mapping a page table and probably rearrange this handling
821 * a bit. (Like, searching virtual ranges before checking the
822 * physical address.)
823 */
824 }
825 }
826
827
828# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
829 /*
830 * Conclusion, this is a guest trap.
831 */
832 LogFlow(("PGM: Unhandled #PF -> route trap to recompiler!\n"));
833 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUnhandled);
834 return VINF_EM_RAW_GUEST_TRAP;
835# else
836 /* present, but not a monitored page; perhaps the guest is probing physical memory */
837 return VINF_EM_RAW_EMULATE_INSTR;
838# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
839
840
841#else /* unsupported guest+shadow paging mode combination */
842
843 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE));
844 return VERR_INTERNAL_ERROR;
845#endif /* unsupported guest+shadow paging mode combination */
846}
847
848
849/**
850 * Emulation of the invlpg instruction.
851 *
852 *
853 * @returns VBox status code.
854 *
855 * @param pVM VM handle.
856 * @param GCPtrPage Page to invalidate.
857 *
858 * @remark ASSUMES that the guest is updating before invalidating. This order
859 * isn't required by the CPU, so this is speculative and could cause
860 * trouble.
861 *
862 * @todo Flush page or page directory only if necessary!
863 * @todo Add a #define for simply invalidating the page.
864 */
865PGM_BTH_DECL(int, InvalidatePage)(PVM pVM, RTGCUINTPTR GCPtrPage)
866{
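/* (Usage note: this is the per-mode worker dispatched from the
 * PGMInvalidatePage entry point when INVLPG emulation or monitoring needs
 * to drop a single translation; the call-path description is approximate.) */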
867#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) \
868 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
869 && PGM_SHW_TYPE != PGM_TYPE_EPT
870 int rc;
871
872 LogFlow(("InvalidatePage %VGv\n", GCPtrPage));
873 /*
874 * Get the shadow PD entry and skip out if this PD isn't present.
 875 * (Guessing that it is frequent for a shadow PDE to not be present, we do this check first.)
876 */
877# if PGM_SHW_TYPE == PGM_TYPE_32BIT
878 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT;
879 PX86PDE pPdeDst = &pVM->pgm.s.CTXMID(p,32BitPD)->a[iPDDst];
880# elif PGM_SHW_TYPE == PGM_TYPE_PAE
881 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT; /* no mask; flat index into the 2048 entry array. */
882 const unsigned iPdpte = (GCPtrPage >> X86_PDPT_SHIFT); NOREF(iPdpte);
883 PX86PDEPAE pPdeDst = &pVM->pgm.s.CTXMID(ap,PaePDs[0])->a[iPDDst];
884 PX86PDPT pPdptDst = pVM->pgm.s.CTXMID(p,PaePDPT); NOREF(pPdptDst);
885
886 /* If the shadow PDPE isn't present, then skip the invalidate. */
887 if (!pPdptDst->a[iPdpte].n.u1Present)
888 {
889 Assert(!(pPdptDst->a[iPdpte].u & PGM_PLXFLAGS_MAPPING));
890 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePageSkipped));
891 return VINF_SUCCESS;
892 }
893
894# else /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */
895 /* PML4 */
896 AssertReturn(pVM->pgm.s.pHCPaePML4, VERR_INTERNAL_ERROR);
897
898 const unsigned iPml4e = (GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK;
899 const unsigned iPdpte = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
900 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
901 PX86PDPAE pPDDst;
902 PX86PDPT pPdptDst;
903 PX86PML4E pPml4eDst = &pVM->pgm.s.pHCPaePML4->a[iPml4e];
904 rc = PGMShwGetLongModePDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
905 if (rc != VINF_SUCCESS)
906 {
907 AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT, ("Unexpected rc=%Vrc\n", rc));
908 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePageSkipped));
909 if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
910 PGM_INVL_GUEST_TLBS();
911 return VINF_SUCCESS;
912 }
913 Assert(pPDDst);
914
915 PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
916 PX86PDPE pPdpeDst = &pPdptDst->a[iPdpte];
917
918 if (!pPdpeDst->n.u1Present)
919 {
920 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePageSkipped));
921 if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
922 PGM_INVL_GUEST_TLBS();
923 return VINF_SUCCESS;
924 }
925
926# endif /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */
927
928 const SHWPDE PdeDst = *pPdeDst;
929 if (!PdeDst.n.u1Present)
930 {
931 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePageSkipped));
932 return VINF_SUCCESS;
933 }
934
935 /*
936 * Get the guest PD entry and calc big page.
937 */
938# if PGM_GST_TYPE == PGM_TYPE_32BIT
939 PX86PD pPDSrc = CTXSUFF(pVM->pgm.s.pGuestPD);
940 const unsigned iPDSrc = GCPtrPage >> GST_PD_SHIFT;
941 GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
942# else /* PGM_GST_TYPE != PGM_TYPE_32BIT */
943 unsigned iPDSrc;
944# if PGM_GST_TYPE == PGM_TYPE_PAE
945 PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc);
946 X86PDPE PdpeSrc = CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[iPdpte];
947# else /* AMD64 */
948 PX86PML4E pPml4eSrc;
949 X86PDPE PdpeSrc;
950 PX86PDPAE pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
951# endif
952 GSTPDE PdeSrc;
953
954 if (pPDSrc)
955 PdeSrc = pPDSrc->a[iPDSrc];
956 else
957 PdeSrc.u = 0;
958# endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
959
960# if PGM_GST_TYPE == PGM_TYPE_AMD64
961 const bool fIsBigPage = PdeSrc.b.u1Size;
962# else
963 const bool fIsBigPage = PdeSrc.b.u1Size && (CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
964# endif
965
966# ifdef IN_RING3
967 /*
968 * If a CR3 Sync is pending we may ignore the invalidate page operation
969 * depending on the kind of sync and if it's a global page or not.
970 * This doesn't make sense in GC/R0 so we'll skip it entirely there.
971 */
972# ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
973 if ( VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3)
974 || ( VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)
975 && fIsBigPage
976 && PdeSrc.b.u1Global
977 )
978 )
979# else
980 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL) )
981# endif
982 {
983 STAM_COUNTER_INC(&pVM->pgm.s.StatHCInvalidatePageSkipped);
984 return VINF_SUCCESS;
985 }
986# endif /* IN_RING3 */
987
988# if PGM_GST_TYPE == PGM_TYPE_AMD64
989 PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
990
991 /* Fetch the pgm pool shadow descriptor. */
992 PPGMPOOLPAGE pShwPdpt = pgmPoolGetPageByHCPhys(pVM, pPml4eDst->u & X86_PML4E_PG_MASK);
993 Assert(pShwPdpt);
994
995 /* Fetch the pgm pool shadow descriptor. */
996 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpte].u & SHW_PDPE_PG_MASK);
997 Assert(pShwPde);
998
999 Assert(pPml4eDst->n.u1Present && (pPml4eDst->u & SHW_PDPT_MASK));
1000 RTGCPHYS GCPhysPdpt = pPml4eSrc->u & X86_PML4E_PG_MASK;
1001
1002 if ( !pPml4eSrc->n.u1Present
1003 || pShwPdpt->GCPhys != GCPhysPdpt)
1004 {
1005 LogFlow(("InvalidatePage: Out-of-sync PML4E (P/GCPhys) at %VGv GCPhys=%VGp vs %VGp Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
1006 GCPtrPage, pShwPdpt->GCPhys, GCPhysPdpt, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
1007 pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e);
1008 pPml4eDst->u = 0;
1009 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDNPs));
1010 PGM_INVL_GUEST_TLBS();
1011 return VINF_SUCCESS;
1012 }
1013 if ( pPml4eSrc->n.u1User != pPml4eDst->n.u1User
1014 || (!pPml4eSrc->n.u1Write && pPml4eDst->n.u1Write))
1015 {
1016 /*
1017 * Mark not present so we can resync the PML4E when it's used.
1018 */
1019 LogFlow(("InvalidatePage: Out-of-sync PML4E at %VGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
1020 GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
1021 pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e);
1022 pPml4eDst->u = 0;
1023 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDOutOfSync));
1024 PGM_INVL_GUEST_TLBS();
1025 }
1026 else if (!pPml4eSrc->n.u1Accessed)
1027 {
1028 /*
1029 * Mark not present so we can set the accessed bit.
1030 */
1031 LogFlow(("InvalidatePage: Out-of-sync PML4E (A) at %VGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
1032 GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
1033 pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e);
1034 pPml4eDst->u = 0;
1035 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDNAs));
1036 PGM_INVL_GUEST_TLBS();
1037 }
1038
1039 /* Check if the PDPT entry has changed. */
1040 Assert(pPdpeDst->n.u1Present && pPdpeDst->u & SHW_PDPT_MASK);
1041 RTGCPHYS GCPhysPd = PdpeSrc.u & GST_PDPE_PG_MASK;
1042 if ( !PdpeSrc.n.u1Present
1043 || pShwPde->GCPhys != GCPhysPd)
1044 {
1045 LogFlow(("InvalidatePage: Out-of-sync PDPE (P/GCPhys) at %VGv GCPhys=%VGp vs %VGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
1046 GCPtrPage, pShwPde->GCPhys, GCPhysPd, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
1047 pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpte);
1048 pPdpeDst->u = 0;
1049 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDNPs));
1050 PGM_INVL_GUEST_TLBS();
1051 return VINF_SUCCESS;
1052 }
1053 if ( PdpeSrc.lm.u1User != pPdpeDst->lm.u1User
1054 || (!PdpeSrc.lm.u1Write && pPdpeDst->lm.u1Write))
1055 {
1056 /*
1057 * Mark not present so we can resync the PDPTE when it's used.
1058 */
1059 LogFlow(("InvalidatePage: Out-of-sync PDPE at %VGv PdpeSrc=%RX64 PdpeDst=%RX64\n",
1060 GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
1061 pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpte);
1062 pPdpeDst->u = 0;
1063 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDOutOfSync));
1064 PGM_INVL_GUEST_TLBS();
1065 }
1066 else if (!PdpeSrc.lm.u1Accessed)
1067 {
1068 /*
1069 * Mark not present so we can set the accessed bit.
1070 */
1071 LogFlow(("InvalidatePage: Out-of-sync PDPE (A) at %VGv PdpeSrc=%RX64 PdpeDst=%RX64\n",
1072 GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
1073 pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpte);
1074 pPdpeDst->u = 0;
1075 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDNAs));
1076 PGM_INVL_GUEST_TLBS();
1077 }
1078# endif /* PGM_GST_TYPE == PGM_TYPE_AMD64 */
1079
1080# if PGM_GST_TYPE == PGM_TYPE_PAE
1081 /* Note: This shouldn't actually be necessary as we monitor the PDPT page for changes. */
1082 if (!pPDSrc)
1083 {
1084 /* Guest PDPE not present */
1085 PX86PDPAE pPDPAE = pVM->pgm.s.CTXMID(ap,PaePDs)[0]; /* root of the 2048 PDE array */
1086 PX86PDEPAE pPDEDst = &pPDPAE->a[iPdpte * X86_PG_PAE_ENTRIES];
1087 PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
1088
1089 Assert(!(CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[iPdpte].n.u1Present));
1090 LogFlow(("InvalidatePage: guest PDPE %d not present; clear shw pdpe\n", iPdpte));
1091 /* for each page directory entry */
1092 for (unsigned iPD = 0; iPD < X86_PG_PAE_ENTRIES; iPD++)
1093 {
1094 if ( pPDEDst[iPD].n.u1Present
1095 && !(pPDEDst[iPD].u & PGM_PDFLAGS_MAPPING))
1096 {
1097 pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst[iPD].u & SHW_PDE_PG_MASK), SHW_POOL_ROOT_IDX, iPdpte * X86_PG_PAE_ENTRIES + iPD);
1098 pPDEDst[iPD].u = 0;
1099 }
1100 }
1101 if (!(pPdptDst->a[iPdpte].u & PGM_PLXFLAGS_MAPPING))
1102 pPdptDst->a[iPdpte].n.u1Present = 0;
1103 PGM_INVL_GUEST_TLBS();
1104 }
1105 AssertMsg(pVM->pgm.s.fMappingsFixed || (PdpeSrc.u & X86_PDPE_PG_MASK) == pVM->pgm.s.aGCPhysGstPaePDsMonitored[iPdpte], ("%VGp vs %VGp (mon)\n", (PdpeSrc.u & X86_PDPE_PG_MASK), pVM->pgm.s.aGCPhysGstPaePDsMonitored[iPdpte]));
1106# endif
1107
1108
1109 /*
1110 * Deal with the Guest PDE.
1111 */
1112 rc = VINF_SUCCESS;
1113 if (PdeSrc.n.u1Present)
1114 {
1115 if (PdeDst.u & PGM_PDFLAGS_MAPPING)
1116 {
1117 /*
1118 * Conflict - Let SyncPT deal with it to avoid duplicate code.
1119 */
1120 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
1121 Assert(PGMGetGuestMode(pVM) <= PGMMODE_PAE);
1122 rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, GCPtrPage);
1123 }
1124 else if ( PdeSrc.n.u1User != PdeDst.n.u1User
1125 || (!PdeSrc.n.u1Write && PdeDst.n.u1Write))
1126 {
1127 /*
1128 * Mark not present so we can resync the PDE when it's used.
1129 */
1130 LogFlow(("InvalidatePage: Out-of-sync at %VGp PdeSrc=%RX64 PdeDst=%RX64\n",
1131 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
1132# if PGM_GST_TYPE == PGM_TYPE_AMD64
1133 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
1134# else
1135 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
1136# endif
1137 pPdeDst->u = 0;
1138 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDOutOfSync));
1139 PGM_INVL_GUEST_TLBS();
1140 }
1141 else if (!PdeSrc.n.u1Accessed)
1142 {
1143 /*
1144 * Mark not present so we can set the accessed bit.
1145 */
1146 LogFlow(("InvalidatePage: Out-of-sync (A) at %VGp PdeSrc=%RX64 PdeDst=%RX64\n",
1147 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
1148# if PGM_GST_TYPE == PGM_TYPE_AMD64
1149 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
1150# else
1151 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
1152# endif
1153 pPdeDst->u = 0;
1154 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDNAs));
1155 PGM_INVL_GUEST_TLBS();
1156 }
1157 else if (!fIsBigPage)
1158 {
1159 /*
 1160 * 4KB page.
1161 */
1162 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
1163 RTGCPHYS GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
1164# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1165 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
1166 GCPhys |= (iPDDst & 1) * (PAGE_SIZE/2);
1167# endif
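/* (A 32-bit guest PT holds 1024 4-byte entries covering 4MB, while a PAE
 * shadow PT holds only 512 entries covering 2MB; each guest PT is therefore
 * shadowed by two PTs, and the odd one starts PAGE_SIZE/2 = 2KB into the
 * guest PT - which is what the (iPDDst & 1) term above selects.) */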
1168 if (pShwPage->GCPhys == GCPhys)
1169 {
1170# if 0 /* likely cause of a major performance regression; must be SyncPageWorkerTrackDeref then */
1171 const unsigned iPTEDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1172 PSHWPT pPT = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1173 if (pPT->a[iPTEDst].n.u1Present)
1174 {
1175# ifdef PGMPOOL_WITH_USER_TRACKING
1176 /* This is very unlikely with caching/monitoring enabled. */
1177 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPT->a[iPTEDst].u & SHW_PTE_PG_MASK);
1178# endif
1179 pPT->a[iPTEDst].u = 0;
1180 }
1181# else /* Syncing it here isn't 100% safe and it's probably not worth spending time syncing it. */
1182 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, GCPtrPage, 1, 0);
1183 if (VBOX_SUCCESS(rc))
1184 rc = VINF_SUCCESS;
1185# endif
1186 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePage4KBPages));
1187 PGM_INVL_PG(GCPtrPage);
1188 }
1189 else
1190 {
1191 /*
1192 * The page table address changed.
1193 */
1194 LogFlow(("InvalidatePage: Out-of-sync at %VGp PdeSrc=%RX64 PdeDst=%RX64 ShwGCPhys=%VGp iPDDst=%#x\n",
1195 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, iPDDst));
1196# if PGM_GST_TYPE == PGM_TYPE_AMD64
1197 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
1198# else
1199 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
1200# endif
1201 pPdeDst->u = 0;
1202 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDOutOfSync));
1203 PGM_INVL_GUEST_TLBS();
1204 }
1205 }
1206 else
1207 {
1208 /*
 1209 * 2/4MB page.
1210 */
1211 /* Before freeing the page, check if anything really changed. */
1212 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
1213 RTGCPHYS GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
1214# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
 1215 /* Select the right PDE as we're emulating a 4MB page directory with two 2MB shadow PDEs. */
1216 GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
1217# endif
1218 if ( pShwPage->GCPhys == GCPhys
1219 && pShwPage->enmKind == BTH_PGMPOOLKIND_PT_FOR_BIG)
1220 {
 1221 /* ASSUMES the given bits are identical for 4M and normal PDEs */
1222 /** @todo PAT */
1223 if ( (PdeSrc.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD))
1224 == (PdeDst.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD))
1225 && ( PdeSrc.b.u1Dirty /** @todo rainy day: What about read-only 4M pages? not very common, but still... */
1226 || (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)))
1227 {
1228 LogFlow(("Skipping flush for big page containing %VGv (PD=%X .u=%VX64)-> nothing has changed!\n", GCPtrPage, iPDSrc, PdeSrc.u));
1229 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePage4MBPagesSkip));
1230 return VINF_SUCCESS;
1231 }
1232 }
1233
1234 /*
1235 * Ok, the page table is present and it's been changed in the guest.
1236 * If we're in host context, we'll just mark it as not present taking the lazy approach.
1237 * We could do this for some flushes in GC too, but we need an algorithm for
 1238 * deciding which 4MB pages contain code likely to be executed very soon.
1239 */
1240 LogFlow(("InvalidatePage: Out-of-sync PD at %VGp PdeSrc=%RX64 PdeDst=%RX64\n",
1241 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
1242# if PGM_GST_TYPE == PGM_TYPE_AMD64
1243 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
1244# else
1245 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
1246# endif
1247 pPdeDst->u = 0;
1248 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePage4MBPages));
1249 PGM_INVL_BIG_PG(GCPtrPage);
1250 }
1251 }
1252 else
1253 {
1254 /*
1255 * Page directory is not present, mark shadow PDE not present.
1256 */
1257 if (!(PdeDst.u & PGM_PDFLAGS_MAPPING))
1258 {
1259# if PGM_GST_TYPE == PGM_TYPE_AMD64
1260 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
1261# else
1262 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
1263# endif
1264 pPdeDst->u = 0;
1265 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDNPs));
1266 PGM_INVL_PG(GCPtrPage);
1267 }
1268 else
1269 {
1270 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
1271 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDMappings));
1272 }
1273 }
1274
1275 return rc;
1276
1277#else /* guest real and protected mode */
1278 /* There's no such thing as InvalidatePage when paging is disabled, so just ignore. */
1279 return VINF_SUCCESS;
1280#endif
1281}
1282
1283
1284#ifdef PGMPOOL_WITH_USER_TRACKING
1285/**
1286 * Update the tracking of shadowed pages.
1287 *
1288 * @param pVM The VM handle.
1289 * @param pShwPage The shadow page.
 1290 * @param HCPhys The physical page which is being dereferenced.
1291 */
1292DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVM pVM, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys)
1293{
1294# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
1295 STAM_PROFILE_START(&pVM->pgm.s.StatTrackDeref, a);
1296 LogFlow(("SyncPageWorkerTrackDeref: Damn HCPhys=%VHp pShwPage->idx=%#x!!!\n", HCPhys, pShwPage->idx));
1297
 1298 /** @todo If this turns out to be a bottleneck (*very* likely) two things can be done:
1299 * 1. have a medium sized HCPhys -> GCPhys TLB (hash?)
1300 * 2. write protect all shadowed pages. I.e. implement caching.
1301 */
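/*
 * A minimal sketch of option 1 from the todo above - a small direct-mapped
 * HCPhys -> page TLB consulted before the linear scan. Hypothetical
 * illustration only: neither PGMTRACKTLBE nor the aTrackTlb member exists,
 * and a real version would live in the per-VM PGM state.
 */
#if 0
typedef struct PGMTRACKTLBE
{
    RTHCPHYS HCPhys; /* key, NIL_RTHCPHYS when the entry is unused */
    PPGMPAGE pPage;  /* the page this HCPhys resolved to last time */
} PGMTRACKTLBE;
/* PGMTRACKTLBE aTrackTlb[64]; -- imagined member of pVM->pgm.s */
PGMTRACKTLBE *pTlbe = &pVM->pgm.s.aTrackTlb[(HCPhys >> PAGE_SHIFT) & 63];
if (pTlbe->HCPhys == HCPhys)
{
    /* Hit: dereference directly and skip the ram range walk below. */
    pgmTrackDerefGCPhys(pVM->pgm.s.CTXSUFF(pPool), pShwPage, pTlbe->pPage);
}
/* On a miss, fall through to the scan and refill *pTlbe from the result. */
#endif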
1302 /*
1303 * Find the guest address.
1304 */
1305 for (PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1306 pRam;
1307 pRam = CTXALLSUFF(pRam->pNext))
1308 {
1309 unsigned iPage = pRam->cb >> PAGE_SHIFT;
1310 while (iPage-- > 0)
1311 {
1312 if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
1313 {
1314 PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
1315 pgmTrackDerefGCPhys(pPool, pShwPage, &pRam->aPages[iPage]);
1316 pShwPage->cPresent--;
1317 pPool->cPresent--;
1318 STAM_PROFILE_STOP(&pVM->pgm.s.StatTrackDeref, a);
1319 return;
1320 }
1321 }
1322 }
1323
1324 for (;;)
1325 AssertReleaseMsgFailed(("HCPhys=%VHp wasn't found!\n", HCPhys));
1326# else /* !PGMPOOL_WITH_GCPHYS_TRACKING */
1327 pShwPage->cPresent--;
1328 pVM->pgm.s.CTXSUFF(pPool)->cPresent--;
1329# endif /* !PGMPOOL_WITH_GCPHYS_TRACKING */
1330}
1331
1332
1333/**
1334 * Update the tracking of shadowed pages.
1335 *
1336 * @param pVM The VM handle.
1337 * @param pShwPage The shadow page.
 1338 * @param u16 The top 16 bits of pPage->HCPhys.
 1339 * @param pPage Pointer to the guest page. This will be modified.
1340 * @param iPTDst The index into the shadow table.
1341 */
1342DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackAddref)(PVM pVM, PPGMPOOLPAGE pShwPage, uint16_t u16, PPGMPAGE pPage, const unsigned iPTDst)
1343{
1344# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
1345 /*
1346 * We're making certain assumptions about the placement of cRef and idx.
1347 */
1348 Assert(MM_RAM_FLAGS_IDX_SHIFT == 48);
1349 Assert(MM_RAM_FLAGS_CREFS_SHIFT > MM_RAM_FLAGS_IDX_SHIFT);
1350
1351 /*
1352 * Just deal with the simple first time here.
1353 */
1354 if (!u16)
1355 {
1356 STAM_COUNTER_INC(&pVM->pgm.s.StatTrackVirgin);
1357 u16 = (1 << (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT)) | pShwPage->idx;
1358 }
1359 else
1360 u16 = pgmPoolTrackPhysExtAddref(pVM, u16, pShwPage->idx);
1361
1362 /* write back, trying to be clever... */
1363 Log2(("SyncPageWorkerTrackAddRef: u16=%#x pPage->HCPhys=%VHp->%VHp iPTDst=%#x\n",
1364 u16, pPage->HCPhys, (pPage->HCPhys & MM_RAM_FLAGS_NO_REFS_MASK) | ((uint64_t)u16 << MM_RAM_FLAGS_CREFS_SHIFT), iPTDst));
1365 *((uint16_t *)&pPage->HCPhys + 3) = u16; /** @todo PAGE FLAGS */
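/* (On a little-endian host, ((uint16_t *)&HCPhys)[3] addresses bits 48..63,
 * i.e. exactly the MM_RAM_FLAGS idx/cref field whose layout the asserts at
 * the top of this function check.) */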
1366# endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
1367
1368 /* update statistics. */
1369 pVM->pgm.s.CTXSUFF(pPool)->cPresent++;
1370 pShwPage->cPresent++;
1371 if (pShwPage->iFirstPresent > iPTDst)
1372 pShwPage->iFirstPresent = iPTDst;
1373}
1374#endif /* PGMPOOL_WITH_USER_TRACKING */
1375
1376
1377/**
1378 * Creates a 4K shadow page for a guest page.
1379 *
 1380 * For 4M pages the caller must convert the PDE4M to a PTE; this includes adjusting the
 1381 * physical address. Of the PdeSrc argument only the flags are used. No page structures
 1382 * will be mapped in this function.
1383 *
1384 * @param pVM VM handle.
1385 * @param pPteDst Destination page table entry.
1386 * @param PdeSrc Source page directory entry (i.e. Guest OS page directory entry).
1387 * Can safely assume that only the flags are being used.
1388 * @param PteSrc Source page table entry (i.e. Guest OS page table entry).
1389 * @param pShwPage Pointer to the shadow page.
1390 * @param iPTDst The index into the shadow table.
1391 *
1392 * @remark Not used for 2/4MB pages!
1393 */
1394DECLINLINE(void) PGM_BTH_NAME(SyncPageWorker)(PVM pVM, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc, PPGMPOOLPAGE pShwPage, unsigned iPTDst)
1395{
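/*
 * (Illustration of the 4M-page conversion mentioned in the header, assuming
 * 32-bit guest layout; the exact expression lives in the callers, see the
 * big-page paths of SyncPage/SyncPT. Roughly:
 *     PteSrc.u = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc)
 *              | (GCPtrPage & GST_BIG_PAGE_OFFSET_MASK & X86_PTE_PG_MASK)
 *              | (PdeSrc.u & ~GST_PDE_PG_MASK);
 * i.e. select the 4KB frame inside the big page and reuse the PDE flags.)
 */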
1396 if (PteSrc.n.u1Present)
1397 {
1398 /*
1399 * Find the ram range.
1400 */
1401 PPGMPAGE pPage;
1402 int rc = pgmPhysGetPageEx(&pVM->pgm.s, PteSrc.u & GST_PTE_PG_MASK, &pPage);
1403 if (VBOX_SUCCESS(rc))
1404 {
 1405 /** @todo investigate PWT, PCD and PAT. */
1406 /*
1407 * Make page table entry.
1408 */
1409 const RTHCPHYS HCPhys = pPage->HCPhys; /** @todo FLAGS */
1410 SHWPTE PteDst;
1411 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1412 {
1413 /** @todo r=bird: Are we actually handling dirty and access bits for pages with access handlers correctly? No. */
1414 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
1415 {
1416#if PGM_SHW_TYPE == PGM_TYPE_EPT
1417 PteDst.u = (HCPhys & EPT_PTE_PG_MASK);
1418 PteDst.n.u1Present = 1;
1419 PteDst.n.u1Execute = 1;
1420 /* PteDst.n.u1Write = 0 && PteDst.n.u1Big = 0 */
1421#else
1422 PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
1423 | (HCPhys & X86_PTE_PAE_PG_MASK);
1424#endif
1425 }
1426 else
1427 {
 1428 LogFlow(("SyncPageWorker: monitored page (%VHp) -> mark not present\n", HCPhys));
1429 PteDst.u = 0;
1430 }
1431 /** @todo count these two kinds. */
1432 }
1433 else
1434 {
1435#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
1436 /*
1437 * If the page or page directory entry is not marked accessed,
1438 * we mark the page not present.
1439 */
1440 if (!PteSrc.n.u1Accessed || !PdeSrc.n.u1Accessed)
1441 {
 1442 LogFlow(("SyncPageWorker: page and/or page directory not accessed -> mark not present\n"));
1443 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,AccessedPage));
1444 PteDst.u = 0;
1445 }
1446 else
1447 /*
1448 * If the page is not flagged as dirty and is writable, then make it read-only, so we can set the dirty bit
1449 * when the page is modified.
1450 */
1451 if (!PteSrc.n.u1Dirty && (PdeSrc.n.u1Write & PteSrc.n.u1Write))
1452 {
1453 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,DirtyPage));
1454 PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
1455 | (HCPhys & X86_PTE_PAE_PG_MASK)
1456 | PGM_PTFLAGS_TRACK_DIRTY;
1457 }
1458 else
1459#endif
1460 {
1461 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,DirtyPageSkipped));
1462#if PGM_SHW_TYPE == PGM_TYPE_EPT
1463 PteDst.u = (HCPhys & EPT_PTE_PG_MASK);
1464 PteDst.n.u1Present = 1;
1465 PteDst.n.u1Write = 1;
1466 PteDst.n.u1Execute = 1;
1467 /* PteDst.n.u1Big = 0 */
1468#else
1469 PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
1470 | (HCPhys & X86_PTE_PAE_PG_MASK);
1471#endif
1472 }
1473 }
1474
1475#ifdef PGMPOOL_WITH_USER_TRACKING
1476 /*
1477 * Keep user track up to date.
1478 */
1479 if (PteDst.n.u1Present)
1480 {
1481 if (!pPteDst->n.u1Present)
1482 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst);
1483 else if ((pPteDst->u & SHW_PTE_PG_MASK) != (PteDst.u & SHW_PTE_PG_MASK))
1484 {
1485 Log2(("SyncPageWorker: deref! *pPteDst=%RX64 PteDst=%RX64\n", (uint64_t)pPteDst->u, (uint64_t)PteDst.u));
1486 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
1487 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst);
1488 }
1489 }
1490 else if (pPteDst->n.u1Present)
1491 {
1492 Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u));
1493 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
1494 }
1495#endif /* PGMPOOL_WITH_USER_TRACKING */
1496
1497 /*
1498 * Update statistics and commit the entry.
1499 */
1500 if (!PteSrc.n.u1Global)
1501 pShwPage->fSeenNonGlobal = true;
1502 *pPteDst = PteDst;
1503 }
1504 /* else MMIO or invalid page, we must handle them manually in the #PF handler. */
1505 /** @todo count these. */
1506 }
1507 else
1508 {
1509 /*
1510 * Page not-present.
1511 */
1512 LogFlow(("SyncPageWorker: page not present in Pte\n"));
1513#ifdef PGMPOOL_WITH_USER_TRACKING
1514 /* Keep user track up to date. */
1515 if (pPteDst->n.u1Present)
1516 {
1517 Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u));
1518 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
1519 }
1520#endif /* PGMPOOL_WITH_USER_TRACKING */
1521 pPteDst->u = 0;
1522 /** @todo count these. */
1523 }
1524}
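/*
 * Illustrative sketch (compiled out): the dirty bit emulation armed by
 * SyncPageWorker above, in isolation. A guest PTE that is writable but not
 * yet dirty is shadowed read-only and tagged with PGM_PTFLAGS_TRACK_DIRTY;
 * the first guest write then faults and CheckPageFault further down restores
 * write access after setting the guest dirty bit. The helper name is
 * hypothetical and the flag handling is reduced to the two relevant bits.
 */
#if 0
DECLINLINE(void) pgmSketchArmDirtyTracking(PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc)
{
    if (!PteSrc.n.u1Dirty && (PdeSrc.n.u1Write & PteSrc.n.u1Write))
    {
        pPteDst->n.u1Write = 0;                 /* force a #PF on the first write... */
        pPteDst->u |= PGM_PTFLAGS_TRACK_DIRTY;  /* ...and record why we did it. */
    }
}
#endif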
1525
1526
1527/**
1528 * Syncs a guest OS page.
1529 *
1530 * There are no conflicts at this point, neither is there any need for
1531 * page table allocations.
1532 *
1533 * @returns VBox status code.
1534 * @returns VINF_PGM_SYNCPAGE_MODIFIED_PDE if it modifies the PDE in any way.
1535 * @param pVM VM handle.
1536 * @param PdeSrc Page directory entry of the guest.
1537 * @param GCPtrPage Guest context page address.
1538 * @param cPages Number of pages to sync (PGM_SYNC_N_PAGES) (default=1).
1539 * @param uErr Fault error (X86_TRAP_PF_*).
1540 */
1541PGM_BTH_DECL(int, SyncPage)(PVM pVM, GSTPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uErr)
1542{
1543 LogFlow(("SyncPage: GCPtrPage=%VGv cPages=%d uErr=%#x\n", GCPtrPage, cPages, uErr));
1544
1545#if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
1546 || PGM_GST_TYPE == PGM_TYPE_PAE \
1547 || PGM_GST_TYPE == PGM_TYPE_AMD64) \
1548 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
1549 && PGM_SHW_TYPE != PGM_TYPE_EPT
1550
1551# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
1552 bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
1553# endif
1554
1555 /*
1556 * Assert preconditions.
1557 */
1558 STAM_COUNTER_INC(&pVM->pgm.s.StatGCSyncPagePD[(GCPtrPage >> GST_PD_SHIFT) & GST_PD_MASK]);
1559 Assert(PdeSrc.n.u1Present);
1560 Assert(cPages);
1561
1562 /*
1563 * Get the shadow PDE, find the shadow page table in the pool.
1564 */
1565# if PGM_SHW_TYPE == PGM_TYPE_32BIT
1566 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT;
1567 X86PDE PdeDst = pVM->pgm.s.CTXMID(p,32BitPD)->a[iPDDst];
1568# elif PGM_SHW_TYPE == PGM_TYPE_PAE
1569 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT;
1570 const unsigned iPdpte = (GCPtrPage >> X86_PDPT_SHIFT); NOREF(iPdpte); /* no mask; flat index into the 2048 entry array. */
1571 PX86PDPT pPdptDst = pVM->pgm.s.CTXMID(p,PaePDPT); NOREF(pPdptDst);
1572 X86PDEPAE PdeDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0]->a[iPDDst];
1573# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
1574 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
1575 const unsigned iPdpte = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1576 PX86PDPAE pPDDst;
1577 X86PDEPAE PdeDst;
1578 PX86PDPT pPdptDst;
1579
1580 int rc = PGMShwGetLongModePDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
1581 AssertRCReturn(rc, rc);
1582 Assert(pPDDst && pPdptDst);
1583 PdeDst = pPDDst->a[iPDDst];
1584# endif
1585 Assert(PdeDst.n.u1Present);
1586 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
1587
1588# if PGM_GST_TYPE == PGM_TYPE_AMD64
1589 /* Fetch the pgm pool shadow descriptor. */
1590 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpte].u & X86_PDPE_PG_MASK);
1591 Assert(pShwPde);
1592# endif
1593
1594 /*
1595 * Check that the page is present and that the shadow PDE isn't out of sync.
1596 */
1597# if PGM_GST_TYPE == PGM_TYPE_AMD64
1598 const bool fBigPage = PdeSrc.b.u1Size;
1599# else
1600 const bool fBigPage = PdeSrc.b.u1Size && (CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
1601# endif
1602 RTGCPHYS GCPhys;
1603 if (!fBigPage)
1604 {
1605 GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
1606# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1607 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
1608 GCPhys |= (iPDDst & 1) * (PAGE_SIZE/2);
1609# endif
1610 }
1611 else
1612 {
1613 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
1614# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1615 /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
1616 GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
1617# endif
1618 }
1619 if ( pShwPage->GCPhys == GCPhys
1620 && PdeSrc.n.u1Present
1621 && (PdeSrc.n.u1User == PdeDst.n.u1User)
1622 && (PdeSrc.n.u1Write == PdeDst.n.u1Write || !PdeDst.n.u1Write)
1623# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
1624 && (!fNoExecuteBitValid || PdeSrc.n.u1NoExecute == PdeDst.n.u1NoExecute)
1625# endif
1626 )
1627 {
1628 /*
1629 * Check that the PDE is marked accessed already.
1630 * Since we set the accessed bit *before* getting here on a #PF, this
1631 * check is only meant for dealing with non-#PF'ing paths.
1632 */
1633 if (PdeSrc.n.u1Accessed)
1634 {
1635 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1636 if (!fBigPage)
1637 {
1638 /*
1639 * 4KB Page - Map the guest page table.
1640 */
1641 PGSTPT pPTSrc;
1642 int rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
1643 if (VBOX_SUCCESS(rc))
1644 {
1645# ifdef PGM_SYNC_N_PAGES
1646 Assert(cPages == 1 || !(uErr & X86_TRAP_PF_P));
1647 if (cPages > 1 && !(uErr & X86_TRAP_PF_P))
1648 {
1649 /*
1650 * This code path is currently only taken when the caller is PGMTrap0eHandler
1651 * for non-present pages!
1652 *
1653 * We're setting PGM_SYNC_NR_PAGES pages around the faulting page to sync it and
1654 * deal with locality.
1655 */
1656 unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1657# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1658 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
1659 const unsigned offPTSrc = ((GCPtrPage >> SHW_PD_SHIFT) & 1) * 512;
1660# else
1661 const unsigned offPTSrc = 0;
1662# endif
1663 const unsigned iPTDstEnd = RT_MIN(iPTDst + PGM_SYNC_NR_PAGES / 2, RT_ELEMENTS(pPTDst->a));
1664 if (iPTDst < PGM_SYNC_NR_PAGES / 2)
1665 iPTDst = 0;
1666 else
1667 iPTDst -= PGM_SYNC_NR_PAGES / 2;
1668 for (; iPTDst < iPTDstEnd; iPTDst++)
1669 {
1670 if (!pPTDst->a[iPTDst].n.u1Present)
1671 {
1672 GSTPTE PteSrc = pPTSrc->a[offPTSrc + iPTDst];
1673 RTGCUINTPTR GCPtrCurPage = ((RTGCUINTPTR)GCPtrPage & ~(RTGCUINTPTR)(GST_PT_MASK << GST_PT_SHIFT)) | ((offPTSrc + iPTDst) << PAGE_SHIFT);
1674 NOREF(GCPtrCurPage);
1675#ifndef IN_RING0
1676 /*
1677 * Assuming kernel code will be marked as supervisor - and not as user level
1678 * and executed using a conforming code selector - and marked as read-only.
1679 * Also assume that if we're monitoring a page, it's of no interest to CSAM.
1680 */
1681 PPGMPAGE pPage;
1682 if ( ((PdeSrc.u & PteSrc.u) & (X86_PTE_RW | X86_PTE_US))
1683 || iPTDst == ((GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK) /* always sync GCPtrPage */
1684 || !CSAMDoesPageNeedScanning(pVM, (RTRCPTR)GCPtrCurPage)
1685 || ( (pPage = pgmPhysGetPage(&pVM->pgm.s, PteSrc.u & GST_PTE_PG_MASK))
1686 && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1687 )
1688#endif /* else: CSAM not active */
1689 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
1690 Log2(("SyncPage: 4K+ %VGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx%s\n",
1691 GCPtrCurPage, PteSrc.n.u1Present,
1692 PteSrc.n.u1Write & PdeSrc.n.u1Write,
1693 PteSrc.n.u1User & PdeSrc.n.u1User,
1694 (uint64_t)PteSrc.u,
1695 (uint64_t)pPTDst->a[iPTDst].u,
1696 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1697 }
1698 }
1699 }
1700 else
1701# endif /* PGM_SYNC_N_PAGES */
1702 {
1703 const unsigned iPTSrc = (GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK;
1704 GSTPTE PteSrc = pPTSrc->a[iPTSrc];
1705 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1706 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
1707 Log2(("SyncPage: 4K %VGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s\n",
1708 GCPtrPage, PteSrc.n.u1Present,
1709 PteSrc.n.u1Write & PdeSrc.n.u1Write,
1710 PteSrc.n.u1User & PdeSrc.n.u1User,
1711 (uint64_t)PteSrc.u,
1712 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1713 }
1714 }
1715 else /* MMIO or invalid page: emulated in #PF handler. */
1716 {
1717 LogFlow(("PGM_GCPHYS_2_PTR %VGp failed with %Vrc\n", GCPhys, rc));
1718 Assert(!pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK].n.u1Present);
1719 }
1720 }
1721 else
1722 {
1723 /*
1724 * 4/2MB page - lazy syncing shadow 4K pages.
1725 * (There are many causes of getting here, it's no longer only CSAM.)
1726 */
1727 /* Calculate the GC physical address of this 4KB shadow page. */
1728 RTGCPHYS GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc) | ((RTGCUINTPTR)GCPtrPage & GST_BIG_PAGE_OFFSET_MASK);
1729 /* Find ram range. */
1730 PPGMPAGE pPage;
1731 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1732 if (VBOX_SUCCESS(rc))
1733 {
1734 /*
1735 * Make shadow PTE entry.
1736 */
1737 const RTHCPHYS HCPhys = pPage->HCPhys; /** @todo PAGE FLAGS */
1738 SHWPTE PteDst;
1739 PteDst.u = (PdeSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
1740 | (HCPhys & X86_PTE_PAE_PG_MASK);
1741 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1742 {
1743 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
1744 PteDst.n.u1Write = 0;
1745 else
1746 PteDst.u = 0;
1747 }
1748 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1749# ifdef PGMPOOL_WITH_USER_TRACKING
1750 if (PteDst.n.u1Present && !pPTDst->a[iPTDst].n.u1Present)
1751 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst);
1752# endif
1753 pPTDst->a[iPTDst] = PteDst;
1754
1755
1756 /*
1757 * If the page is not flagged as dirty and is writable, then make it read-only
1758 * at PD level, so we can set the dirty bit when the page is modified.
1759 *
1760 * ASSUMES that page access handlers are implemented on page table entry level.
1761 * Thus we will first catch the dirty access and set PDE.D and restart. If
1762 * there is an access handler, we'll trap again and let it work on the problem.
1763 */
1764 /** @todo r=bird: figure out why we need this here, SyncPT should've taken care of this already.
1765 * As for invlpg, it simply frees the whole shadow PT.
1766 * ...It's possibly because the guest clears it and the guest doesn't really tell us... */
1767 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
1768 {
1769 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,DirtyPageBig));
1770 PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
1771 PdeDst.n.u1Write = 0;
1772 }
1773 else
1774 {
1775 PdeDst.au32[0] &= ~PGM_PDFLAGS_TRACK_DIRTY;
1776 PdeDst.n.u1Write = PdeSrc.n.u1Write;
1777 }
1778# if PGM_SHW_TYPE == PGM_TYPE_32BIT
1779 pVM->pgm.s.CTXMID(p,32BitPD)->a[iPDDst] = PdeDst;
1780# elif PGM_SHW_TYPE == PGM_TYPE_PAE
1781 pVM->pgm.s.CTXMID(ap,PaePDs)[0]->a[iPDDst] = PdeDst;
1782# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
1783 pPDDst->a[iPDDst] = PdeDst;
1784# endif
1785 Log2(("SyncPage: BIG %VGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx} GCPhys=%VGp%s\n",
1786 GCPtrPage, PdeSrc.n.u1Present, PdeSrc.n.u1Write, PdeSrc.n.u1User, (uint64_t)PdeSrc.u, GCPhys,
1787 PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1788 }
1789 else
1790 LogFlow(("PGM_GCPHYS_2_PTR %VGp (big) failed with %Vrc\n", GCPhys, rc));
1791 }
1792 return VINF_SUCCESS;
1793 }
1794 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncPagePDNAs));
1795 }
1796 else
1797 {
1798 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncPagePDOutOfSync));
1799 Log2(("SyncPage: Out-Of-Sync PDE at %VGp PdeSrc=%RX64 PdeDst=%RX64\n",
1800 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
1801 }
1802
1803 /*
1804 * Mark the PDE not present. Restart the instruction and let #PF call SyncPT.
1805 * Yea, I'm lazy.
1806 */
1807 PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
1808# if PGM_GST_TYPE == PGM_TYPE_AMD64
1809 pgmPoolFreeByPage(pPool, pShwPage, pShwPde->idx, iPDDst);
1810# else
1811 pgmPoolFreeByPage(pPool, pShwPage, SHW_POOL_ROOT_IDX, iPDDst);
1812# endif
1813
1814# if PGM_SHW_TYPE == PGM_TYPE_32BIT
1815 pVM->pgm.s.CTXMID(p,32BitPD)->a[iPDDst].u = 0;
1816# elif PGM_SHW_TYPE == PGM_TYPE_PAE
1817 pVM->pgm.s.CTXMID(ap,PaePDs)[0]->a[iPDDst].u = 0;
1818# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
1819 pPDDst->a[iPDDst].u = 0;
1820# endif
1821 PGM_INVL_GUEST_TLBS();
1822 return VINF_PGM_SYNCPAGE_MODIFIED_PDE;
1823
1824#elif (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
1825 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
1826 && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
1827
1828# ifdef PGM_SYNC_N_PAGES
1829 /*
1830 * Get the shadow PDE, find the shadow page table in the pool.
1831 */
1832# if PGM_SHW_TYPE == PGM_TYPE_32BIT
1833 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT;
1834 X86PDE PdeDst = pVM->pgm.s.CTXMID(p,32BitPD)->a[iPDDst];
1835# elif PGM_SHW_TYPE == PGM_TYPE_PAE
1836 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT; /* no mask; flat index into the 2048 entry array. */
1837 X86PDEPAE PdeDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0]->a[iPDDst];
1838# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
1839 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
1840 const unsigned iPdpte = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; NOREF(iPdpte);
1841 PX86PDPAE pPDDst;
1842 X86PDEPAE PdeDst;
1843 PX86PDPT pPdptDst;
1844
1845 int rc = PGMShwGetLongModePDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
1846 AssertRCReturn(rc, rc);
1847 Assert(pPDDst && pPdptDst);
1848 PdeDst = pPDDst->a[iPDDst];
1849# elif PGM_SHW_TYPE == PGM_TYPE_EPT
1850 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
1851 PEPTPD pPDDst;
1852 EPTPDE PdeDst;
1853
1854 int rc = PGMShwGetEPTPDPtr(pVM, GCPtrPage, NULL, &pPDDst);
1855 AssertRCReturn(rc, rc);
1856 Assert(pPDDst);
1857 PdeDst = pPDDst->a[iPDDst];
1858# endif
1859 Assert(PdeDst.n.u1Present);
1860 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
1861 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1862
1863 Assert(cPages == 1 || !(uErr & X86_TRAP_PF_P));
1864 if (cPages > 1 && !(uErr & X86_TRAP_PF_P))
1865 {
1866 /*
1867 * This code path is currently only taken when the caller is PGMTrap0eHandler
1868 * for non-present pages!
1869 *
1870 * We're setting PGM_SYNC_NR_PAGES pages around the faulting page to sync it and
1871 * deal with locality.
1872 */
1873 unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1874 const unsigned iPTDstEnd = RT_MIN(iPTDst + PGM_SYNC_NR_PAGES / 2, RT_ELEMENTS(pPTDst->a));
1875 if (iPTDst < PGM_SYNC_NR_PAGES / 2)
1876 iPTDst = 0;
1877 else
1878 iPTDst -= PGM_SYNC_NR_PAGES / 2;
1879 for (; iPTDst < iPTDstEnd; iPTDst++)
1880 {
1881 if (!pPTDst->a[iPTDst].n.u1Present)
1882 {
1883 GSTPTE PteSrc;
1884
1885 RTGCUINTPTR GCPtrCurPage = ((RTGCUINTPTR)GCPtrPage & ~(RTGCUINTPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
1886
1887 /* Fake the page table entry */
1888 PteSrc.u = GCPtrCurPage;
1889 PteSrc.n.u1Present = 1;
1890 PteSrc.n.u1Dirty = 1;
1891 PteSrc.n.u1Accessed = 1;
1892 PteSrc.n.u1Write = 1;
1893 PteSrc.n.u1User = 1;
1894
1895 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
1896
1897 Log2(("SyncPage: 4K+ %VGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx%s\n",
1898 GCPtrCurPage, PteSrc.n.u1Present,
1899 PteSrc.n.u1Write & PdeSrc.n.u1Write,
1900 PteSrc.n.u1User & PdeSrc.n.u1User,
1901 (uint64_t)PteSrc.u,
1902 (uint64_t)pPTDst->a[iPTDst].u,
1903 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1904 }
1905 else
1906 Log4(("%VGv iPTDst=%x pPTDst->a[iPTDst] %RX64\n", ((RTGCUINTPTR)GCPtrPage & ~(RTGCUINTPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT), iPTDst, pPTDst->a[iPTDst].u));
1907 }
1908 }
1909 else
1910# endif /* PGM_SYNC_N_PAGES */
1911 {
1912 GSTPTE PteSrc;
1913 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1914 RTGCUINTPTR GCPtrCurPage = ((RTGCUINTPTR)GCPtrPage & ~(RTGCUINTPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
1915
1916 /* Fake the page table entry */
1917 PteSrc.u = GCPtrCurPage;
1918 PteSrc.n.u1Present = 1;
1919 PteSrc.n.u1Dirty = 1;
1920 PteSrc.n.u1Accessed = 1;
1921 PteSrc.n.u1Write = 1;
1922 PteSrc.n.u1User = 1;
1923 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
1924
1925 Log2(("SyncPage: 4K %VGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx%s\n",
1926 GCPtrPage, PteSrc.n.u1Present,
1927 PteSrc.n.u1Write & PdeSrc.n.u1Write,
1928 PteSrc.n.u1User & PdeSrc.n.u1User,
1929 (uint64_t)PteSrc.u,
1930 (uint64_t)pPTDst->a[iPTDst].u,
1931 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1932 }
1933 return VINF_SUCCESS;
1934
1935#else
1936 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE));
1937 return VERR_INTERNAL_ERROR;
1938#endif
1939}
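/*
 * Illustrative sketch (compiled out): the PGM_SYNC_NR_PAGES window used by
 * both SyncPage paths above. Up to PGM_SYNC_NR_PAGES entries around the
 * faulting one are synced, with the window clamped to the shadow page table
 * instead of wrapping. cPtEntries stands in for RT_ELEMENTS(pPTDst->a); the
 * helper name is hypothetical.
 */
#if 0
static void pgmSketchSyncWindow(unsigned iPTFault, unsigned cPtEntries)
{
    unsigned       iPTDst    = iPTFault;
    const unsigned iPTDstEnd = RT_MIN(iPTDst + PGM_SYNC_NR_PAGES / 2, cPtEntries);
    if (iPTDst < PGM_SYNC_NR_PAGES / 2)
        iPTDst = 0;                         /* window would underflow, start at 0. */
    else
        iPTDst -= PGM_SYNC_NR_PAGES / 2;    /* centre the window on the fault. */
    for (; iPTDst < iPTDstEnd; iPTDst++)
    {
        /* ... sync the entry at iPTDst ... */
    }
}
#endif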
1940
1941
1942
1943#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
1944
1945/**
1946 * Investigate page fault and handle write protection page faults caused by
1947 * dirty bit tracking.
1948 *
1949 * @returns VBox status code.
1950 * @param pVM VM handle.
1951 * @param uErr Page fault error code.
1952 * @param pPdeDst Shadow page directory entry.
1953 * @param pPdeSrc Guest page directory entry.
1954 * @param GCPtrPage Guest context page address.
1955 */
1956PGM_BTH_DECL(int, CheckPageFault)(PVM pVM, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCUINTPTR GCPtrPage)
1957{
1958 bool fWriteProtect = !!(CPUMGetGuestCR0(pVM) & X86_CR0_WP);
1959 bool fUserLevelFault = !!(uErr & X86_TRAP_PF_US);
1960 bool fWriteFault = !!(uErr & X86_TRAP_PF_RW);
1961# if PGM_GST_TYPE == PGM_TYPE_AMD64
1962 bool fBigPagesSupported = true;
1963# else
1964 bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
1965# endif
1966# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
1967 bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
1968# endif
1969 unsigned uPageFaultLevel;
1970 int rc;
1971
1972 STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat, DirtyBitTracking), a);
1973 LogFlow(("CheckPageFault: GCPtrPage=%VGv uErr=%#x PdeSrc=%08x\n", GCPtrPage, uErr, pPdeSrc->u));
1974
1975# if PGM_GST_TYPE == PGM_TYPE_PAE \
1976 || PGM_GST_TYPE == PGM_TYPE_AMD64
1977
1978# if PGM_GST_TYPE == PGM_TYPE_AMD64
1979 PX86PML4E pPml4eSrc;
1980 PX86PDPE pPdpeSrc;
1981
1982 pPdpeSrc = pgmGstGetLongModePDPTPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc);
1983 Assert(pPml4eSrc);
1984
1985 /*
1986 * Real page fault? (PML4E level)
1987 */
1988 if ( (uErr & X86_TRAP_PF_RSVD)
1989 || !pPml4eSrc->n.u1Present
1990 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && pPml4eSrc->n.u1NoExecute)
1991 || (fWriteFault && !pPml4eSrc->n.u1Write && (fUserLevelFault || fWriteProtect))
1992 || (fUserLevelFault && !pPml4eSrc->n.u1User)
1993 )
1994 {
1995 uPageFaultLevel = 0;
1996 goto UpperLevelPageFault;
1997 }
1998 Assert(pPdpeSrc);
1999
2000# else /* PAE */
2001 PX86PDPE pPdpeSrc = &pVM->pgm.s.CTXSUFF(pGstPaePDPT)->a[(GCPtrPage >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
2002# endif
2003
2004 /*
2005 * Real page fault? (PDPE level)
2006 */
2007 if ( (uErr & X86_TRAP_PF_RSVD)
2008 || !pPdpeSrc->n.u1Present
2009# if PGM_GST_TYPE == PGM_TYPE_AMD64 /* NX, r/w, u/s bits in the PDPE are long mode only */
2010 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && pPdpeSrc->lm.u1NoExecute)
2011 || (fWriteFault && !pPdpeSrc->lm.u1Write && (fUserLevelFault || fWriteProtect))
2012 || (fUserLevelFault && !pPdpeSrc->lm.u1User)
2013# endif
2014 )
2015 {
2016 uPageFaultLevel = 1;
2017 goto UpperLevelPageFault;
2018 }
2019# endif
2020
2021 /*
2022 * Real page fault? (PDE level)
2023 */
2024 if ( (uErr & X86_TRAP_PF_RSVD)
2025 || !pPdeSrc->n.u1Present
2026# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
2027 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && pPdeSrc->n.u1NoExecute)
2028# endif
2029 || (fWriteFault && !pPdeSrc->n.u1Write && (fUserLevelFault || fWriteProtect))
2030 || (fUserLevelFault && !pPdeSrc->n.u1User) )
2031 {
2032 uPageFaultLevel = 2;
2033 goto UpperLevelPageFault;
2034 }
2035
2036 /*
2037 * First check the easy case where the page directory has been marked read-only to track
2038 * the dirty bit of an emulated BIG page.
2039 */
2040 if (pPdeSrc->b.u1Size && fBigPagesSupported)
2041 {
2042 /* Mark guest page directory as accessed */
2043# if PGM_GST_TYPE == PGM_TYPE_AMD64
2044 pPml4eSrc->n.u1Accessed = 1;
2045 pPdpeSrc->lm.u1Accessed = 1;
2046# endif
2047 pPdeSrc->b.u1Accessed = 1;
2048
2049 /*
2050 * Only write protection page faults are relevant here.
2051 */
2052 if (fWriteFault)
2053 {
2054 /* Mark guest page directory as dirty (BIG page only). */
2055 pPdeSrc->b.u1Dirty = 1;
2056
2057 if (pPdeDst->n.u1Present && (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY))
2058 {
2059 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,DirtyPageTrap));
2060
2061 Assert(pPdeSrc->b.u1Write);
2062
2063 pPdeDst->n.u1Write = 1;
2064 pPdeDst->n.u1Accessed = 1;
2065 pPdeDst->au32[0] &= ~PGM_PDFLAGS_TRACK_DIRTY;
2066 PGM_INVL_BIG_PG(GCPtrPage);
2067 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,DirtyBitTracking), a);
2068 return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;
2069 }
2070 }
2071 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,DirtyBitTracking), a);
2072 return VINF_PGM_NO_DIRTY_BIT_TRACKING;
2073 }
2074 /* else: 4KB page table */
2075
2076 /*
2077 * Map the guest page table.
2078 */
2079 PGSTPT pPTSrc;
2080 rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
2081 if (VBOX_SUCCESS(rc))
2082 {
2083 /*
2084 * Real page fault?
2085 */
2086 PGSTPTE pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
2087 const GSTPTE PteSrc = *pPteSrc;
2088 if ( !PteSrc.n.u1Present
2089# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
2090 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && PteSrc.n.u1NoExecute)
2091# endif
2092 || (fWriteFault && !PteSrc.n.u1Write && (fUserLevelFault || fWriteProtect))
2093 || (fUserLevelFault && !PteSrc.n.u1User)
2094 )
2095 {
2096# ifdef IN_GC
2097 STAM_COUNTER_INC(&pVM->pgm.s.StatGCDirtyTrackRealPF);
2098# endif
2099 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,DirtyBitTracking), a);
2100 LogFlow(("CheckPageFault: real page fault at %VGv PteSrc.u=%08x (2)\n", GCPtrPage, PteSrc.u));
2101
2102 /* Check the present bit as the shadow tables can cause different error codes by being out of sync.
2103 * See the 2nd case above as well.
2104 */
2105 if (pPdeSrc->n.u1Present && pPteSrc->n.u1Present)
2106 TRPMSetErrorCode(pVM, uErr | X86_TRAP_PF_P); /* page-level protection violation */
2107
2108 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,DirtyBitTracking), a);
2109 return VINF_EM_RAW_GUEST_TRAP;
2110 }
2111 LogFlow(("CheckPageFault: page fault at %VGv PteSrc.u=%08x\n", GCPtrPage, PteSrc.u));
2112
2113 /*
2114 * Set the accessed bits in the page directory and the page table.
2115 */
2116# if PGM_GST_TYPE == PGM_TYPE_AMD64
2117 pPml4eSrc->n.u1Accessed = 1;
2118 pPdpeSrc->lm.u1Accessed = 1;
2119# endif
2120 pPdeSrc->n.u1Accessed = 1;
2121 pPteSrc->n.u1Accessed = 1;
2122
2123 /*
2124 * Only write protection page faults are relevant here.
2125 */
2126 if (fWriteFault)
2127 {
2128 /* Write access, so mark guest entry as dirty. */
2129# if defined(IN_GC) && defined(VBOX_WITH_STATISTICS)
2130 if (!pPteSrc->n.u1Dirty)
2131 STAM_COUNTER_INC(&pVM->pgm.s.StatGCDirtiedPage);
2132 else
2133 STAM_COUNTER_INC(&pVM->pgm.s.StatGCPageAlreadyDirty);
2134# endif
2135
2136 pPteSrc->n.u1Dirty = 1;
2137
2138 if (pPdeDst->n.u1Present)
2139 {
2140#ifndef IN_RING0
2141 /* Bail out here as pgmPoolGetPageByHCPhys will return NULL and we'll crash below.
2142 * Our individual shadow handlers will provide more information and force a fatal exit.
2143 */
2144 if (MMHyperIsInsideArea(pVM, (RTGCPTR)GCPtrPage))
2145 {
2146 LogRel(("CheckPageFault: write to hypervisor region %VGv\n", GCPtrPage));
2147 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,DirtyBitTracking), a);
2148 return VINF_SUCCESS;
2149 }
2150#endif
2151 /*
2152 * Map shadow page table.
2153 */
2154 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, pPdeDst->u & SHW_PDE_PG_MASK);
2155 if (pShwPage)
2156 {
2157 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2158 PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
2159 if ( pPteDst->n.u1Present /** @todo Optimize accessed bit emulation? */
2160 && (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY))
2161 {
2162 LogFlow(("DIRTY page trap addr=%VGv\n", GCPtrPage));
2163# ifdef VBOX_STRICT
2164 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
2165 if (pPage)
2166 AssertMsg(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage),
2167 ("Unexpected dirty bit tracking on monitored page %VGv (phys %VGp)!!!!!!\n", GCPtrPage, pPteSrc->u & X86_PTE_PAE_PG_MASK));
2168# endif
2169 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,DirtyPageTrap));
2170
2171 Assert(pPteSrc->n.u1Write);
2172
2173 pPteDst->n.u1Write = 1;
2174 pPteDst->n.u1Dirty = 1;
2175 pPteDst->n.u1Accessed = 1;
2176 pPteDst->au32[0] &= ~PGM_PTFLAGS_TRACK_DIRTY;
2177 PGM_INVL_PG(GCPtrPage);
2178
2179 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,DirtyBitTracking), a);
2180 return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;
2181 }
2182 }
2183 else
2184 AssertMsgFailed(("pgmPoolGetPageByHCPhys %VGp failed!\n", pPdeDst->u & SHW_PDE_PG_MASK));
2185 }
2186 }
2187/** @todo Optimize accessed bit emulation? */
2188# ifdef VBOX_STRICT
2189 /*
2190 * Sanity check.
2191 */
2192 else if ( !pPteSrc->n.u1Dirty
2193 && (pPdeSrc->n.u1Write & pPteSrc->n.u1Write)
2194 && pPdeDst->n.u1Present)
2195 {
2196 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, pPdeDst->u & SHW_PDE_PG_MASK);
2197 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2198 PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
2199 if ( pPteDst->n.u1Present
2200 && pPteDst->n.u1Write)
2201 LogFlow(("Writable present page %VGv not marked for dirty bit tracking!!!\n", GCPtrPage));
2202 }
2203# endif /* VBOX_STRICT */
2204 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,DirtyBitTracking), a);
2205 return VINF_PGM_NO_DIRTY_BIT_TRACKING;
2206 }
2207 AssertRC(rc);
2208 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,DirtyBitTracking), a);
2209 return rc;
2210
2211
2212UpperLevelPageFault:
2213 /* Page fault detected while checking the PML4E, PDPE or PDE.
2214 * Single exit handler to get rid of duplicate code paths.
2215 */
2216# ifdef IN_GC
2217 STAM_COUNTER_INC(&pVM->pgm.s.StatGCDirtyTrackRealPF);
2218# endif
2219 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat, DirtyBitTracking), a);
2220 Log(("CheckPageFault: real page fault at %VGv (%d)\n", GCPtrPage, uPageFaultLevel));
2221
2222 if (
2223# if PGM_GST_TYPE == PGM_TYPE_AMD64
2224 pPml4eSrc->n.u1Present &&
2225# endif
2226# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
2227 pPdpeSrc->n.u1Present &&
2228# endif
2229 pPdeSrc->n.u1Present)
2230 {
2231 /* Check the present bit as the shadow tables can cause different error codes by being out of sync. */
2232 if (pPdeSrc->b.u1Size && fBigPagesSupported)
2233 {
2234 TRPMSetErrorCode(pVM, uErr | X86_TRAP_PF_P); /* page-level protection violation */
2235 }
2236 else
2237 {
2238 /*
2239 * Map the guest page table.
2240 */
2241 PGSTPT pPTSrc;
2242 rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
2243 if (VBOX_SUCCESS(rc))
2244 {
2245 PGSTPTE pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
2246 const GSTPTE PteSrc = *pPteSrc;
2247 if (pPteSrc->n.u1Present)
2248 TRPMSetErrorCode(pVM, uErr | X86_TRAP_PF_P); /* page-level protection violation */
2249 }
2250 AssertRC(rc);
2251 }
2252 }
2253 return VINF_EM_RAW_GUEST_TRAP;
2254}
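/*
 * Illustrative sketch (compiled out): the per-level "real guest fault?"
 * test CheckPageFault applies to the PML4E, PDPE, PDE and PTE above,
 * reduced to the common P/RW/US/NX bits of a guest PDE. The helper name
 * is hypothetical; the flag parameters mirror the locals in the function.
 */
#if 0
DECLINLINE(bool) pgmSketchIsRealGuestFault(GSTPDE Entry, uint32_t uErr,
                                           bool fWriteProtect, bool fNoExecuteBitValid)
{
    const bool fWriteFault     = !!(uErr & X86_TRAP_PF_RW);
    const bool fUserLevelFault = !!(uErr & X86_TRAP_PF_US);
    return (uErr & X86_TRAP_PF_RSVD)                                                /* reserved bit set */
        || !Entry.n.u1Present                                                       /* not present */
        || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && Entry.n.u1NoExecute)   /* NX violation */
        || (fWriteFault && !Entry.n.u1Write && (fUserLevelFault || fWriteProtect))  /* write to read-only */
        || (fUserLevelFault && !Entry.n.u1User);                                    /* user access to supervisor page */
}
#endif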
2255
2256#endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
2257
2258
2259/**
2260 * Sync a shadow page table.
2261 *
2262 * The shadow page table is not present. This includes the case where
2263 * there is a conflict with a mapping.
2264 *
2265 * @returns VBox status code.
2266 * @param pVM VM handle.
2267 * @param iPDSrc Page directory index.
2268 * @param pPDSrc Source page directory (i.e. Guest OS page directory).
2269 * Assume this is a temporary mapping.
2270 * @param GCPtrPage GC Pointer of the page that caused the fault.
2271 */
2272PGM_BTH_DECL(int, SyncPT)(PVM pVM, unsigned iPDSrc, PGSTPD pPDSrc, RTGCUINTPTR GCPtrPage)
2273{
2274 STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncPT), a);
2275 STAM_COUNTER_INC(&pVM->pgm.s.StatGCSyncPtPD[iPDSrc]);
2276 LogFlow(("SyncPT: GCPtrPage=%VGv\n", GCPtrPage));
2277
2278#if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
2279 || PGM_GST_TYPE == PGM_TYPE_PAE \
2280 || PGM_GST_TYPE == PGM_TYPE_AMD64) \
2281 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
2282 && PGM_SHW_TYPE != PGM_TYPE_EPT
2283
2284 int rc = VINF_SUCCESS;
2285
2286 /*
2287 * Validate input a little bit.
2288 */
2289 AssertMsg(iPDSrc == ((GCPtrPage >> GST_PD_SHIFT) & GST_PD_MASK), ("iPDSrc=%x GCPtrPage=%VGv\n", iPDSrc, GCPtrPage));
2290# if PGM_SHW_TYPE == PGM_TYPE_32BIT
2291 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT;
2292 PX86PD pPDDst = pVM->pgm.s.CTXMID(p,32BitPD);
2293# elif PGM_SHW_TYPE == PGM_TYPE_PAE
2294 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT; /* no mask; flat index into the 2048 entry array. */
2295 const unsigned iPdpte = (GCPtrPage >> X86_PDPT_SHIFT); NOREF(iPdpte);
2296 PX86PDPT pPdptDst = pVM->pgm.s.CTXMID(p,PaePDPT); NOREF(pPdptDst);
2297 PX86PDPAE pPDDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0];
2298# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
2299 const unsigned iPdpte = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
2300 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2301 PX86PDPAE pPDDst;
2302 PX86PDPT pPdptDst;
2303 rc = PGMShwGetLongModePDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
2304 AssertRCReturn(rc, rc);
2305 Assert(pPDDst);
2306# endif
2307
2308 PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
2309 SHWPDE PdeDst = *pPdeDst;
2310
2311# if PGM_GST_TYPE == PGM_TYPE_AMD64
2312 /* Fetch the pgm pool shadow descriptor. */
2313 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpte].u & X86_PDPE_PG_MASK);
2314 Assert(pShwPde);
2315# endif
2316
2317# ifndef PGM_WITHOUT_MAPPINGS
2318 /*
2319 * Check for conflicts.
2320 * GC: In case of a conflict we'll go to Ring-3 and do a full SyncCR3.
2321 * HC: Simply resolve the conflict.
2322 */
2323 if (PdeDst.u & PGM_PDFLAGS_MAPPING)
2324 {
2325 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
2326# ifndef IN_RING3
2327 Log(("SyncPT: Conflict at %VGv\n", GCPtrPage));
2328 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncPT), a);
2329 return VERR_ADDRESS_CONFLICT;
2330# else
2331 PPGMMAPPING pMapping = pgmGetMapping(pVM, (RTGCPTR)GCPtrPage);
2332 Assert(pMapping);
2333# if PGM_GST_TYPE == PGM_TYPE_32BIT
2334 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
2335# elif PGM_GST_TYPE == PGM_TYPE_PAE
2336 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
2337# else
2338 AssertFailed(); /* can't happen for amd64 */
2339# endif
2340 if (VBOX_FAILURE(rc))
2341 {
2342 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncPT), a);
2343 return rc;
2344 }
2345 PdeDst = *pPdeDst;
2346# endif
2347 }
2348# else /* PGM_WITHOUT_MAPPINGS */
2349 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
2350# endif /* PGM_WITHOUT_MAPPINGS */
2351 Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
2352
2353 /*
2354 * Sync page directory entry.
2355 */
2356 GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
2357 if (PdeSrc.n.u1Present)
2358 {
2359 /*
2360 * Allocate & map the page table.
2361 */
2362 PSHWPT pPTDst;
2363# if PGM_GST_TYPE == PGM_TYPE_AMD64
2364 const bool fPageTable = !PdeSrc.b.u1Size;
2365# else
2366 const bool fPageTable = !PdeSrc.b.u1Size || !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
2367# endif
2368 PPGMPOOLPAGE pShwPage;
2369 RTGCPHYS GCPhys;
2370 if (fPageTable)
2371 {
2372 GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
2373# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
2374 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
2375 GCPhys |= (iPDDst & 1) * (PAGE_SIZE / 2);
2376# endif
2377# if PGM_GST_TYPE == PGM_TYPE_AMD64
2378 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage);
2379# else
2380 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
2381# endif
2382 }
2383 else
2384 {
2385 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
2386# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
2387 /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
2388 GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
2389# endif
2390# if PGM_GST_TYPE == PGM_TYPE_AMD64
2391 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, pShwPde->idx, iPDDst, &pShwPage);
2392# else
2393 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
2394# endif
2395 }
2396 if (rc == VINF_SUCCESS)
2397 pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2398 else if (rc == VINF_PGM_CACHED_PAGE)
2399 {
2400 /*
2401 * The PT was cached, just hook it up.
2402 */
2403 if (fPageTable)
2404 PdeDst.u = pShwPage->Core.Key
2405 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2406 else
2407 {
2408 PdeDst.u = pShwPage->Core.Key
2409 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2410 /* (see explanation and assumptions further down.) */
2411 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
2412 {
2413 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,DirtyPageBig));
2414 PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
2415 PdeDst.b.u1Write = 0;
2416 }
2417 }
2418 *pPdeDst = PdeDst;
2419 return VINF_SUCCESS;
2420 }
2421 else if (rc == VERR_PGM_POOL_FLUSHED)
2422 {
2423 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
2424 return VINF_PGM_SYNC_CR3;
2425 }
2426 else
2427 AssertMsgFailedReturn(("rc=%Vrc\n", rc), VERR_INTERNAL_ERROR);
2428 PdeDst.u &= X86_PDE_AVL_MASK;
2429 PdeDst.u |= pShwPage->Core.Key;
2430
2431 /*
2432 * Page directory has been accessed (this is a fault situation, remember).
2433 */
2434 pPDSrc->a[iPDSrc].n.u1Accessed = 1;
2435 if (fPageTable)
2436 {
2437 /*
2438 * Page table - 4KB.
2439 *
2440 * Sync all or just a few entries depending on PGM_SYNC_N_PAGES.
2441 */
2442 Log2(("SyncPT: 4K %VGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx}\n",
2443 GCPtrPage, PdeSrc.b.u1Present, PdeSrc.b.u1Write, PdeSrc.b.u1User, (uint64_t)PdeSrc.u));
2444 PGSTPT pPTSrc;
2445 rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
2446 if (VBOX_SUCCESS(rc))
2447 {
2448 /*
2449 * Start by syncing the page directory entry so CSAM's TLB trick works.
2450 */
2451 PdeDst.u = (PdeDst.u & (SHW_PDE_PG_MASK | X86_PDE_AVL_MASK))
2452 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2453 *pPdeDst = PdeDst;
2454
2455 /*
2456 * Directory/page user or supervisor privilege: (same goes for read/write)
2457 *
2458 * Directory Page Combined
2459 * U/S U/S U/S
2460 * 0 0 0
2461 * 0 1 0
2462 * 1 0 0
2463 * 1 1 1
2464 *
2465 * Simple AND operation. Table listed for completeness.
2466 *
2467 */
2468 STAM_COUNTER_INC(CTXSUFF(&pVM->pgm.s.StatSynPT4k));
2469# ifdef PGM_SYNC_N_PAGES
2470 unsigned iPTBase = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
2471 unsigned iPTDst = iPTBase;
2472 const unsigned iPTDstEnd = RT_MIN(iPTDst + PGM_SYNC_NR_PAGES / 2, RT_ELEMENTS(pPTDst->a));
2473 if (iPTDst <= PGM_SYNC_NR_PAGES / 2)
2474 iPTDst = 0;
2475 else
2476 iPTDst -= PGM_SYNC_NR_PAGES / 2;
2477# else /* !PGM_SYNC_N_PAGES */
2478 unsigned iPTDst = 0;
2479 const unsigned iPTDstEnd = RT_ELEMENTS(pPTDst->a);
2480# endif /* !PGM_SYNC_N_PAGES */
2481# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
2482 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
2483 const unsigned offPTSrc = ((GCPtrPage >> SHW_PD_SHIFT) & 1) * 512;
2484# else
2485 const unsigned offPTSrc = 0;
2486# endif
2487 for (; iPTDst < iPTDstEnd; iPTDst++)
2488 {
2489 const unsigned iPTSrc = iPTDst + offPTSrc;
2490 const GSTPTE PteSrc = pPTSrc->a[iPTSrc];
2491
2492 if (PteSrc.n.u1Present) /* we've already cleared it above */
2493 {
2494# ifndef IN_RING0
2495 /*
2496 * Assuming kernel code will be marked as supervisor - and not as user level
2497 * and executed using a conforming code selector - and marked as read-only.
2498 * Also assume that if we're monitoring a page, it's of no interest to CSAM.
2499 */
2500 PPGMPAGE pPage;
2501 if ( ((PdeSrc.u & pPTSrc->a[iPTSrc].u) & (X86_PTE_RW | X86_PTE_US))
2502 || !CSAMDoesPageNeedScanning(pVM, (RTRCPTR)((iPDSrc << GST_PD_SHIFT) | (iPTSrc << PAGE_SHIFT)))
2503 || ( (pPage = pgmPhysGetPage(&pVM->pgm.s, PteSrc.u & GST_PTE_PG_MASK))
2504 && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2505 )
2506# endif
2507 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
2508 Log2(("SyncPT: 4K+ %VGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s dst.raw=%08llx iPTSrc=%x PdeSrc.u=%x physpte=%VGp\n",
2509 (RTGCPTR)((iPDSrc << GST_PD_SHIFT) | (iPTSrc << PAGE_SHIFT)),
2510 PteSrc.n.u1Present,
2511 PteSrc.n.u1Write & PdeSrc.n.u1Write,
2512 PteSrc.n.u1User & PdeSrc.n.u1User,
2513 (uint64_t)PteSrc.u,
2514 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : "", pPTDst->a[iPTDst].u, iPTSrc, PdeSrc.au32[0],
2515 (PdeSrc.u & GST_PDE_PG_MASK) + iPTSrc*sizeof(PteSrc)));
2516 }
2517 } /* for PTEs */
2518 }
2519 }
2520 else
2521 {
2522 /*
2523 * Big page - 2/4MB.
2524 *
2525 * We'll walk the ram range list in parallel and optimize lookups.
2526 * We will only sync one shadow page table at a time.
2527 */
2528 STAM_COUNTER_INC(CTXSUFF(&pVM->pgm.s.StatSynPT4M));
2529
2530 /**
2531 * @todo It might be more efficient to sync only a part of the 4MB page (similar to what we do for 4kb PDs).
2532 */
2533
2534 /*
2535 * Start by syncing the page directory entry.
2536 */
2537 PdeDst.u = (PdeDst.u & (SHW_PDE_PG_MASK | (X86_PDE_AVL_MASK & ~PGM_PDFLAGS_TRACK_DIRTY)))
2538 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2539
2540 /*
2541 * If the page is not flagged as dirty and is writable, then make it read-only
2542 * at PD level, so we can set the dirty bit when the page is modified.
2543 *
2544 * ASSUMES that page access handlers are implemented on page table entry level.
2545 * Thus we will first catch the dirty access and set PDE.D and restart. If
2546 * there is an access handler, we'll trap again and let it work on the problem.
2547 */
2548 /** @todo move the above stuff to a section in the PGM documentation. */
2549 Assert(!(PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY));
2550 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
2551 {
2552 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,DirtyPageBig));
2553 PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
2554 PdeDst.b.u1Write = 0;
2555 }
2556 *pPdeDst = PdeDst;
2557
2558 /*
2559 * Fill the shadow page table.
2560 */
2561 /* Get address and flags from the source PDE. */
2562 SHWPTE PteDstBase;
2563 PteDstBase.u = PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT);
2564
2565 /* Loop thru the entries in the shadow PT. */
2566 const RTGCUINTPTR GCPtr = (GCPtrPage >> SHW_PD_SHIFT) << SHW_PD_SHIFT; NOREF(GCPtr);
2567 Log2(("SyncPT: BIG %VGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx} Shw=%VGv GCPhys=%VGp %s\n",
2568 GCPtrPage, PdeSrc.b.u1Present, PdeSrc.b.u1Write, PdeSrc.b.u1User, (uint64_t)PdeSrc.u, GCPtr,
2569 GCPhys, PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
2570 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
2571 unsigned iPTDst = 0;
2572 while (iPTDst < RT_ELEMENTS(pPTDst->a))
2573 {
2574 /* Advance ram range list. */
2575 while (pRam && GCPhys > pRam->GCPhysLast)
2576 pRam = CTXALLSUFF(pRam->pNext);
2577 if (pRam && GCPhys >= pRam->GCPhys)
2578 {
2579 unsigned iHCPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2580 do
2581 {
2582 /* Make shadow PTE. */
2583 PPGMPAGE pPage = &pRam->aPages[iHCPage];
2584 SHWPTE PteDst;
2585
2586 /* Make sure the RAM has already been allocated. */
2587 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) /** @todo PAGE FLAGS */
2588 {
2589 if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(pPage)))
2590 {
2591# ifdef IN_RING3
2592 int rc = pgmr3PhysGrowRange(pVM, GCPhys);
2593# else
2594 int rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2595# endif
2596 if (rc != VINF_SUCCESS)
2597 return rc;
2598 }
2599 }
2600
2601 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2602 {
2603 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
2604 {
2605 PteDst.u = PGM_PAGE_GET_HCPHYS(pPage) | PteDstBase.u;
2606 PteDst.n.u1Write = 0;
2607 }
2608 else
2609 PteDst.u = 0;
2610 }
2611# ifndef IN_RING0
2612 /*
2613 * Assuming kernel code will be marked as supervisor and not as user level and executed
2614 * using a conforming code selector. Don't check for readonly, as that implies the whole
2615 * 4MB can be code or readonly data. Linux enables write access for its large pages.
2616 */
2617 else if ( !PdeSrc.n.u1User
2618 && CSAMDoesPageNeedScanning(pVM, (RTRCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT))))
2619 PteDst.u = 0;
2620# endif
2621 else
2622 PteDst.u = PGM_PAGE_GET_HCPHYS(pPage) | PteDstBase.u;
2623# ifdef PGMPOOL_WITH_USER_TRACKING
2624 if (PteDst.n.u1Present)
2625 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, pPage->HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst); /** @todo PAGE FLAGS */
2626# endif
2627 /* commit it */
2628 pPTDst->a[iPTDst] = PteDst;
2629 Log4(("SyncPT: BIG %VGv PteDst:{P=%d RW=%d U=%d raw=%08llx}%s\n",
2630 (RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT)), PteDst.n.u1Present, PteDst.n.u1Write, PteDst.n.u1User, (uint64_t)PteDst.u,
2631 PteDst.u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
2632
2633 /* advance */
2634 GCPhys += PAGE_SIZE;
2635 iHCPage++;
2636 iPTDst++;
2637 } while ( iPTDst < RT_ELEMENTS(pPTDst->a)
2638 && GCPhys <= pRam->GCPhysLast);
2639 }
2640 else if (pRam)
2641 {
2642 Log(("Invalid pages at %VGp\n", GCPhys));
2643 do
2644 {
2645 pPTDst->a[iPTDst].u = 0; /* MMIO or invalid page, we must handle them manually. */
2646 GCPhys += PAGE_SIZE;
2647 iPTDst++;
2648 } while ( iPTDst < RT_ELEMENTS(pPTDst->a)
2649 && GCPhys < pRam->GCPhys);
2650 }
2651 else
2652 {
2653 Log(("Invalid pages at %VGp (2)\n", GCPhys));
2654 for ( ; iPTDst < RT_ELEMENTS(pPTDst->a); iPTDst++)
2655 pPTDst->a[iPTDst].u = 0; /* MMIO or invalid page, we must handle them manually. */
2656 }
2657 } /* while more PTEs */
2658 } /* 4KB / 4MB */
2659 }
2660 else
2661 AssertRelease(!PdeDst.n.u1Present);
2662
2663 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncPT), a);
2664# ifdef IN_GC
2665 if (VBOX_FAILURE(rc))
2666 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncPTFailed));
2667# endif
2668 return rc;
2669
2670#elif (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
2671 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
2672 && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
2673
2674 int rc = VINF_SUCCESS;
2675
2676 /*
2677 * Validate input a little bit.
2678 */
2679# if PGM_SHW_TYPE == PGM_TYPE_32BIT
2680 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT;
2681 PX86PD pPDDst = pVM->pgm.s.CTXMID(p,32BitPD);
2682# elif PGM_SHW_TYPE == PGM_TYPE_PAE
2683 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT; /* no mask; flat index into the 2048 entry array. */
2684 PX86PDPAE pPDDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0];
2685# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
2686 const unsigned iPdpte = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
2687 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2688 PX86PDPAE pPDDst;
2689 PX86PDPT pPdptDst;
2690 rc = PGMShwGetLongModePDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
2691 AssertRCReturn(rc, rc);
2692 Assert(pPDDst);
2693
2694 /* Fetch the pgm pool shadow descriptor. */
2695 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpte].u & X86_PDPE_PG_MASK);
2696 Assert(pShwPde);
2697# elif PGM_SHW_TYPE == PGM_TYPE_EPT
2698 const unsigned iPdpte = (GCPtrPage >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
2699 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
2700 PEPTPD pPDDst;
2701 PEPTPDPT pPdptDst;
2702
2703 rc = PGMShwGetEPTPDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
2704 AssertRCReturn(rc, rc);
2705 Assert(pPDDst);
2706
2707 /* Fetch the pgm pool shadow descriptor. */
2708 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpte].u & X86_PDPE_PG_MASK);
2709 Assert(pShwPde);
2710# endif
2711 PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
2712 SHWPDE PdeDst = *pPdeDst;
2713
2714 Assert(!(PdeDst.u & PGM_PDFLAGS_MAPPING));
2715 Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
2716
2717 GSTPDE PdeSrc;
2718 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
2719 PdeSrc.n.u1Present = 1;
2720 PdeSrc.n.u1Write = 1;
2721 PdeSrc.n.u1Accessed = 1;
2722 PdeSrc.n.u1User = 1;
2723
2724 /*
2725 * Allocate & map the page table.
2726 */
2727 PSHWPT pPTDst;
2728 PPGMPOOLPAGE pShwPage;
2729 RTGCPHYS GCPhys;
2730
2731 /* Virtual address = physical address */
2732 GCPhys = GCPtrPage & X86_PAGE_4K_BASE_MASK;
2733# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_EPT
2734 rc = pgmPoolAlloc(pVM, GCPhys & ~(RT_BIT_64(SHW_PD_SHIFT) - 1), BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage);
2735# else
2736 rc = pgmPoolAlloc(pVM, GCPhys & ~(RT_BIT_64(SHW_PD_SHIFT) - 1), BTH_PGMPOOLKIND_PT_FOR_PT, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
2737# endif
2738
2739 if ( rc == VINF_SUCCESS
2740 || rc == VINF_PGM_CACHED_PAGE)
2741 pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2742 else
2743 AssertMsgFailedReturn(("rc=%Vrc\n", rc), VERR_INTERNAL_ERROR);
2744
2745 PdeDst.u &= X86_PDE_AVL_MASK;
2746 PdeDst.u |= pShwPage->Core.Key;
2747 PdeDst.n.u1Present = 1;
2748 PdeDst.n.u1Write = 1;
2749# if PGM_SHW_TYPE != PGM_TYPE_EPT
2750 PdeDst.n.u1User = 1;
2751 PdeDst.n.u1Accessed = 1;
2752# endif
2753 *pPdeDst = PdeDst;
2754
2755 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, (RTGCUINTPTR)GCPtrPage, PGM_SYNC_NR_PAGES, 0 /* page not present */);
2756 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncPT), a);
2757 return rc;
2758
2759#else
2760 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE));
2761 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncPT), a);
2762 return VERR_INTERNAL_ERROR;
2763#endif
2764}
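/*
 * Illustrative sketch (compiled out): the combined access rule from the
 * U/S table in SyncPT above. Directory and page rights simply AND together
 * for both R/W and U/S, which is why the code tests (PdeSrc.u & PteSrc.u)
 * against those two bits. The helper name is hypothetical.
 */
#if 0
DECLINLINE(uint32_t) pgmSketchCombinedAccess(GSTPDE PdeSrc, GSTPTE PteSrc)
{
    return (uint32_t)(PdeSrc.u & PteSrc.u) & (X86_PTE_RW | X86_PTE_US);
}
#endif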
2765
2766
2767
2768/**
2769 * Prefetch a page/set of pages.
2770 *
2771 * Typically used to sync commonly used pages before entering raw mode
2772 * after a CR3 reload.
2773 *
2774 * @returns VBox status code.
2775 * @param pVM VM handle.
2776 * @param GCPtrPage Page to prefetch.
2777 */
2778PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, RTGCUINTPTR GCPtrPage)
2779{
2780#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
2781 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
2782 /*
2783 * Check that all Guest levels thru the PDE are present, getting the
2784 * PD and PDE in the process.
2785 */
2786 int rc = VINF_SUCCESS;
2787# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
2788# if PGM_GST_TYPE == PGM_TYPE_32BIT
2789 const unsigned iPDSrc = (RTGCUINTPTR)GCPtrPage >> GST_PD_SHIFT;
2790 PGSTPD pPDSrc = CTXSUFF(pVM->pgm.s.pGuestPD);
2791# elif PGM_GST_TYPE == PGM_TYPE_PAE
2792 unsigned iPDSrc;
2793 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc);
2794 if (!pPDSrc)
2795 return VINF_SUCCESS; /* not present */
2796# elif PGM_GST_TYPE == PGM_TYPE_AMD64
2797 unsigned iPDSrc;
2798 PX86PML4E pPml4eSrc;
2799 X86PDPE PdpeSrc;
2800 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
2801 if (!pPDSrc)
2802 return VINF_SUCCESS; /* not present */
2803# endif
2804 const GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
2805# else
2806 PGSTPD pPDSrc = NULL;
2807 const unsigned iPDSrc = 0;
2808 GSTPDE PdeSrc;
2809
2810 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
2811 PdeSrc.n.u1Present = 1;
2812 PdeSrc.n.u1Write = 1;
2813 PdeSrc.n.u1Accessed = 1;
2814 PdeSrc.n.u1User = 1;
2815# endif
2816
2817 if (PdeSrc.n.u1Present && PdeSrc.n.u1Accessed)
2818 {
2819# if PGM_SHW_TYPE == PGM_TYPE_32BIT
2820 const X86PDE PdeDst = pVM->pgm.s.CTXMID(p,32BitPD)->a[GCPtrPage >> SHW_PD_SHIFT];
2821# elif PGM_SHW_TYPE == PGM_TYPE_PAE
2822 const X86PDEPAE PdeDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0]->a[GCPtrPage >> SHW_PD_SHIFT];
2823# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
2824 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
2825 PX86PDPAE pPDDst;
2826 X86PDEPAE PdeDst;
2827
2828# if PGM_GST_TYPE == PGM_TYPE_PROT
2829 /* AMD-V nested paging */
2830 X86PML4E Pml4eSrc;
2831 X86PDPE PdpeSrc;
2832 PX86PML4E pPml4eSrc = &Pml4eSrc;
2833
2834 /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
2835 Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
2836 PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
2837# endif
2838
2839 int rc = PGMShwSyncLongModePDPtr(pVM, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
2840 AssertReturn(rc == VINF_SUCCESS /* *must* test for VINF_SUCCESS!! */, rc);
2841 Assert(pPDDst);
2842 PdeDst = pPDDst->a[iPDDst];
2843# endif
2844 if (!(PdeDst.u & PGM_PDFLAGS_MAPPING))
2845 {
2846 if (!PdeDst.n.u1Present)
2847 /** r=bird: This guy will set the A bit on the PDE, probably harmless. */
2848 rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, GCPtrPage);
2849 else
2850 {
2851 /** @note We used to sync PGM_SYNC_NR_PAGES pages, which triggered assertions in CSAM, because
2852 * R/W attributes of nearby pages were reset. Not sure how that could happen. Anyway, it
2853 * makes no sense to prefetch more than one page.
2854 */
2855 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, GCPtrPage, 1, 0);
2856 if (VBOX_SUCCESS(rc))
2857 rc = VINF_SUCCESS;
2858 }
2859 }
2860 }
2861 return rc;
2862#elif PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
2863 return VINF_SUCCESS; /* ignore */
2864#endif
2865}
2866
2867
2868
2869
2870/**
2871 * Syncs a page during a PGMVerifyAccess() call.
2872 *
2873 * @returns VBox status code (informational included).
2874 * @param GCPtrPage The address of the page to sync.
2875 * @param fPage The effective guest page flags.
2876 * @param uErr The trap error code.
2877 */
2878PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fPage, unsigned uErr)
2879{
2880 LogFlow(("VerifyAccessSyncPage: GCPtrPage=%VGv fPage=%#x uErr=%#x\n", GCPtrPage, fPage, uErr));
2881
2882 Assert(!HWACCMIsNestedPagingActive(pVM));
2883#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
2884 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
2885
2886# ifndef IN_RING0
2887 if (!(fPage & X86_PTE_US))
2888 {
2889 /*
2890 * Mark this page as safe.
2891 */
2892 /** @todo not correct for pages that contain both code and data!! */
2893 Log(("CSAMMarkPage %VGv; scanned=%d\n", GCPtrPage, true));
2894 CSAMMarkPage(pVM, (RTRCPTR)GCPtrPage, true);
2895 }
2896# endif
2897 /*
2898 * Get guest PD and index.
2899 */
2900
2901# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
2902# if PGM_GST_TYPE == PGM_TYPE_32BIT
2903 const unsigned iPDSrc = (RTGCUINTPTR)GCPtrPage >> GST_PD_SHIFT;
2904 PGSTPD pPDSrc = CTXSUFF(pVM->pgm.s.pGuestPD);
2905# elif PGM_GST_TYPE == PGM_TYPE_PAE
2906 unsigned iPDSrc;
2907 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc);
2908
2909 if (!pPDSrc)
2910 {
2911 Log(("PGMVerifyAccess: access violation for %VGv due to non-present PDPTR\n", GCPtrPage));
2912 return VINF_EM_RAW_GUEST_TRAP;
2913 }
2914# elif PGM_GST_TYPE == PGM_TYPE_AMD64
2915 unsigned iPDSrc;
2916 PX86PML4E pPml4eSrc;
2917 X86PDPE PdpeSrc;
2918 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
2919 if (!pPDSrc)
2920 {
2921 Log(("PGMVerifyAccess: access violation for %VGv due to non-present PDPTR\n", GCPtrPage));
2922 return VINF_EM_RAW_GUEST_TRAP;
2923 }
2924# endif
2925# else
2926 PGSTPD pPDSrc = NULL;
2927 const unsigned iPDSrc = 0;
2928# endif
2929 int rc = VINF_SUCCESS;
2930
2931 /*
2932 * First check if the shadow pd is present.
2933 */
2934# if PGM_SHW_TYPE == PGM_TYPE_32BIT
2935 PX86PDE pPdeDst = &pVM->pgm.s.CTXMID(p,32BitPD)->a[GCPtrPage >> SHW_PD_SHIFT];
2936# elif PGM_SHW_TYPE == PGM_TYPE_PAE
2937 PX86PDEPAE pPdeDst = &pVM->pgm.s.CTXMID(ap,PaePDs)[0]->a[GCPtrPage >> SHW_PD_SHIFT];
2938# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
2939 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
2940 PX86PDPAE pPDDst;
2941 PX86PDEPAE pPdeDst;
2942
2943# if PGM_GST_TYPE == PGM_TYPE_PROT
2944 /* AMD-V nested paging */
2945 X86PML4E Pml4eSrc;
2946 X86PDPE PdpeSrc;
2947 PX86PML4E pPml4eSrc = &Pml4eSrc;
2948
2949 /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
2950 Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
2951 PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
2952# endif
2953
2954 rc = PGMShwSyncLongModePDPtr(pVM, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
2955 AssertReturn(rc == VINF_SUCCESS /* *must* test for VINF_SUCCESS!! */, rc);
2956 Assert(pPDDst);
2957 pPdeDst = &pPDDst->a[iPDDst];
2958# endif
2959 if (!pPdeDst->n.u1Present)
2960 {
2961 rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, GCPtrPage);
2962 AssertRC(rc);
2963 if (rc != VINF_SUCCESS)
2964 return rc;
2965 }
2966
2967# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
2968 /* Check for dirty bit fault */
2969 rc = PGM_BTH_NAME(CheckPageFault)(pVM, uErr, pPdeDst, &pPDSrc->a[iPDSrc], GCPtrPage);
2970 if (rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT)
2971 Log(("PGMVerifyAccess: success (dirty)\n"));
2972 else
2973 {
2974 GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
2975#else
2976 {
2977 GSTPDE PdeSrc;
2978 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
2979 PdeSrc.n.u1Present = 1;
2980 PdeSrc.n.u1Write = 1;
2981 PdeSrc.n.u1Accessed = 1;
2982 PdeSrc.n.u1User = 1;
2983
2984#endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
2985 Assert(rc != VINF_EM_RAW_GUEST_TRAP);
2986 if (uErr & X86_TRAP_PF_US)
2987 STAM_COUNTER_INC(&pVM->pgm.s.StatGCPageOutOfSyncUser);
2988 else /* supervisor */
2989 STAM_COUNTER_INC(&pVM->pgm.s.StatGCPageOutOfSyncSupervisor);
2990
2991 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, GCPtrPage, 1, 0);
2992 if (VBOX_SUCCESS(rc))
2993 {
2994 /* Page was successfully synced */
2995 Log2(("PGMVerifyAccess: success (sync)\n"));
2996 rc = VINF_SUCCESS;
2997 }
2998 else
2999 {
3000 Log(("PGMVerifyAccess: access violation for %VGv rc=%d\n", GCPtrPage, rc));
3001 return VINF_EM_RAW_GUEST_TRAP;
3002 }
3003 }
3004 return rc;
3005
3006#else /* PGM_GST_TYPE != PGM_TYPE_32BIT */
3007
3008 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE));
3009 return VERR_INTERNAL_ERROR;
3010#endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
3011}
3012
3013
3014#if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
3015# if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
3016/**
3017 * Figures out which kind of shadow page this guest PDE warrants.
3018 *
3019 * @returns Shadow page kind.
3020 * @param pPdeSrc The guest PDE in question.
3021 * @param cr4 The current guest cr4 value.
3022 */
3023DECLINLINE(PGMPOOLKIND) PGM_BTH_NAME(CalcPageKind)(const GSTPDE *pPdeSrc, uint32_t cr4)
3024{
3025# if PGM_GST_TYPE == PGM_TYPE_AMD64
3026 if (!pPdeSrc->n.u1Size)
3027# else
3028 if (!pPdeSrc->n.u1Size || !(cr4 & X86_CR4_PSE))
3029# endif
3030 return BTH_PGMPOOLKIND_PT_FOR_PT;
3031 //switch (pPdeSrc->u & (X86_PDE4M_RW | X86_PDE4M_US /*| X86_PDE4M_PAE_NX*/))
3032 //{
3033 // case 0:
3034 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RO;
3035 // case X86_PDE4M_RW:
3036 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RW;
3037 // case X86_PDE4M_US:
3038 // return BTH_PGMPOOLKIND_PT_FOR_BIG_US;
3039 // case X86_PDE4M_RW | X86_PDE4M_US:
3040 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RW_US;
3041# if 0
3042 // case X86_PDE4M_PAE_NX:
3043 // return BTH_PGMPOOLKIND_PT_FOR_BIG_NX;
3044 // case X86_PDE4M_RW | X86_PDE4M_PAE_NX:
3045 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RW_NX;
3046 // case X86_PDE4M_US | X86_PDE4M_PAE_NX:
3047 // return BTH_PGMPOOLKIND_PT_FOR_BIG_US_NX;
3048 // case X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PAE_NX:
3049 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RW_US_NX;
3050# endif
3051 return BTH_PGMPOOLKIND_PT_FOR_BIG;
3052 //}
3053}
3054# endif
3055#endif
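/* Illustrative sketch (editorial, not part of the original source): a
 * SyncPT-style caller would feed the computed kind into the shadow page
 * pool allocator, roughly:
 *
 *     PGMPOOLKIND const enmKind = PGM_BTH_NAME(CalcPageKind)(&PdeSrc, cr4);
 *     rc = pgmPoolAlloc(pVM, GCPhys, enmKind, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
 *
 * The pgmPoolAlloc signature above is assumed from how the pool is used
 * elsewhere in this file; consult SyncPT for the authoritative call.
 */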
3056
3057#undef MY_STAM_COUNTER_INC
3058#define MY_STAM_COUNTER_INC(a) do { } while (0)
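/* Editorial note: with the stub above, every MY_STAM_COUNTER_INC() in the
 * SyncCR3 code below expands to nothing, i.e. the SyncCR3* statistics are
 * deliberately compiled out in this revision. */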
3059
3060
3061/**
3062 * Syncs the paging hierarchy starting at CR3.
3063 *
3064 * @returns VBox status code, no specials.
3065 * @param pVM The virtual machine.
3066 * @param cr0 Guest context CR0 register
3067 * @param cr3 Guest context CR3 register
3068 * @param cr4 Guest context CR4 register
3069 * @param fGlobal Including global page directories or not
3070 */
3071PGM_BTH_DECL(int, SyncCR3)(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
3072{
3073 if (VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
3074 fGlobal = true; /* Change this CR3 reload to be a global one. */
3075
3076#if PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
3077 /*
3078 * Update page access handlers.
3079 * The virtual handlers are always flushed, while the physical handlers are only updated on demand.
3080 * WARNING: We are incorrectly not doing global flushing on Virtual Handler updates. We'll
3081 * have to look into that later because it has a bad influence on performance.
3082 * @note SvL: There's no need for that. Just invalidate the virtual range(s).
3083 * bird: Yes, but that won't work for aliases.
3084 */
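    /* Added note (editorial, not in the original source): an alias here means
     * two distinct guest-virtual ranges backed by the same guest-physical page,
     * e.g. (addresses purely illustrative):
     *
     *     GCPtrA = 0xa0000000 -> GCPhysX
     *     GCPtrB = 0xb0000000 -> GCPhysX
     *
     * Invalidating only the range a virtual handler was registered on would miss
     * accesses through the other mapping, hence the full update done below. */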
3085 /** @todo this MUST go away. See #1557. */
3086 STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3Handlers), h);
3087 PGM_GST_NAME(HandlerVirtualUpdate)(pVM, cr4);
3088 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3Handlers), h);
3089#endif
3090
3091#ifdef PGMPOOL_WITH_MONITORING
3092 int rc = pgmPoolSyncCR3(pVM);
3093 if (rc != VINF_SUCCESS)
3094 return rc;
3095#endif
3096
3097#if PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
3098 /** @todo check if this is really necessary */
3099 HWACCMFlushTLB(pVM);
3100 return VINF_SUCCESS;
3101
3102#elif PGM_SHW_TYPE == PGM_TYPE_AMD64
3103 /* No need to check all paging levels; we zero out the shadow parts when the guest modifies its tables. */
3104 return VINF_SUCCESS;
3105#else
3106
3107 Assert(fGlobal || (cr4 & X86_CR4_PGE));
3108 MY_STAM_COUNTER_INC(fGlobal ? &pVM->pgm.s.CTXMID(Stat,SyncCR3Global) : &pVM->pgm.s.CTXMID(Stat,SyncCR3NotGlobal));
3109
3110# if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
3111# if PGM_GST_TYPE == PGM_TYPE_AMD64
3112 bool fBigPagesSupported = true;
3113# else
3114 bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
3115# endif
3116
3117 /*
3118 * Get page directory addresses.
3119 */
3120# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3121 PX86PDE pPDEDst = &pVM->pgm.s.CTXMID(p,32BitPD)->a[0];
3122# else /* PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64*/
3123# if PGM_GST_TYPE == PGM_TYPE_32BIT
3124 PX86PDEPAE pPDEDst = &pVM->pgm.s.CTXMID(ap,PaePDs)[0]->a[0];
3125# endif
3126# endif
3127
3128# if PGM_GST_TYPE == PGM_TYPE_32BIT
3129 PGSTPD pPDSrc = CTXSUFF(pVM->pgm.s.pGuestPD);
3130 Assert(pPDSrc);
3131# ifndef IN_GC
3132 Assert(PGMPhysGCPhys2HCPtrAssert(pVM, (RTGCPHYS)(cr3 & GST_CR3_PAGE_MASK), sizeof(*pPDSrc)) == pPDSrc);
3133# endif
3134# endif
3135
3136 /*
3137 * Iterate the page directory.
3138 */
3139 PPGMMAPPING pMapping;
3140 unsigned iPdNoMapping;
3141 const bool fRawR0Enabled = EMIsRawRing0Enabled(pVM);
3142 PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
3143
3144 /* Only check mappings if they are supposed to be put into the shadow page table. */
3145 if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
3146 {
3147 pMapping = pVM->pgm.s.CTXALLSUFF(pMappings);
3148 iPdNoMapping = (pMapping) ? (pMapping->GCPtr >> GST_PD_SHIFT) : ~0U;
3149 }
3150 else
3151 {
3152 pMapping = 0;
3153 iPdNoMapping = ~0U;
3154 }
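    /* Added note (editorial): iPdNoMapping holds the guest PD index of the next
     * hypervisor mapping ahead of the scan, or ~0U when none remain; the loop
     * below compares iPD against it to detect and resolve mapping conflicts. */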
3155# if PGM_GST_TYPE == PGM_TYPE_AMD64
3156 for (uint64_t iPml4e = 0; iPml4e < X86_PG_PAE_ENTRIES; iPml4e++)
3157 {
3158 PPGMPOOLPAGE pShwPdpt = NULL;
3159 PX86PML4E pPml4eSrc, pPml4eDst;
3160 RTGCPHYS GCPhysPdptSrc;
3161
3162 pPml4eSrc = &pVM->pgm.s.CTXSUFF(pGstPaePML4)->a[iPml4e];
3163 pPml4eDst = &pVM->pgm.s.CTXMID(p,PaePML4)->a[iPml4e];
3164
3165 /* Fetch the pgm pool shadow descriptor if the shadow pml4e is present. */
3166 if (!pPml4eDst->n.u1Present)
3167 continue;
3168 pShwPdpt = pgmPoolGetPage(pPool, pPml4eDst->u & X86_PML4E_PG_MASK);
3169
3170 GCPhysPdptSrc = pPml4eSrc->u & X86_PML4E_PG_MASK_FULL;
3171
3172 /* Anything significant changed? */
3173 if ( pPml4eSrc->n.u1Present != pPml4eDst->n.u1Present
3174 || GCPhysPdptSrc != pShwPdpt->GCPhys)
3175 {
3176 /* Free it. */
3177 LogFlow(("SyncCR3: Out-of-sync PML4E (GCPhys) GCPtr=%VGv %VGp vs %VGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
3178 (uint64_t)iPml4e << X86_PML4_SHIFT, pShwPdpt->GCPhys, GCPhysPdptSrc, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
3179 pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e);
3180 pPml4eDst->u = 0;
3181 continue;
3182 }
3183 /* Force an attribute sync. */
3184 pPml4eDst->n.u1User = pPml4eSrc->n.u1User;
3185 pPml4eDst->n.u1Write = pPml4eSrc->n.u1Write;
3186 pPml4eDst->n.u1NoExecute = pPml4eSrc->n.u1NoExecute;
3187
3188# else
3189 {
3190# endif
3191# if PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
3192 for (uint64_t iPdpte = 0; iPdpte < GST_PDPE_ENTRIES; iPdpte++)
3193 {
3194 unsigned iPDSrc;
3195# if PGM_GST_TYPE == PGM_TYPE_PAE
3196 PX86PDPAE pPDPAE = pVM->pgm.s.CTXMID(ap,PaePDs)[0];
3197 PX86PDEPAE pPDEDst = &pPDPAE->a[iPdpte * X86_PG_PAE_ENTRIES];
3198 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, iPdpte << X86_PDPT_SHIFT, &iPDSrc);
3199 PX86PDPT pPdptDst = pVM->pgm.s.CTXMID(p,PaePDPT); NOREF(pPdptDst);
3200 X86PDPE PdpeSrc = CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[iPdpte];
3201
3202 if (pPDSrc == NULL)
3203 {
3204 /* PDPE not present */
3205 if (pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPdpte].n.u1Present)
3206 {
3207 LogFlow(("SyncCR3: guest PDPE %d not present; clear shw pdpe\n", iPdpte));
3208 /* for each page directory entry */
3209 for (unsigned iPD = 0; iPD < RT_ELEMENTS(pPDSrc->a); iPD++)
3210 {
3211 if ( pPDEDst[iPD].n.u1Present
3212 && !(pPDEDst[iPD].u & PGM_PDFLAGS_MAPPING))
3213 {
3214 pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst[iPD].u & SHW_PDE_PG_MASK), SHW_POOL_ROOT_IDX, iPdpte * X86_PG_PAE_ENTRIES + iPD);
3215 pPDEDst[iPD].u = 0;
3216 }
3217 }
3218 }
3219 if (!(pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPdpte].u & PGM_PLXFLAGS_MAPPING))
3220 pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPdpte].n.u1Present = 0;
3221 continue;
3222 }
3223# else /* PGM_GST_TYPE != PGM_TYPE_PAE */
3224 PPGMPOOLPAGE pShwPde = NULL;
3225 RTGCPHYS GCPhysPdeSrc;
3226 PX86PDPE pPdpeDst;
3227 PX86PML4E pPml4eSrc;
3228 X86PDPE PdpeSrc;
3229 PX86PDPT pPdptDst;
3230 PX86PDPAE pPDDst;
3231 PX86PDEPAE pPDEDst;
3232 RTGCUINTPTR GCPtr = (iPml4e << X86_PML4_SHIFT) | (iPdpte << X86_PDPT_SHIFT);
3233 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtr, &pPml4eSrc, &PdpeSrc, &iPDSrc);
3234
3235 int rc = PGMShwGetLongModePDPtr(pVM, GCPtr, &pPdptDst, &pPDDst);
3236 if (rc != VINF_SUCCESS)
3237 {
3238 if (rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
3239 break; /* next PML4E */
3240
3241 AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT, ("Unexpected rc=%Vrc\n", rc));
3242 continue; /* next PDPTE */
3243 }
3244 Assert(pPDDst);
3245 pPDEDst = &pPDDst->a[0];
3246 Assert(iPDSrc == 0);
3247
3248 pPdpeDst = &pPdptDst->a[iPdpte];
3249
3250 /* Fetch the pgm pool shadow descriptor if the shadow pdpte is present. */
3251 if (!pPdpeDst->n.u1Present)
3252 continue; /* next PDPTE */
3253
3254 pShwPde = pgmPoolGetPage(pPool, pPdpeDst->u & X86_PDPE_PG_MASK);
3255 GCPhysPdeSrc = PdpeSrc.u & X86_PDPE_PG_MASK;
3256
3257 /* Anything significant changed? */
3258 if ( PdpeSrc.n.u1Present != pPdpeDst->n.u1Present
3259 || GCPhysPdeSrc != pShwPde->GCPhys)
3260 {
3261 /* Free it. */
3262 LogFlow(("SyncCR3: Out-of-sync PDPE (GCPhys) GCPtr=%VGv %VGp vs %VGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
3263 ((uint64_t)iPml4e << X86_PML4_SHIFT) + ((uint64_t)iPdpte << X86_PDPT_SHIFT), pShwPde->GCPhys, GCPhysPdeSrc, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
3264
3265 /* Mark it as not present if there's no hypervisor mapping present. (bit flipped at the top of Trap0eHandler) */
3266 Assert(!(pPdpeDst->u & PGM_PLXFLAGS_MAPPING));
3267 pgmPoolFreeByPage(pPool, pShwPde, pShwPde->idx, iPdpte);
3268 pPdpeDst->u = 0;
3269 continue; /* next guest PDPTE */
3270 }
3271 /* Force an attribute sync. */
3272 pPdpeDst->lm.u1User = PdpeSrc.lm.u1User;
3273 pPdpeDst->lm.u1Write = PdpeSrc.lm.u1Write;
3274 pPdpeDst->lm.u1NoExecute = PdpeSrc.lm.u1NoExecute;
3275# endif /* PGM_GST_TYPE != PGM_TYPE_PAE */
3276
3277# else /* PGM_GST_TYPE != PGM_TYPE_PAE && PGM_GST_TYPE != PGM_TYPE_AMD64 */
3278 {
3279# endif /* PGM_GST_TYPE != PGM_TYPE_PAE && PGM_GST_TYPE != PGM_TYPE_AMD64 */
3280 for (unsigned iPD = 0; iPD < RT_ELEMENTS(pPDSrc->a); iPD++)
3281 {
3282# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3283 Assert(&pVM->pgm.s.CTXMID(p,32BitPD)->a[iPD] == pPDEDst);
3284# elif PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3285 AssertMsg(&pVM->pgm.s.CTXMID(ap,PaePDs)[iPD * 2 / 512]->a[iPD * 2 % 512] == pPDEDst, ("%p vs %p\n", &pVM->pgm.s.CTXMID(ap,PaePDs)[iPD * 2 / 512]->a[iPD * 2 % 512], pPDEDst));
3286# endif
3287 register GSTPDE PdeSrc = pPDSrc->a[iPD];
3288 if ( PdeSrc.n.u1Present
3289 && (PdeSrc.n.u1User || fRawR0Enabled))
3290 {
3291# if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
3292 || PGM_GST_TYPE == PGM_TYPE_PAE) \
3293 && !defined(PGM_WITHOUT_MAPPINGS)
3294
3295 /*
3296 * Check for conflicts with GC mappings.
3297 */
3298# if PGM_GST_TYPE == PGM_TYPE_PAE
3299 if (iPD + iPdpte * X86_PG_PAE_ENTRIES == iPdNoMapping)
3300# else
3301 if (iPD == iPdNoMapping)
3302# endif
3303 {
3304 if (pVM->pgm.s.fMappingsFixed)
3305 {
3306 /* It's fixed, just skip the mapping. */
3307 const unsigned cPTs = pMapping->cb >> GST_PD_SHIFT;
3308 iPD += cPTs - 1;
3309 pPDEDst += cPTs + (PGM_GST_TYPE != PGM_SHW_TYPE) * cPTs; /* Only applies to the pae shadow and 32 bits guest case */
3310 pMapping = pMapping->CTXALLSUFF(pNext);
3311 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
3312 continue;
3313 }
3314# ifdef IN_RING3
3315# if PGM_GST_TYPE == PGM_TYPE_32BIT
3316 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT);
3317# elif PGM_GST_TYPE == PGM_TYPE_PAE
3318 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPdpte << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));
3319# endif
3320 if (VBOX_FAILURE(rc))
3321 return rc;
3322
3323 /*
3324 * Update iPdNoMapping and pMapping.
3325 */
3326 pMapping = pVM->pgm.s.pMappingsR3;
3327 while (pMapping && pMapping->GCPtr < (iPD << GST_PD_SHIFT))
3328 pMapping = pMapping->pNextR3;
3329 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
3330# else
3331 LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
3332 return VINF_PGM_SYNC_CR3;
3333# endif
3334 }
3335# else /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
3336 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
3337# endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
3338 /*
3339 * Sync page directory entry.
3340 *
3341 * The current approach is to allocate the page table but to set
3342 * the entry to not-present and postpone the page table syncing till
3343 * it's actually used.
3344 */
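                    /* Added note (editorial): leaving the shadow PDE not-present
                     * means the first guest access to the range faults into
                     * Trap0eHandler, which performs the real sync via SyncPT;
                     * SyncCR3 thus stays cheap for ranges the guest never touches. */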
3345# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3346 for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
3347# elif PGM_GST_TYPE == PGM_TYPE_PAE
3348 const unsigned iPdShw = iPD + iPdpte * X86_PG_PAE_ENTRIES; NOREF(iPdShw);
3349# else
3350 const unsigned iPdShw = iPD; NOREF(iPdShw);
3351# endif
3352 {
3353 SHWPDE PdeDst = *pPDEDst;
3354 if (PdeDst.n.u1Present)
3355 {
3356 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK);
3357 RTGCPHYS GCPhys;
3358 if ( !PdeSrc.b.u1Size
3359 || !fBigPagesSupported)
3360 {
3361 GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
3362# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3363 /* Select the right PDE as we're emulating one 4 KB guest page table (1024 entries) with 2 shadow page tables (512 entries each). */
3364 GCPhys |= i * (PAGE_SIZE / 2);
3365# endif
3366 }
3367 else
3368 {
3369 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
3370# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3371 /* Select the right PDE as we're emulating a 4 MB guest page with two 2 MB shadow PDEs. */
3372 GCPhys |= i * X86_PAGE_2M_SIZE;
3373# endif
3374 }
3375
3376 if ( pShwPage->GCPhys == GCPhys
3377 && pShwPage->enmKind == PGM_BTH_NAME(CalcPageKind)(&PdeSrc, cr4)
3378 && ( pShwPage->fCached
3379 || ( !fGlobal
3380 && ( false
3381# ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
3382 || ( (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
3383# if PGM_GST_TYPE == PGM_TYPE_AMD64
3384 && (cr4 & X86_CR4_PGE)) /* global 2/4MB page. */
3385# else
3386 && (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE)) /* global 2/4MB page. */
3387# endif
3388 || ( !pShwPage->fSeenNonGlobal
3389 && (cr4 & X86_CR4_PGE))
3390# endif
3391 )
3392 )
3393 )
3394 && ( (PdeSrc.u & (X86_PDE_US | X86_PDE_RW)) == (PdeDst.u & (X86_PDE_US | X86_PDE_RW))
3395 || ( fBigPagesSupported
3396 && ((PdeSrc.u & (X86_PDE_US | X86_PDE4M_PS | X86_PDE4M_D)) | PGM_PDFLAGS_TRACK_DIRTY)
3397 == ((PdeDst.u & (X86_PDE_US | X86_PDE_RW | PGM_PDFLAGS_TRACK_DIRTY)) | X86_PDE4M_PS))
3398 )
3399 )
3400 {
3401# ifdef VBOX_WITH_STATISTICS
3402 if ( !fGlobal
3403 && (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
3404# if PGM_GST_TYPE == PGM_TYPE_AMD64
3405 && (cr4 & X86_CR4_PGE)) /* global 2/4MB page. */
3406# else
3407 && (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE))
3408# endif
3409 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstSkippedGlobalPD));
3410 else if (!fGlobal && !pShwPage->fSeenNonGlobal && (cr4 & X86_CR4_PGE))
3411 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstSkippedGlobalPT));
3412 else
3413 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstCacheHit));
3414# endif /* VBOX_WITH_STATISTICS */
3415 /** @todo a replacement strategy isn't really needed unless we're using a very small pool < 512 pages.
3416 * The whole ageing stuff should be put in yet another set of #ifdefs. For now, let's just skip it. */
3417 //# ifdef PGMPOOL_WITH_CACHE
3418 // pgmPoolCacheUsed(pPool, pShwPage);
3419 //# endif
3420 }
3421 else
3422 {
3423# if PGM_GST_TYPE == PGM_TYPE_AMD64
3424 pgmPoolFreeByPage(pPool, pShwPage, pShwPde->idx, iPdShw);
3425# else
3426 pgmPoolFreeByPage(pPool, pShwPage, SHW_POOL_ROOT_IDX, iPdShw);
3427# endif
3428 pPDEDst->u = 0;
3429 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstFreed));
3430 }
3431 }
3432 else
3433 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstNotPresent));
3434 pPDEDst++;
3435 }
3436 }
3437# if PGM_GST_TYPE == PGM_TYPE_PAE
3438 else if (iPD + iPdpte * X86_PG_PAE_ENTRIES != iPdNoMapping)
3439# else
3440 else if (iPD != iPdNoMapping)
3441# endif
3442 {
3443 /*
3444 * Check if there is any page directory to mark not present here.
3445 */
3446# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3447 for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
3448# elif PGM_GST_TYPE == PGM_TYPE_PAE
3449 const unsigned iPdShw = iPD + iPdpte * X86_PG_PAE_ENTRIES; NOREF(iPdShw);
3450# else
3451 const unsigned iPdShw = iPD; NOREF(iPdShw);
3452# endif
3453 {
3454 if (pPDEDst->n.u1Present)
3455 {
3456# if PGM_GST_TYPE == PGM_TYPE_AMD64
3457 pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst->u & SHW_PDE_PG_MASK), pShwPde->idx, iPdShw);
3458# else
3459 pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst->u & SHW_PDE_PG_MASK), SHW_POOL_ROOT_IDX, iPdShw);
3460# endif
3461 pPDEDst->u = 0;
3462 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstFreedSrcNP));
3463 }
3464 pPDEDst++;
3465 }
3466 }
3467 else
3468 {
3469# if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
3470 || PGM_GST_TYPE == PGM_TYPE_PAE) \
3471 && !defined(PGM_WITHOUT_MAPPINGS)
3472
3473 const unsigned cPTs = pMapping->cb >> GST_PD_SHIFT;
3474
3475 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
3476 if (pVM->pgm.s.fMappingsFixed)
3477 {
3478 /* It's fixed, just skip the mapping. */
3479 pMapping = pMapping->CTXALLSUFF(pNext);
3480 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
3481 }
3482 else
3483 {
3484 /*
3485 * Check for conflicts in subsequent page tables
3486 * and advance to the next mapping.
3487 */
3488 iPdNoMapping = ~0U;
3489 unsigned iPT = cPTs;
3490 while (iPT-- > 1)
3491 {
3492 if ( pPDSrc->a[iPD + iPT].n.u1Present
3493 && (pPDSrc->a[iPD + iPT].n.u1User || fRawR0Enabled))
3494 {
3495# ifdef IN_RING3
3496# if PGM_GST_TYPE == PGM_TYPE_32BIT
3497 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT);
3498# elif PGM_GST_TYPE == PGM_TYPE_PAE
3499 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPdpte << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));
3500# endif
3501 if (VBOX_FAILURE(rc))
3502 return rc;
3503
3504 /*
3505 * Update iPdNoMapping and pMapping.
3506 */
3507 pMapping = pVM->pgm.s.CTXALLSUFF(pMappings);
3508 while (pMapping && pMapping->GCPtr < (iPD << GST_PD_SHIFT))
3509 pMapping = pMapping->CTXALLSUFF(pNext);
3510 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
3511 break;
3512# else
3513 LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
3514 return VINF_PGM_SYNC_CR3;
3515# endif
3516 }
3517 }
3518 if (iPdNoMapping == ~0U && pMapping)
3519 {
3520 pMapping = pMapping->CTXALLSUFF(pNext);
3521 if (pMapping)
3522 iPdNoMapping = pMapping->GCPtr >> GST_PD_SHIFT;
3523 }
3524 }
3525
3526 /* advance. */
3527 iPD += cPTs - 1;
3528 pPDEDst += cPTs + (PGM_GST_TYPE != PGM_SHW_TYPE) * cPTs; /* Only applies to the pae shadow and 32 bits guest case */
3529# if PGM_GST_TYPE != PGM_SHW_TYPE
3530 AssertCompile(PGM_GST_TYPE == PGM_TYPE_32BIT && PGM_SHW_TYPE == PGM_TYPE_PAE);
3531# endif
3532# else /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
3533 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
3534# endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
3535 }
3536
3537 } /* for iPD */
3538 } /* for each PDPTE (PAE) */
3539 } /* for each page map level 4 entry (amd64) */
3540 return VINF_SUCCESS;
3541
3542# else /* guest real and protected mode */
3543 return VINF_SUCCESS;
3544# endif
3545#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT */
3546}
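/* Illustrative sketch (editorial, not part of the original source): this
 * template instance is not called directly; mode-agnostic code reaches it
 * through the per-mode function table, roughly along the lines of
 *
 *     rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
 *
 * The PGM_BTH_PFN dispatch shape is assumed from the PGM template
 * conventions; see PGMSyncCR3 in PGMAll.cpp for the authoritative caller.
 */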
3547
3548
3549
3550
3551#ifdef VBOX_STRICT
3552#ifdef IN_GC
3553# undef AssertMsgFailed
3554# define AssertMsgFailed Log
3555#endif
3556#ifdef IN_RING3
3557# include <VBox/dbgf.h>
3558
3559/**
3560 * Dumps a page table hierarchy using only physical addresses and cr4/lm flags.
3561 *
3562 * @returns VBox status code (VINF_SUCCESS).
3563 * @param pVM The VM handle.
3564 * @param cr3 The root of the hierarchy.
3565 * @param cr4 The CR4 register value; only the PAE and PSE bits are currently used.
3566 * @param fLongMode Set if long mode, false if not long mode.
3567 * @param cMaxDepth Number of levels to dump.
3568 * @param pHlp Pointer to the output functions.
3569 */
3570__BEGIN_DECLS
3571VMMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint32_t cr3, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp);
3572__END_DECLS
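/* Illustrative sketch (editorial): a ring-3 caller could dump the current
 * hierarchy four levels deep with something like
 *
 *     rc = PGMR3DumpHierarchyHC(pVM, CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM),
 *                               false, 4, DBGFR3InfoLogHlp());
 *
 * where false = not long mode and 4 = cMaxDepth. DBGFR3InfoLogHlp() as the
 * output-helper source is an assumption; pass your own PCDBGFINFOHLP if needed.
 */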
3573
3574#endif
3575
3576/**
3577 * Checks that the shadow page table is in sync with the guest one.
3578 *
3579 * @returns The number of errors.
3580 * @param pVM The virtual machine.
3581 * @param cr3 Guest context CR3 register
3582 * @param cr4 Guest context CR4 register
3583 * @param GCPtr Where to start. Defaults to 0.
3584 * @param cb How much to check. Defaults to everything.
3585 */
3586PGM_BTH_DECL(unsigned, AssertCR3)(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb)
3587{
3588#if PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
3589 return 0;
3590#else
3591 unsigned cErrors = 0;
3592
3593#if PGM_GST_TYPE == PGM_TYPE_PAE
3594 /** @todo Currently broken; crashes somewhere below. */
3595 AssertFailed();
3596#endif
3597
3598#if PGM_GST_TYPE == PGM_TYPE_32BIT \
3599 || PGM_GST_TYPE == PGM_TYPE_PAE \
3600 || PGM_GST_TYPE == PGM_TYPE_AMD64
3601
3602# if PGM_GST_TYPE == PGM_TYPE_AMD64
3603 bool fBigPagesSupported = true;
3604# else
3605 bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
3606# endif
3607 PPGM pPGM = &pVM->pgm.s;
3608 RTGCPHYS GCPhysGst; /* page address derived from the guest page tables. */
3609 RTHCPHYS HCPhysShw; /* page address derived from the shadow page tables. */
3610# ifndef IN_RING0
3611 RTHCPHYS HCPhys; /* general usage. */
3612# endif
3613 int rc;
3614
3615 /*
3616 * Check that the Guest CR3 and all its mappings are correct.
3617 */
3618 AssertMsgReturn(pPGM->GCPhysCR3 == (cr3 & GST_CR3_PAGE_MASK),
3619 ("Invalid GCPhysCR3=%VGp cr3=%VGp\n", pPGM->GCPhysCR3, (RTGCPHYS)cr3),
3620 false);
3621# if !defined(IN_RING0) && PGM_GST_TYPE != PGM_TYPE_AMD64
3622# if PGM_GST_TYPE == PGM_TYPE_32BIT
3623 rc = PGMShwGetPage(pVM, (RTGCPTR)pPGM->pGuestPDGC, NULL, &HCPhysShw);
3624# else
3625 rc = PGMShwGetPage(pVM, (RTGCPTR)pPGM->pGstPaePDPTGC, NULL, &HCPhysShw);
3626# endif
3627 AssertRCReturn(rc, 1);
3628 HCPhys = NIL_RTHCPHYS;
3629 rc = pgmRamGCPhys2HCPhys(pPGM, cr3 & GST_CR3_PAGE_MASK, &HCPhys);
3630 AssertMsgReturn(HCPhys == HCPhysShw, ("HCPhys=%VHp HCPhyswShw=%VHp (cr3)\n", HCPhys, HCPhysShw), false);
3631# if PGM_GST_TYPE == PGM_TYPE_32BIT && defined(IN_RING3)
3632 RTGCPHYS GCPhys;
3633 rc = PGMR3DbgHCPtr2GCPhys(pVM, pPGM->pGuestPDHC, &GCPhys);
3634 AssertRCReturn(rc, 1);
3635 AssertMsgReturn((cr3 & GST_CR3_PAGE_MASK) == GCPhys, ("GCPhys=%VGp cr3=%VGp\n", GCPhys, (RTGCPHYS)cr3), false);
3636# endif
3637#endif /* !IN_RING0 */
3638
3639 /*
3640 * Get and check the Shadow CR3.
3641 */
3642# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3643 unsigned cPDEs = X86_PG_ENTRIES;
3644 unsigned ulIncrement = X86_PG_ENTRIES * PAGE_SIZE;
3645# elif PGM_SHW_TYPE == PGM_TYPE_PAE
3646# if PGM_GST_TYPE == PGM_TYPE_32BIT
3647 unsigned cPDEs = X86_PG_PAE_ENTRIES * 4; /* treat it as a 2048 entry table. */
3648# else
3649 unsigned cPDEs = X86_PG_PAE_ENTRIES;
3650# endif
3651 unsigned ulIncrement = X86_PG_PAE_ENTRIES * PAGE_SIZE;
3652# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
3653 unsigned cPDEs = X86_PG_PAE_ENTRIES;
3654 unsigned ulIncrement = X86_PG_PAE_ENTRIES * PAGE_SIZE;
3655# endif
3656 if (cb != ~(RTGCUINTPTR)0)
3657 cPDEs = RT_MIN(cb >> SHW_PD_SHIFT, 1);
3658
3659/** @todo call the other two PGMAssert*() functions. */
3660
3661# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
3662 PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
3663# endif
3664
3665# if PGM_GST_TYPE == PGM_TYPE_AMD64
3666 unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3667
3668 for (; iPml4e < X86_PG_PAE_ENTRIES; iPml4e++)
3669 {
3670 PPGMPOOLPAGE pShwPdpt = NULL;
3671 PX86PML4E pPml4eSrc, pPml4eDst;
3672 RTGCPHYS GCPhysPdptSrc;
3673
3674 pPml4eSrc = &pVM->pgm.s.CTXSUFF(pGstPaePML4)->a[iPml4e];
3675 pPml4eDst = &pVM->pgm.s.CTXMID(p,PaePML4)->a[iPml4e];
3676
3677 /* Fetch the pgm pool shadow descriptor if the shadow pml4e is present. */
3678 if (!pPml4eDst->n.u1Present)
3679 {
3680 GCPtr += UINT64_C(_2M * 512 * 512);
3681 continue;
3682 }
3683
3684# if PGM_GST_TYPE == PGM_TYPE_PAE
3685 /* not correct to call pgmPoolGetPage */
3686 AssertFailed();
3687# endif
3688 pShwPdpt = pgmPoolGetPage(pPool, pPml4eDst->u & X86_PML4E_PG_MASK);
3689 GCPhysPdptSrc = pPml4eSrc->u & X86_PML4E_PG_MASK_FULL;
3690
3691 if (pPml4eSrc->n.u1Present != pPml4eDst->n.u1Present)
3692 {
3693 AssertMsgFailed(("Present bit doesn't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
3694 GCPtr += UINT64_C(_2M * 512 * 512);
3695 cErrors++;
3696 continue;
3697 }
3698
3699 if (GCPhysPdptSrc != pShwPdpt->GCPhys)
3700 {
3701 AssertMsgFailed(("Physical address doesn't match! iPml4e %d pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4e, pPml4eDst->u, pPml4eSrc->u, pShwPdpt->GCPhys, GCPhysPdptSrc));
3702 GCPtr += UINT64_C(_2M * 512 * 512);
3703 cErrors++;
3704 continue;
3705 }
3706
3707 if ( pPml4eDst->n.u1User != pPml4eSrc->n.u1User
3708 || pPml4eDst->n.u1Write != pPml4eSrc->n.u1Write
3709 || pPml4eDst->n.u1NoExecute != pPml4eSrc->n.u1NoExecute)
3710 {
3711 AssertMsgFailed(("User/Write/NoExec bits don't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
3712 GCPtr += UINT64_C(_2M * 512 * 512);
3713 cErrors++;
3714 continue;
3715 }
3716# else
3717 {
3718# endif
3719
3720# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
3721 /*
3722 * Check the PDPTEs too.
3723 */
3724 unsigned iPdpte = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
3725
3726 for (;iPdpte <= SHW_PDPT_MASK; iPdpte++)
3727 {
3728 unsigned iPDSrc;
3729 PPGMPOOLPAGE pShwPde = NULL;
3730 PX86PDPE pPdpeDst;
3731 RTGCPHYS GCPhysPdeSrc;
3732# if PGM_GST_TYPE == PGM_TYPE_PAE
3733 PX86PDPAE pPDDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0];
3734 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtr, &iPDSrc);
3735 PX86PDPT pPdptDst = pVM->pgm.s.CTXMID(p,PaePDPT);
3736 X86PDPE PdpeSrc = CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[iPdpte];
3737# else
3738 PX86PML4E pPml4eSrc;
3739 X86PDPE PdpeSrc;
3740 PX86PDPT pPdptDst;
3741 PX86PDPAE pPDDst;
3742 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtr, &pPml4eSrc, &PdpeSrc, &iPDSrc);
3743
3744 rc = PGMShwGetLongModePDPtr(pVM, GCPtr, &pPdptDst, &pPDDst);
3745 if (rc != VINF_SUCCESS)
3746 {
3747 AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT, ("Unexpected rc=%Vrc\n", rc));
3748 GCPtr += 512 * _2M;
3749 continue; /* next PDPTE */
3750 }
3751 Assert(pPDDst);
3752# endif
3753 Assert(iPDSrc == 0);
3754
3755 pPdpeDst = &pPdptDst->a[iPdpte];
3756
3757 if (!pPdpeDst->n.u1Present)
3758 {
3759 GCPtr += 512 * _2M;
3760 continue; /* next PDPTE */
3761 }
3762
3763 pShwPde = pgmPoolGetPage(pPool, pPdpeDst->u & X86_PDPE_PG_MASK);
3764 GCPhysPdeSrc = PdpeSrc.u & X86_PDPE_PG_MASK;
3765
3766 if (pPdpeDst->n.u1Present != PdpeSrc.n.u1Present)
3767 {
3768 AssertMsgFailed(("Present bit doesn't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));
3769 GCPtr += 512 * _2M;
3770 cErrors++;
3771 continue;
3772 }
3773
3774 if (GCPhysPdeSrc != pShwPde->GCPhys)
3775 {
3776# if PGM_GST_TYPE == PGM_TYPE_AMD64
3777 AssertMsgFailed(("Physical address doesn't match! iPml4e %d iPdpte %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4e, iPdpte, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
3778# else
3779 AssertMsgFailed(("Physical address doesn't match! iPdpte %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPdpte, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
3780# endif
3781 GCPtr += 512 * _2M;
3782 cErrors++;
3783 continue;
3784 }
3785
3786# if PGM_GST_TYPE == PGM_TYPE_AMD64
3787 if ( pPdpeDst->lm.u1User != PdpeSrc.lm.u1User
3788 || pPdpeDst->lm.u1Write != PdpeSrc.lm.u1Write
3789 || pPdpeDst->lm.u1NoExecute != PdpeSrc.lm.u1NoExecute)
3790 {
3791 AssertMsgFailed(("User/Write/NoExec bits don't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));
3792 GCPtr += 512 * _2M;
3793 cErrors++;
3794 continue;
3795 }
3796# endif
3797
3798# else
3799 {
3800# endif
3801# if PGM_GST_TYPE == PGM_TYPE_32BIT
3802 const GSTPD *pPDSrc = CTXSUFF(pPGM->pGuestPD);
3803# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3804 const X86PD *pPDDst = pPGM->CTXMID(p,32BitPD);
3805# else
3806 const PX86PDPAE pPDDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0]; /* We treat this as a PD with 2048 entries, so no need to and with SHW_PD_MASK to get iPDDst */
3807# endif
3808# endif
3809 /*
3810 * Iterate the shadow page directory.
3811 */
3812 GCPtr = (GCPtr >> SHW_PD_SHIFT) << SHW_PD_SHIFT;
3813 unsigned iPDDst = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
3814
3815 for (;
3816 iPDDst < cPDEs;
3817 iPDDst++, GCPtr += ulIncrement)
3818 {
3819 const SHWPDE PdeDst = pPDDst->a[iPDDst];
3820 if (PdeDst.u & PGM_PDFLAGS_MAPPING)
3821 {
3822 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
3823 if ((PdeDst.u & X86_PDE_AVL_MASK) != PGM_PDFLAGS_MAPPING)
3824 {
3825 AssertMsgFailed(("Mapping shall only have PGM_PDFLAGS_MAPPING set! PdeDst.u=%#RX64\n", (uint64_t)PdeDst.u));
3826 cErrors++;
3827 continue;
3828 }
3829 }
3830 else if ( (PdeDst.u & X86_PDE_P)
3831 || ((PdeDst.u & (X86_PDE_P | PGM_PDFLAGS_TRACK_DIRTY)) == (X86_PDE_P | PGM_PDFLAGS_TRACK_DIRTY))
3832 )
3833 {
3834 HCPhysShw = PdeDst.u & SHW_PDE_PG_MASK;
3835 PPGMPOOLPAGE pPoolPage = pgmPoolGetPageByHCPhys(pVM, HCPhysShw);
3836 if (!pPoolPage)
3837 {
3838 AssertMsgFailed(("Invalid page table address %VGp at %VGv! PdeDst=%#RX64\n",
3839 HCPhysShw, GCPtr, (uint64_t)PdeDst.u));
3840 cErrors++;
3841 continue;
3842 }
3843 const SHWPT *pPTDst = (const SHWPT *)PGMPOOL_PAGE_2_PTR(pVM, pPoolPage);
3844
3845 if (PdeDst.u & (X86_PDE4M_PWT | X86_PDE4M_PCD))
3846 {
3847 AssertMsgFailed(("PDE flags PWT and/or PCD is set at %VGv! These flags are not virtualized! PdeDst=%#RX64\n",
3848 GCPtr, (uint64_t)PdeDst.u));
3849 cErrors++;
3850 }
3851
3852 if (PdeDst.u & (X86_PDE4M_G | X86_PDE4M_D))
3853 {
3854 AssertMsgFailed(("4K PDE reserved flags at %VGv! PdeDst=%#RX64\n",
3855 GCPtr, (uint64_t)PdeDst.u));
3856 cErrors++;
3857 }
3858
3859 const GSTPDE PdeSrc = pPDSrc->a[(iPDDst >> (GST_PD_SHIFT - SHW_PD_SHIFT)) & GST_PD_MASK];
3860 if (!PdeSrc.n.u1Present)
3861 {
3862 AssertMsgFailed(("Guest PDE at %VGv is not present! PdeDst=%#RX64 PdeSrc=%#RX64\n",
3863 GCPtr, (uint64_t)PdeDst.u, (uint64_t)PdeSrc.u));
3864 cErrors++;
3865 continue;
3866 }
3867
3868 if ( !PdeSrc.b.u1Size
3869 || !fBigPagesSupported)
3870 {
3871 GCPhysGst = PdeSrc.u & GST_PDE_PG_MASK;
3872# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3873 GCPhysGst |= (iPDDst & 1) * (PAGE_SIZE / 2);
3874# endif
3875 }
3876 else
3877 {
3878# if PGM_GST_TYPE == PGM_TYPE_32BIT
3879 if (PdeSrc.u & X86_PDE4M_PG_HIGH_MASK)
3880 {
3881 AssertMsgFailed(("Guest PDE at %VGv is using PSE36 or similar! PdeSrc=%#RX64\n",
3882 GCPtr, (uint64_t)PdeSrc.u));
3883 cErrors++;
3884 continue;
3885 }
3886# endif
3887 GCPhysGst = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
3888# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3889 GCPhysGst |= GCPtr & RT_BIT(X86_PAGE_2M_SHIFT);
3890# endif
3891 }
3892
3893 if ( pPoolPage->enmKind
3894 != (!PdeSrc.b.u1Size || !fBigPagesSupported ? BTH_PGMPOOLKIND_PT_FOR_PT : BTH_PGMPOOLKIND_PT_FOR_BIG))
3895 {
3896 AssertMsgFailed(("Invalid shadow page table kind %d at %VGv! PdeSrc=%#RX64\n",
3897 pPoolPage->enmKind, GCPtr, (uint64_t)PdeSrc.u));
3898 cErrors++;
3899 }
3900
3901 PPGMPAGE pPhysPage = pgmPhysGetPage(pPGM, GCPhysGst);
3902 if (!pPhysPage)
3903 {
3904 AssertMsgFailed(("Cannot find guest physical address %VGp in the PDE at %VGv! PdeSrc=%#RX64\n",
3905 GCPhysGst, GCPtr, (uint64_t)PdeSrc.u));
3906 cErrors++;
3907 continue;
3908 }
3909
3910 if (GCPhysGst != pPoolPage->GCPhys)
3911 {
3912 AssertMsgFailed(("GCPhysGst=%VGp != pPage->GCPhys=%VGp at %VGv\n",
3913 GCPhysGst, pPoolPage->GCPhys, GCPtr));
3914 cErrors++;
3915 continue;
3916 }
3917
3918 if ( !PdeSrc.b.u1Size
3919 || !fBigPagesSupported)
3920 {
3921 /*
3922 * Page Table.
3923 */
3924 const GSTPT *pPTSrc;
3925 rc = PGM_GCPHYS_2_PTR(pVM, GCPhysGst & ~(RTGCPHYS)(PAGE_SIZE - 1), &pPTSrc);
3926 if (VBOX_FAILURE(rc))
3927 {
3928 AssertMsgFailed(("Cannot map/convert guest physical address %VGp in the PDE at %VGv! PdeSrc=%#RX64\n",
3929 GCPhysGst, GCPtr, (uint64_t)PdeSrc.u));
3930 cErrors++;
3931 continue;
3932 }
3933 if ( (PdeSrc.u & (X86_PDE_P | X86_PDE_US | X86_PDE_RW/* | X86_PDE_A*/))
3934 != (PdeDst.u & (X86_PDE_P | X86_PDE_US | X86_PDE_RW/* | X86_PDE_A*/)))
3935 {
3936 /// @todo We get here a lot on out-of-sync CR3 entries. The access handler should zap them to avoid false alarms here!
3937 // (This problem will go away when/if we shadow multiple CR3s.)
3938 AssertMsgFailed(("4K PDE flags mismatch at %VGv! PdeSrc=%#RX64 PdeDst=%#RX64\n",
3939 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
3940 cErrors++;
3941 continue;
3942 }
3943 if (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)
3944 {
3945 AssertMsgFailed(("4K PDEs cannot have PGM_PDFLAGS_TRACK_DIRTY set! GCPtr=%VGv PdeDst=%#RX64\n",
3946 GCPtr, (uint64_t)PdeDst.u));
3947 cErrors++;
3948 continue;
3949 }
3950
3951 /* iterate the page table. */
3952# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3953 /* Select the right PDE as we're emulating one 4 KB guest page table (1024 entries) with 2 shadow page tables (512 entries each). */
3954 const unsigned offPTSrc = ((GCPtr >> SHW_PD_SHIFT) & 1) * 512;
3955# else
3956 const unsigned offPTSrc = 0;
3957# endif
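                    /* Added worked example (editorial): a 32-bit guest PDE covers
                     * 4 MB via a 1024-entry page table, while a PAE shadow PDE
                     * covers 2 MB via 512 entries; each guest PT is therefore
                     * shadowed by two PTs. Shadow PD entries 2n and 2n+1 map the
                     * low and high halves, and offPTSrc (0 or 512) selects the
                     * matching half of the guest PTE array. */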
3958 for (unsigned iPT = 0, off = 0;
3959 iPT < RT_ELEMENTS(pPTDst->a);
3960 iPT++, off += PAGE_SIZE)
3961 {
3962 const SHWPTE PteDst = pPTDst->a[iPT];
3963
3964 /* skip not-present entries. */
3965 if (!(PteDst.u & (X86_PTE_P | PGM_PTFLAGS_TRACK_DIRTY))) /** @todo deal with ALL handlers and CSAM !P pages! */
3966 continue;
3967 Assert(PteDst.n.u1Present);
3968
3969 const GSTPTE PteSrc = pPTSrc->a[iPT + offPTSrc];
3970 if (!PteSrc.n.u1Present)
3971 {
3972# ifdef IN_RING3
3973 PGMAssertHandlerAndFlagsInSync(pVM);
3974 PGMR3DumpHierarchyGC(pVM, cr3, cr4, (PdeSrc.u & GST_PDE_PG_MASK));
3975# endif
3976 AssertMsgFailed(("Out of sync (!P) PTE at %VGv! PteSrc=%#RX64 PteDst=%#RX64 pPTSrc=%VGv iPTSrc=%x PdeSrc=%x physpte=%VGp\n",
3977 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u, pPTSrc, iPT + offPTSrc, PdeSrc.au32[0],
3978 (PdeSrc.u & GST_PDE_PG_MASK) + (iPT + offPTSrc)*sizeof(PteSrc)));
3979 cErrors++;
3980 continue;
3981 }
3982
3983 uint64_t fIgnoreFlags = GST_PTE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_G | X86_PTE_D | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT;
3984# if 1 /** @todo sync accessed bit properly... */
3985 fIgnoreFlags |= X86_PTE_A;
3986# endif
3987
3988 /* match the physical addresses */
3989 HCPhysShw = PteDst.u & SHW_PTE_PG_MASK;
3990 GCPhysGst = PteSrc.u & GST_PTE_PG_MASK;
3991
3992# ifdef IN_RING3
3993 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysGst, &HCPhys);
3994 if (VBOX_FAILURE(rc))
3995 {
3996 if (HCPhysShw != MMR3PageDummyHCPhys(pVM))
3997 {
3998 AssertMsgFailed(("Cannot find guest physical address %VGp at %VGv! PteSrc=%#RX64 PteDst=%#RX64\n",
3999 GCPhysGst, GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4000 cErrors++;
4001 continue;
4002 }
4003 }
4004 else if (HCPhysShw != (HCPhys & SHW_PTE_PG_MASK))
4005 {
4006 AssertMsgFailed(("Out of sync (phys) at %VGv! HCPhysShw=%VHp HCPhys=%VHp GCPhysGst=%VGp PteSrc=%#RX64 PteDst=%#RX64\n",
4007 GCPtr + off, HCPhysShw, HCPhys, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4008 cErrors++;
4009 continue;
4010 }
4011# endif
4012
4013 pPhysPage = pgmPhysGetPage(pPGM, GCPhysGst);
4014 if (!pPhysPage)
4015 {
4016# ifdef IN_RING3 /** @todo make MMR3PageDummyHCPhys an 'All' function! */
4017 if (HCPhysShw != MMR3PageDummyHCPhys(pVM))
4018 {
4019 AssertMsgFailed(("Cannot find guest physical address %VGp at %VGv! PteSrc=%#RX64 PteDst=%#RX64\n",
4020 GCPhysGst, GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4021 cErrors++;
4022 continue;
4023 }
4024# endif
4025 if (PteDst.n.u1Write)
4026 {
4027 AssertMsgFailed(("Invalid guest page at %VGv is writable! GCPhysGst=%VGp PteSrc=%#RX64 PteDst=%#RX64\n",
4028 GCPtr + off, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4029 cErrors++;
4030 }
4031 fIgnoreFlags |= X86_PTE_RW;
4032 }
4033 else if (HCPhysShw != (PGM_PAGE_GET_HCPHYS(pPhysPage) & SHW_PTE_PG_MASK))
4034 {
4035 AssertMsgFailed(("Out of sync (phys) at %VGv! HCPhysShw=%VHp HCPhys=%VHp GCPhysGst=%VGp PteSrc=%#RX64 PteDst=%#RX64\n",
4036 GCPtr + off, HCPhysShw, pPhysPage->HCPhys, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4037 cErrors++;
4038 continue;
4039 }
4040
4041 /* flags */
4042 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPhysPage))
4043 {
4044 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPhysPage))
4045 {
4046 if (PteDst.n.u1Write)
4047 {
4048 AssertMsgFailed(("WRITE access flagged at %VGv but the page is writable! HCPhys=%VHp PteSrc=%#RX64 PteDst=%#RX64\n",
4049 GCPtr + off, pPhysPage->HCPhys, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4050 cErrors++;
4051 continue;
4052 }
4053 fIgnoreFlags |= X86_PTE_RW;
4054 }
4055 else
4056 {
4057 if (PteDst.n.u1Present)
4058 {
4059 AssertMsgFailed(("ALL access flagged at %VGv but the page is present! HCPhys=%VHp PteSrc=%#RX64 PteDst=%#RX64\n",
4060 GCPtr + off, pPhysPage->HCPhys, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4061 cErrors++;
4062 continue;
4063 }
4064 fIgnoreFlags |= X86_PTE_P;
4065 }
4066 }
4067 else
4068 {
4069 if (!PteSrc.n.u1Dirty && PteSrc.n.u1Write)
4070 {
4071 if (PteDst.n.u1Write)
4072 {
4073 AssertMsgFailed(("!DIRTY page at %VGv is writable! PteSrc=%#RX64 PteDst=%#RX64\n",
4074 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4075 cErrors++;
4076 continue;
4077 }
4078 if (!(PteDst.u & PGM_PTFLAGS_TRACK_DIRTY))
4079 {
4080 AssertMsgFailed(("!DIRTY page at %VGv is not marked TRACK_DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
4081 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4082 cErrors++;
4083 continue;
4084 }
4085 if (PteDst.n.u1Dirty)
4086 {
4087 AssertMsgFailed(("!DIRTY page at %VGv is marked DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
4088 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4089 cErrors++;
4090 }
4091# if 0 /** @todo sync access bit properly... */
4092 if (PteDst.n.u1Accessed != PteSrc.n.u1Accessed)
4093 {
4094 AssertMsgFailed(("!DIRTY page at %VGv has a mismatching accessed bit! PteSrc=%#RX64 PteDst=%#RX64\n",
4095 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4096 cErrors++;
4097 }
4098 fIgnoreFlags |= X86_PTE_RW;
4099# else
4100 fIgnoreFlags |= X86_PTE_RW | X86_PTE_A;
4101# endif
4102 }
4103 else if (PteDst.u & PGM_PTFLAGS_TRACK_DIRTY)
4104 {
4105 /* access bit emulation (not implemented). */
4106 if (PteSrc.n.u1Accessed || PteDst.n.u1Present)
4107 {
4108 AssertMsgFailed(("PGM_PTFLAGS_TRACK_DIRTY set at %VGv but no accessed bit emulation! PteSrc=%#RX64 PteDst=%#RX64\n",
4109 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4110 cErrors++;
4111 continue;
4112 }
4113 if (!PteDst.n.u1Accessed)
4114 {
4115 AssertMsgFailed(("!ACCESSED page at %VGv has the accessed bit set! PteSrc=%#RX64 PteDst=%#RX64\n",
4116 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4117 cErrors++;
4118 }
4119 fIgnoreFlags |= X86_PTE_P;
4120 }
4121# ifdef DEBUG_sandervl
4122 fIgnoreFlags |= X86_PTE_D | X86_PTE_A;
4123# endif
4124 }
4125
4126 if ( (PteSrc.u & ~fIgnoreFlags) != (PteDst.u & ~fIgnoreFlags)
4127 && (PteSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (PteDst.u & ~fIgnoreFlags)
4128 )
4129 {
4130 AssertMsgFailed(("Flags mismatch at %VGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PteSrc=%#RX64 PteDst=%#RX64\n",
4131 GCPtr + off, (uint64_t)PteSrc.u & ~fIgnoreFlags, (uint64_t)PteDst.u & ~fIgnoreFlags,
4132 fIgnoreFlags, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4133 cErrors++;
4134 continue;
4135 }
4136 } /* foreach PTE */
4137 }
4138 else
4139 {
4140 /*
4141 * Big Page.
4142 */
4143 uint64_t fIgnoreFlags = X86_PDE_AVL_MASK | GST_PDE_PG_MASK | X86_PDE4M_G | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_PWT | X86_PDE4M_PCD;
4144 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
4145 {
4146 if (PdeDst.n.u1Write)
4147 {
4148 AssertMsgFailed(("!DIRTY page at %VGv is writable! PdeSrc=%#RX64 PdeDst=%#RX64\n",
4149 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4150 cErrors++;
4151 continue;
4152 }
4153 if (!(PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY))
4154 {
4155 AssertMsgFailed(("!DIRTY page at %VGv is not marked TRACK_DIRTY! PdeSrc=%#RX64 PdeDst=%#RX64\n",
4156 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4157 cErrors++;
4158 continue;
4159 }
4160# if 0 /** @todo sync access bit properly... */
4161 if (PdeDst.n.u1Accessed != PdeSrc.b.u1Accessed)
4162 {
4163 AssertMsgFailed(("!DIRTY page at %VGv has a mismatching accessed bit! PdeSrc=%#RX64 PdeDst=%#RX64\n",
4164 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4165 cErrors++;
4166 }
4167 fIgnoreFlags |= X86_PTE_RW;
4168# else
4169 fIgnoreFlags |= X86_PTE_RW | X86_PTE_A;
4170# endif
4171 }
4172 else if (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)
4173 {
4174 /* access bit emulation (not implemented). */
4175 if (PdeSrc.b.u1Accessed || PdeDst.n.u1Present)
4176 {
4177 AssertMsgFailed(("PGM_PDFLAGS_TRACK_DIRTY set at %VGv but no accessed bit emulation! PdeSrc=%#RX64 PdeDst=%#RX64\n",
4178 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4179 cErrors++;
4180 continue;
4181 }
4182 if (!PdeDst.n.u1Accessed)
4183 {
4184 AssertMsgFailed(("!ACCESSED page at %VGv has the accessed bit set! PdeSrc=%#RX64 PdeDst=%#RX64\n",
4185 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4186 cErrors++;
4187 }
4188 fIgnoreFlags |= X86_PTE_P;
4189 }
4190
4191 if ((PdeSrc.u & ~fIgnoreFlags) != (PdeDst.u & ~fIgnoreFlags))
4192 {
4193 AssertMsgFailed(("Flags mismatch (B) at %VGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PdeDst=%#RX64\n",
4194 GCPtr, (uint64_t)PdeSrc.u & ~fIgnoreFlags, (uint64_t)PdeDst.u & ~fIgnoreFlags,
4195 fIgnoreFlags, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4196 cErrors++;
4197 }
4198
4199 /* iterate the page table. */
4200 for (unsigned iPT = 0, off = 0;
4201 iPT < RT_ELEMENTS(pPTDst->a);
4202 iPT++, off += PAGE_SIZE, GCPhysGst += PAGE_SIZE)
4203 {
4204 const SHWPTE PteDst = pPTDst->a[iPT];
4205
4206 if (PteDst.u & PGM_PTFLAGS_TRACK_DIRTY)
4207 {
4208 AssertMsgFailed(("The PTE at %VGv emulating a 2/4M page is marked TRACK_DIRTY! PdeSrc=%#RX64 PteDst=%#RX64\n",
4209 GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4210 cErrors++;
4211 }
4212
4213 /* skip not-present entries. */
4214 if (!PteDst.n.u1Present) /** @todo deal with ALL handlers and CSAM !P pages! */
4215 continue;
4216
4217 fIgnoreFlags = X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT | X86_PTE_D | X86_PTE_A | X86_PTE_G | X86_PTE_PAE_NX;
4218
4219 /* match the physical addresses */
4220 HCPhysShw = PteDst.u & X86_PTE_PAE_PG_MASK;
4221
4222# ifdef IN_RING3
4223 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysGst, &HCPhys);
4224 if (VBOX_FAILURE(rc))
4225 {
4226 if (HCPhysShw != MMR3PageDummyHCPhys(pVM))
4227 {
4228 AssertMsgFailed(("Cannot find guest physical address %VGp at %VGv! PdeSrc=%#RX64 PteDst=%#RX64\n",
4229 GCPhysGst, GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4230 cErrors++;
4231 }
4232 }
4233 else if (HCPhysShw != (HCPhys & X86_PTE_PAE_PG_MASK))
4234 {
4235 AssertMsgFailed(("Out of sync (phys) at %VGv! HCPhysShw=%VHp HCPhys=%VHp GCPhysGst=%VGp PdeSrc=%#RX64 PteDst=%#RX64\n",
4236 GCPtr + off, HCPhysShw, HCPhys, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4237 cErrors++;
4238 continue;
4239 }
4240# endif
4241 pPhysPage = pgmPhysGetPage(pPGM, GCPhysGst);
4242 if (!pPhysPage)
4243 {
4244# ifdef IN_RING3 /** @todo make MMR3PageDummyHCPhys an 'All' function! */
4245 if (HCPhysShw != MMR3PageDummyHCPhys(pVM))
4246 {
4247 AssertMsgFailed(("Cannot find guest physical address %VGp at %VGv! PdeSrc=%#RX64 PteDst=%#RX64\n",
4248 GCPhysGst, GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4249 cErrors++;
4250 continue;
4251 }
4252# endif
4253 if (PteDst.n.u1Write)
4254 {
4255 AssertMsgFailed(("Invalid guest page at %VGv is writable! GCPhysGst=%VGp PdeSrc=%#RX64 PteDst=%#RX64\n",
4256 GCPtr + off, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4257 cErrors++;
4258 }
4259 fIgnoreFlags |= X86_PTE_RW;
4260 }
4261 else if (HCPhysShw != (pPhysPage->HCPhys & X86_PTE_PAE_PG_MASK))
4262 {
4263 AssertMsgFailed(("Out of sync (phys) at %VGv! HCPhysShw=%VHp HCPhys=%VHp GCPhysGst=%VGp PdeSrc=%#RX64 PteDst=%#RX64\n",
4264 GCPtr + off, HCPhysShw, pPhysPage->HCPhys, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4265 cErrors++;
4266 continue;
4267 }
4268
4269 /* flags */
4270 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPhysPage))
4271 {
4272 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPhysPage))
4273 {
4274 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPhysPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
4275 {
4276 if (PteDst.n.u1Write)
4277 {
4278 AssertMsgFailed(("WRITE access flagged at %VGv but the page is writable! HCPhys=%VHp PdeSrc=%#RX64 PteDst=%#RX64\n",
4279 GCPtr + off, pPhysPage->HCPhys, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4280 cErrors++;
4281 continue;
4282 }
4283 fIgnoreFlags |= X86_PTE_RW;
4284 }
4285 }
4286 else
4287 {
4288 if (PteDst.n.u1Present)
4289 {
4290 AssertMsgFailed(("ALL access flagged at %VGv but the page is present! HCPhys=%VHp PdeSrc=%#RX64 PteDst=%#RX64\n",
4291 GCPtr + off, pPhysPage->HCPhys, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4292 cErrors++;
4293 continue;
4294 }
4295 fIgnoreFlags |= X86_PTE_P;
4296 }
4297 }
4298
4299 if ( (PdeSrc.u & ~fIgnoreFlags) != (PteDst.u & ~fIgnoreFlags)
4300 && (PdeSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (PteDst.u & ~fIgnoreFlags) /* lazy phys handler dereg. */
4301 )
4302 {
4303 AssertMsgFailed(("Flags mismatch (BT) at %VGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PteDst=%#RX64\n",
4304 GCPtr + off, (uint64_t)PdeSrc.u & ~fIgnoreFlags, (uint64_t)PteDst.u & ~fIgnoreFlags,
4305 fIgnoreFlags, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4306 cErrors++;
4307 continue;
4308 }
4309 } /* for each PTE */
4310 }
4311 }
4312 /* not present */
4313
4314 } /* for each PDE */
4315
4316 } /* for each PDPTE */
4317
4318 } /* for each PML4E */
4319
4320# ifdef DEBUG
4321 if (cErrors)
4322 LogFlow(("AssertCR3: cErrors=%d\n", cErrors));
4323# endif
4324
4325#endif
4326 return cErrors;
4327
4328#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT */
4329}
4330#endif /* VBOX_STRICT */
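/* Illustrative sketch (editorial, not part of the original source): strict
 * builds typically reach this checker through the per-mode table after CR3
 * or paging-mode changes, shaped roughly like
 *
 *     unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCUINTPTR)0);
 *     Assert(!cErrors);
 *
 * The wrapper and call site are assumptions; the function itself just
 * returns the number of shadow/guest inconsistencies it found.
 */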
4331