VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllBth.h@17279

Last change on this file was r17279, checked in by vboxsync, 16 years ago

PGM,MM: Attacking the shadow page pool tracking info stored in PGMPPAGE, replacing direct access with PGM_PAGE_SET/GET_XXXX access. Required to get rid of MM_RAM_FLAGS_* and to be able to restructure PGMPAGE. MM_RAM_FLAGS_NO_REFS_MASK fully eliminated already.

1/* $Id: PGMAllBth.h 17279 2009-03-03 14:05:15Z vboxsync $ */
2/** @file
3 * VBox - Page Manager, Shadow+Guest Paging Template - All context code.
4 *
5 * This file is a big challenge!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
20 * Clara, CA 95054 USA or visit http://www.sun.com if you need
21 * additional information or have any questions.
22 */
23
24/*******************************************************************************
25* Internal Functions *
26*******************************************************************************/
27__BEGIN_DECLS
28PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
29PGM_BTH_DECL(int, InvalidatePage)(PVM pVM, RTGCPTR GCPtrPage);
30PGM_BTH_DECL(int, SyncPage)(PVM pVM, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
31PGM_BTH_DECL(int, CheckPageFault)(PVM pVM, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage);
32PGM_BTH_DECL(int, SyncPT)(PVM pVM, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
33PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVM pVM, RTGCPTR Addr, unsigned fPage, unsigned uErr);
34PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, RTGCPTR GCPtrPage);
35PGM_BTH_DECL(int, SyncCR3)(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
36#ifdef VBOX_STRICT
37PGM_BTH_DECL(unsigned, AssertCR3)(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
38#endif
39#ifdef PGMPOOL_WITH_USER_TRACKING
40DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVM pVM, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys);
41#endif
42PGM_BTH_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
43PGM_BTH_DECL(int, UnmapCR3)(PVM pVM);
44__END_DECLS
45
46
47/* Filter out some illegal combinations of guest and shadow paging, so we can remove redundant checks inside functions. */
48#if PGM_GST_TYPE == PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
49# error "Invalid combination; PAE guest implies PAE shadow"
50#endif
51
52#if (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
53 && !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT)
54# error "Invalid combination; real or protected mode without paging implies 32 bits or PAE shadow paging."
55#endif
56
57#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE) \
58 && !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT)
59# error "Invalid combination; 32 bits guest paging or PAE implies 32 bits or PAE shadow paging."
60#endif
61
62#if (PGM_GST_TYPE == PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT) \
63 || (PGM_SHW_TYPE == PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PROT)
64# error "Invalid combination; AMD64 guest implies AMD64 shadow and vice versa"
65#endif
66
67#ifdef IN_RING0 /* no mappings in VT-x and AMD-V mode */
68# define PGM_WITHOUT_MAPPINGS
69#endif
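/* Illustrative sketch (not from this file): this template is compiled once
 * per (guest, shadow) paging mode pair. A wrapper is expected to define the
 * mode macros and the name mangler before inclusion, roughly like this
 * (the exact wrapper macro names here are an assumption):
 */
#if 0
# define PGM_GST_TYPE       PGM_TYPE_32BIT
# define PGM_SHW_TYPE       PGM_TYPE_PAE
# define PGM_BTH_NAME(name) pgmBthPae32Bit##name
# include "PGMAllBth.h"     /* instantiates Trap0eHandler, SyncPage, SyncPT, ... */
#endif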
70
71
72#ifndef IN_RING3
73/**
74 * #PF Handler for raw-mode guest execution.
75 *
76 * @returns VBox status code (appropriate for trap handling and GC return).
77 * @param pVM VM Handle.
78 * @param uErr The trap error code.
79 * @param pRegFrame Trap register frame.
80 * @param pvFault The fault address.
81 */
82PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
83{
84# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY) && defined(VBOX_STRICT)
85 PGMDynCheckLocks(pVM);
86# endif
87
88# if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
89 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
90 && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
91
92# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE != PGM_TYPE_PAE
93 /*
94 * Hide the instruction fetch trap indicator for now.
95 */
96 /** @todo NXE will change this and we must fix NXE in the switcher too! */
97 if (uErr & X86_TRAP_PF_ID)
98 {
99 uErr &= ~X86_TRAP_PF_ID;
100 TRPMSetErrorCode(pVM, uErr);
101 }
102# endif
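 /* For reference, the #PF error code bits tested throughout this handler
  * (architectural x86 meanings):
  *     X86_TRAP_PF_P    - set: protection violation; clear: page not present.
  *     X86_TRAP_PF_RW   - set: write access; clear: read access.
  *     X86_TRAP_PF_US   - set: user-mode access; clear: supervisor access.
  *     X86_TRAP_PF_RSVD - reserved bit set in a paging structure.
  *     X86_TRAP_PF_ID   - instruction fetch (needs NX/EPT capable hardware).
  */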
103
104 /*
105 * Get PDs.
106 */
107 int rc;
108# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
109# if PGM_GST_TYPE == PGM_TYPE_32BIT
110 const unsigned iPDSrc = pvFault >> GST_PD_SHIFT;
111 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
112
113# elif PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
114
115# if PGM_GST_TYPE == PGM_TYPE_PAE
116 unsigned iPDSrc;
117# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
118 X86PDPE PdpeSrc;
119 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, pvFault, &iPDSrc, &PdpeSrc);
120# else
121 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, pvFault, &iPDSrc, NULL);
122# endif
123
124# elif PGM_GST_TYPE == PGM_TYPE_AMD64
125 unsigned iPDSrc;
126 PX86PML4E pPml4eSrc;
127 X86PDPE PdpeSrc;
128 PGSTPD pPDSrc;
129
130 pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, pvFault, &pPml4eSrc, &PdpeSrc, &iPDSrc);
131 Assert(pPml4eSrc);
132# endif
133
134 /* Quick check for a valid guest trap. (PAE & AMD64) */
135 if (!pPDSrc)
136 {
137# if PGM_GST_TYPE == PGM_TYPE_AMD64 && GC_ARCH_BITS == 64
138 LogFlow(("Trap0eHandler: guest PML4 %d not present CR3=%RGp\n", (int)((pvFault >> X86_PML4_SHIFT) & X86_PML4_MASK), CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK));
139# else
140 LogFlow(("Trap0eHandler: guest iPDSrc=%u not present CR3=%RGp\n", iPDSrc, CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK));
141# endif
142 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2GuestTrap; });
143 TRPMSetErrorCode(pVM, uErr);
144 return VINF_EM_RAW_GUEST_TRAP;
145 }
146# endif
147
148# else /* !PGM_WITH_PAGING */
149 PGSTPD pPDSrc = NULL;
150 const unsigned iPDSrc = 0;
151# endif /* !PGM_WITH_PAGING */
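 /* Illustrative example (assuming a 32-bit guest): a fault at
  * pvFault = 0xC0101234 gives iPDSrc = 0xC0101234 >> 22 = 0x300, i.e. the
  * guest PDE covering the 4MB region 0xC0000000..0xC03FFFFF. */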
152
153
154# if PGM_SHW_TYPE == PGM_TYPE_32BIT
155 const unsigned iPDDst = pvFault >> SHW_PD_SHIFT;
156 PX86PD pPDDst = pgmShwGet32BitPDPtr(&pVM->pgm.s);
157
158# elif PGM_SHW_TYPE == PGM_TYPE_PAE
159 const unsigned iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK; /* pPDDst index, not used with the pool. */
160
161# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
162 PX86PDPAE pPDDst;
163# if PGM_GST_TYPE != PGM_TYPE_PAE
164 X86PDPE PdpeSrc;
165
166 /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
167 PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
168# endif
169 rc = pgmShwSyncPaePDPtr(pVM, pvFault, &PdpeSrc, &pPDDst);
170 if (rc != VINF_SUCCESS)
171 {
172 AssertRC(rc);
173 return rc;
174 }
175 Assert(pPDDst);
176
177# else
178 PX86PDPAE pPDDst = pgmShwGetPaePDPtr(&pVM->pgm.s, pvFault);
179
180 /* Did we mark the PDPT as not present in SyncCR3? */
181 unsigned iPdpt = (pvFault >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
182 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
183 if (!pPdptDst->a[iPdpt].n.u1Present)
184 pPdptDst->a[iPdpt].n.u1Present = 1;
185# endif
186
187# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
188 const unsigned iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
189 PX86PDPAE pPDDst;
190# if PGM_GST_TYPE == PGM_TYPE_PROT
191 /* AMD-V nested paging */
192 X86PML4E Pml4eSrc;
193 X86PDPE PdpeSrc;
194 PX86PML4E pPml4eSrc = &Pml4eSrc;
195
196 /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
197 Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
198 PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
199# endif
200
201 rc = pgmShwSyncLongModePDPtr(pVM, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
202 if (rc != VINF_SUCCESS)
203 {
204 AssertRC(rc);
205 return rc;
206 }
207 Assert(pPDDst);
208
209# elif PGM_SHW_TYPE == PGM_TYPE_EPT
210 const unsigned iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
211 PEPTPD pPDDst;
212
213 rc = pgmShwGetEPTPDPtr(pVM, pvFault, NULL, &pPDDst);
214 if (rc != VINF_SUCCESS)
215 {
216 AssertRC(rc);
217 return rc;
218 }
219 Assert(pPDDst);
220# endif
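 /* For orientation, the long-mode/EPT shadow indices derive from the fault
  * address with 9 bits per level (4KB pages):
  *     iPml4 = (pvFault >> 39) & 0x1ff;
  *     iPdpt = (pvFault >> 30) & 0x1ff;
  *     iPD   = (pvFault >> 21) & 0x1ff;    (SHW_PD_SHIFT / SHW_PD_MASK above)
  *     iPte  = (pvFault >> 12) & 0x1ff;
  */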
221
222# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
223 /*
224 * If we successfully correct the write protection fault due to dirty bit
225 * tracking, or this page fault is a genuine one, then return immediately.
226 */
227 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
228 rc = PGM_BTH_NAME(CheckPageFault)(pVM, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
229 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
230 if ( rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
231 || rc == VINF_EM_RAW_GUEST_TRAP)
232 {
233 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
234 = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVM->pgm.s.StatRZTrap0eTime2DirtyAndAccessed : &pVM->pgm.s.StatRZTrap0eTime2GuestTrap; });
235 LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
236 return rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? VINF_SUCCESS : rc;
237 }
238
239 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0ePD[iPDSrc]);
240# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
241
242 /*
243 * A common case is the not-present error caused by lazy page table syncing.
244 *
245 * It is IMPORTANT that we weed out any access to non-present shadow PDEs here
246 * so we can safely assume that the shadow PT is present when calling SyncPage later.
247 *
248 * On failure, we ASSUME that SyncPT is out of memory or detected some kind
249 * of mapping conflict and defer to SyncCR3 in R3.
250 * (Again, we do NOT support access handlers for non-present guest pages.)
251 *
252 */
253# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
254 GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
255# else
256 GSTPDE PdeSrc;
257 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
258 PdeSrc.n.u1Present = 1;
259 PdeSrc.n.u1Write = 1;
260 PdeSrc.n.u1Accessed = 1;
261 PdeSrc.n.u1User = 1;
262# endif
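 /* Without guest paging there is no real guest PDE to read, so the code above
  * fakes one that is present, writable, accessed and user-visible; the shared
  * guest+shadow logic below can then run unmodified for real/protected mode. */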
263 if ( !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
264 && !pPDDst->a[iPDDst].n.u1Present
265 && PdeSrc.n.u1Present
266 )
267
268 {
269 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2SyncPT; });
270 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0eTimeSyncPT, f);
271 LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
272 rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, pvFault);
273 if (RT_SUCCESS(rc))
274 {
275 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeSyncPT, f);
276 return rc;
277 }
278 Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
279 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
280 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeSyncPT, f);
281 return VINF_PGM_SYNC_CR3;
282 }
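 /* This is the lazy page table population at work: instead of shadowing the
  * entire guest address space up front, each not-present fault pulls in just
  * the one shadow page table covering the faulting address (SyncPT above),
  * keeping shadow pool usage proportional to the guest's working set. */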
283
284# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
285 /*
286 * Check if this address is within any of our mappings.
287 *
288 * This is *very* fast and it's gonna save us a bit of effort below and prevent
289 * us from screwing ourselves with MMIO2 pages which have a GC Mapping (VRam).
290 * (BTW, it's impossible to have physical access handlers in a mapping.)
291 */
292 if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
293 {
294 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0eTimeMapping, a);
295 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
296 for ( ; pMapping; pMapping = pMapping->CTX_SUFF(pNext))
297 {
298 if (pvFault < pMapping->GCPtr)
299 break;
300 if (pvFault - pMapping->GCPtr < pMapping->cb)
301 {
302 /*
303 * The first thing we check is if we've got an undetected conflict.
304 */
305 if (!pVM->pgm.s.fMappingsFixed)
306 {
307 unsigned iPT = pMapping->cb >> GST_PD_SHIFT;
308 while (iPT-- > 0)
309 if (pPDSrc->a[iPDSrc + iPT].n.u1Present)
310 {
311 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eConflicts);
312 Log(("Trap0e: Detected Conflict %RGv-%RGv\n", pMapping->GCPtr, pMapping->GCPtrLast));
313 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
314 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeMapping, a);
315 return VINF_PGM_SYNC_CR3;
316 }
317 }
318
319 /*
320 * Check if the fault address is in a virtual page access handler range.
321 */
322 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->HyperVirtHandlers, pvFault);
323 if ( pCur
324 && pvFault - pCur->Core.Key < pCur->cb
325 && uErr & X86_TRAP_PF_RW)
326 {
327# ifdef IN_RC
328 STAM_PROFILE_START(&pCur->Stat, h);
329 rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
330 STAM_PROFILE_STOP(&pCur->Stat, h);
331# else
332 AssertFailed();
333 rc = VINF_EM_RAW_EMULATE_INSTR; /* can't happen with VMX */
334# endif
335 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersMapping);
336 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeMapping, a);
337 return rc;
338 }
339
340 /*
341 * Pretend we're not here and let the guest handle the trap.
342 */
343 TRPMSetErrorCode(pVM, uErr & ~X86_TRAP_PF_P);
344 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eGuestPFMapping);
345 LogFlow(("PGM: Mapping access -> route trap to recompiler!\n"));
346 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeMapping, a);
347 return VINF_EM_RAW_GUEST_TRAP;
348 }
349 }
350 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeMapping, a);
351 } /* pgmMapAreMappingsEnabled(&pVM->pgm.s) */
352# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
353
354 /*
355 * Check if this fault address is flagged for special treatment,
356 * which means we'll have to figure out the physical address and
357 * check flags associated with it.
358 *
359 * ASSUME that we can limit any special access handling to pages
360 * in page tables which the guest believes to be present.
361 */
362 if (PdeSrc.n.u1Present)
363 {
364 RTGCPHYS GCPhys = NIL_RTGCPHYS;
365
366# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
367# if PGM_GST_TYPE == PGM_TYPE_AMD64
368 bool fBigPagesSupported = true;
369# else
370 bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
371# endif
372 if ( PdeSrc.b.u1Size
373 && fBigPagesSupported)
374 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc)
375 | ((RTGCPHYS)pvFault & (GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK));
376 else
377 {
378 PGSTPT pPTSrc;
379 rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
380 if (RT_SUCCESS(rc))
381 {
382 unsigned iPTESrc = (pvFault >> GST_PT_SHIFT) & GST_PT_MASK;
383 if (pPTSrc->a[iPTESrc].n.u1Present)
384 GCPhys = pPTSrc->a[iPTESrc].u & GST_PTE_PG_MASK;
385 }
386 }
387# else
388 /* No paging so the fault address is the physical address */
389 GCPhys = (RTGCPHYS)(pvFault & ~PAGE_OFFSET_MASK);
390# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
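 /* Worked example (32-bit guest, 4MB page): for a big page at GCPhys
  * 0x08000000 and pvFault = 0x081AB123, the computation above yields
  * GCPhys = 0x08000000 | (0x081AB123 & (0x3fffff ^ 0xfff)) = 0x081AB000,
  * i.e. the 4KB page within the big page, with the byte offset masked off. */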
391
392 /*
393 * If we have a GC address we'll check if it has any flags set.
394 */
395 if (GCPhys != NIL_RTGCPHYS)
396 {
397 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
398
399 PPGMPAGE pPage;
400 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
401 if (RT_SUCCESS(rc))
402 {
403 if ( PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage)
404 || PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage))
405 {
406 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
407 {
408 /*
409 * Physical page access handler.
410 */
411 const RTGCPHYS GCPhysFault = GCPhys | (pvFault & PAGE_OFFSET_MASK);
412 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysFault);
413 if (pCur)
414 {
415# ifdef PGM_SYNC_N_PAGES
416 /*
417 * If the region is write protected and we got a page not present fault, then sync
418 * the pages. If the fault was caused by a read, then restart the instruction.
419 * In case of write access continue to the GC write handler.
420 *
421 * ASSUMES that there is only one handler per page or that they have similar write properties.
422 */
423 if ( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
424 && !(uErr & X86_TRAP_PF_P))
425 {
426 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
427 if ( RT_FAILURE(rc)
428 || !(uErr & X86_TRAP_PF_RW)
429 || rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
430 {
431 AssertRC(rc);
432 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersOutOfSync);
433 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
434 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2OutOfSyncHndPhys; });
435 return rc;
436 }
437 }
438# endif
439
440 AssertMsg( pCur->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
441 || (pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE && (uErr & X86_TRAP_PF_RW)),
442 ("Unexpected trap for physical handler: %08X (phys=%08x) HCPhys=%X uErr=%X, enum=%d\n", pvFault, GCPhys, pPage->HCPhys, uErr, pCur->enmType));
443
444# if defined(IN_RC) || defined(IN_RING0)
445 if (pCur->CTX_SUFF(pfnHandler))
446 {
447 STAM_PROFILE_START(&pCur->Stat, h);
448 rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, GCPhysFault, pCur->CTX_SUFF(pvUser));
449 STAM_PROFILE_STOP(&pCur->Stat, h);
450 }
451 else
452# endif
453 rc = VINF_EM_RAW_EMULATE_INSTR;
454 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersPhysical);
455 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
456 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2HndPhys; });
457 return rc;
458 }
459 }
460# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
461 else
462 {
463# ifdef PGM_SYNC_N_PAGES
464 /*
465 * If the region is write protected and we got a page not present fault, then sync
466 * the pages. If the fault was caused by a read, then restart the instruction.
467 * In case of write access continue to the GC write handler.
468 */
469 if ( PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < PGM_PAGE_HNDL_PHYS_STATE_ALL
470 && !(uErr & X86_TRAP_PF_P))
471 {
472 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
473 if ( RT_FAILURE(rc)
474 || rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
475 || !(uErr & X86_TRAP_PF_RW))
476 {
477 AssertRC(rc);
478 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersOutOfSync);
479 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
480 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2OutOfSyncHndVirt; });
481 return rc;
482 }
483 }
484# endif
485 /*
486 * Ok, it's a virtual page access handler.
487 *
488 * Since it's faster to search by address, we'll do that first
489 * and then retry by GCPhys if that fails.
490 */
491 /** @todo r=bird: perhaps we should consider looking up by physical address directly now? */
492 /** @note r=svl: true, but the lookup by virtual address should remain as a fallback, as the phys & virt trees might be out of
493 * sync because the page was changed without us noticing it (not-present -> present without invlpg or mov cr3, xxx).
494 */
495 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
496 if (pCur)
497 {
498 AssertMsg(!(pvFault - pCur->Core.Key < pCur->cb)
499 || ( pCur->enmType != PGMVIRTHANDLERTYPE_WRITE
500 || !(uErr & X86_TRAP_PF_P)
501 || (pCur->enmType == PGMVIRTHANDLERTYPE_WRITE && (uErr & X86_TRAP_PF_RW))),
502 ("Unexpected trap for virtual handler: %RGv (phys=%RGp) HCPhys=%HGp uErr=%X, enum=%d\n", pvFault, GCPhys, pPage->HCPhys, uErr, pCur->enmType));
503
504 if ( pvFault - pCur->Core.Key < pCur->cb
505 && ( uErr & X86_TRAP_PF_RW
506 || pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
507 {
508# ifdef IN_RC
509 STAM_PROFILE_START(&pCur->Stat, h);
510 rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
511 STAM_PROFILE_STOP(&pCur->Stat, h);
512# else
513 rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
514# endif
515 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersVirtual);
516 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
517 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2HndVirt; });
518 return rc;
519 }
520 /* Unhandled part of a monitored page */
521 }
522 else
523 {
524 /* Check by physical address. */
525 PPGMVIRTHANDLER pCur;
526 unsigned iPage;
527 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys + (pvFault & PAGE_OFFSET_MASK),
528 &pCur, &iPage);
529 Assert(RT_SUCCESS(rc) || !pCur);
530 if ( pCur
531 && ( uErr & X86_TRAP_PF_RW
532 || pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
533 {
534 Assert((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == GCPhys);
535# ifdef IN_RC
536 RTGCPTR off = (iPage << PAGE_SHIFT) + (pvFault & PAGE_OFFSET_MASK) - (pCur->Core.Key & PAGE_OFFSET_MASK);
537 Assert(off < pCur->cb);
538 STAM_PROFILE_START(&pCur->Stat, h);
539 rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, off);
540 STAM_PROFILE_STOP(&pCur->Stat, h);
541# else
542 rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
543# endif
544 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersVirtualByPhys);
545 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
546 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2HndVirt; });
547 return rc;
548 }
549 }
550 }
551# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
552
553 /*
554 * There is a handled area of the page, but this fault doesn't belong to it.
555 * We must emulate the instruction.
556 *
557 * To avoid crashing (non-fatally) in the interpreter and going back to the recompiler,
558 * we first check if this was a page-not-present fault for a page with only
559 * write access handlers. Restart the instruction if it wasn't a write access.
560 */
561 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersUnhandled);
562
563 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
564 && !(uErr & X86_TRAP_PF_P))
565 {
566 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
567 if ( RT_FAILURE(rc)
568 || rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
569 || !(uErr & X86_TRAP_PF_RW))
570 {
571 AssertRC(rc);
572 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersOutOfSync);
573 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
574 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2OutOfSyncHndPhys; });
575 return rc;
576 }
577 }
578
579 /** @todo This particular case can cause quite a lot of overhead; e.g. the early stage of kernel booting in Ubuntu 6.06
580 * writes to an unhandled part of the LDT page several million times.
581 */
582 rc = PGMInterpretInstruction(pVM, pRegFrame, pvFault);
583 LogFlow(("PGM: PGMInterpretInstruction -> rc=%d HCPhys=%RHp%s%s\n",
584 rc, pPage->HCPhys,
585 PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage) ? " phys" : "",
586 PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage) ? " virt" : ""));
587 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
588 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2HndUnhandled; });
589 return rc;
590 } /* if any kind of handler */
591
592# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
593 if (uErr & X86_TRAP_PF_P)
594 {
595 /*
596 * The page isn't marked, but it might still be monitored by a virtual page access handler.
597 * (ASSUMES no temporary disabling of virtual handlers.)
598 */
599 /** @todo r=bird: Since the purpose is to catch out of sync pages with virtual handler(s) here,
600 * we should correct both the shadow page table and physical memory flags, and not only check for
601 * accesses within the handler region but for access to pages with virtual handlers. */
602 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
603 if (pCur)
604 {
605 AssertMsg( !(pvFault - pCur->Core.Key < pCur->cb)
606 || ( pCur->enmType != PGMVIRTHANDLERTYPE_WRITE
607 || !(uErr & X86_TRAP_PF_P)
608 || (pCur->enmType == PGMVIRTHANDLERTYPE_WRITE && (uErr & X86_TRAP_PF_RW))),
609 ("Unexpected trap for virtual handler: %08X (phys=%08x) HCPhys=%X uErr=%X, enum=%d\n", pvFault, GCPhys, pPage->HCPhys, uErr, pCur->enmType));
610
611 if ( pvFault - pCur->Core.Key < pCur->cb
612 && ( uErr & X86_TRAP_PF_RW
613 || pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
614 {
615# ifdef IN_RC
616 STAM_PROFILE_START(&pCur->Stat, h);
617 rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
618 STAM_PROFILE_STOP(&pCur->Stat, h);
619# else
620 rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
621# endif
622 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersVirtualUnmarked);
623 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
624 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2HndVirt; });
625 return rc;
626 }
627 }
628 }
629# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
630 }
631 else
632 {
633 /* When the guest accesses invalid physical memory (e.g. probing of RAM or accessing a remapped MMIO range), we'll fall
634 * back to the recompiler to emulate the instruction.
635 */
636 LogFlow(("pgmPhysGetPageEx %RGp failed with %Rrc\n", GCPhys, rc));
637 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersInvalid);
638 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
639 return VINF_EM_RAW_EMULATE_INSTR;
640 }
641
642 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
643
644# ifdef PGM_OUT_OF_SYNC_IN_GC
645 /*
646 * We are here only if page is present in Guest page tables and trap is not handled
647 * by our handlers.
648 * Check it for page out-of-sync situation.
649 */
650 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0eTimeOutOfSync, c);
651
652 if (!(uErr & X86_TRAP_PF_P))
653 {
654 /*
655 * Page is not present in our page tables.
656 * Try to sync it!
657 * BTW, fPageShw is invalid in this branch!
658 */
659 if (uErr & X86_TRAP_PF_US)
660 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncUser));
661 else /* supervisor */
662 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));
663
664# if defined(LOG_ENABLED) && !defined(IN_RING0)
665 RTGCPHYS GCPhys;
666 uint64_t fPageGst;
667 PGMGstGetPage(pVM, pvFault, &fPageGst, &GCPhys);
668 Log(("Page out of sync: %RGv eip=%08x PdeSrc.n.u1User=%d fPageGst=%08llx GCPhys=%RGp scan=%d\n",
669 pvFault, pRegFrame->eip, PdeSrc.n.u1User, fPageGst, GCPhys, CSAMDoesPageNeedScanning(pVM, (RTRCPTR)pRegFrame->eip)));
670# endif /* LOG_ENABLED */
671
672# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0)
673 if (CPUMGetGuestCPL(pVM, pRegFrame) == 0)
674 {
675 uint64_t fPageGst;
676 rc = PGMGstGetPage(pVM, pvFault, &fPageGst, NULL);
677 if ( RT_SUCCESS(rc)
678 && !(fPageGst & X86_PTE_US))
679 {
680 /* Note: can't check for X86_TRAP_ID bit, because that requires execute disable support on the CPU */
681 if ( pvFault == (RTGCPTR)pRegFrame->eip
682 || pvFault - pRegFrame->eip < 8 /* instruction crossing a page boundary */
683# ifdef CSAM_DETECT_NEW_CODE_PAGES
684 || ( !PATMIsPatchGCAddr(pVM, (RTGCPTR)pRegFrame->eip)
685 && CSAMDoesPageNeedScanning(pVM, (RTRCPTR)pRegFrame->eip)) /* any new code we encounter here */
686# endif /* CSAM_DETECT_NEW_CODE_PAGES */
687 )
688 {
689 LogFlow(("CSAMExecFault %RX32\n", pRegFrame->eip));
690 rc = CSAMExecFault(pVM, (RTRCPTR)pRegFrame->eip);
691 if (rc != VINF_SUCCESS)
692 {
693 /*
694 * CSAM needs to perform a job in ring 3.
695 *
696 * Sync the page before going to the host context; otherwise we'll end up in a loop if
697 * CSAM fails (e.g. instruction crosses a page boundary and the next page is not present)
698 */
699 LogFlow(("CSAM ring 3 job\n"));
700 int rc2 = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, 1, uErr);
701 AssertRC(rc2);
702
703 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeOutOfSync, c);
704 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2CSAM; });
705 return rc;
706 }
707 }
708# ifdef CSAM_DETECT_NEW_CODE_PAGES
709 else if ( uErr == X86_TRAP_PF_RW
710 && pRegFrame->ecx >= 0x100 /* early check for movswd count */
711 && pRegFrame->ecx < 0x10000)
712 {
713 /* In case of a write to a non-present supervisor shadow page, we'll take special precautions
714 * to detect loading of new code pages.
715 */
716
717 /*
718 * Decode the instruction.
719 */
720 RTGCPTR PC;
721 rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &PC);
722 if (rc == VINF_SUCCESS)
723 {
724 DISCPUSTATE Cpu;
725 uint32_t cbOp;
726 rc = EMInterpretDisasOneEx(pVM, PC, pRegFrame, &Cpu, &cbOp);
727
728 /* For now we'll restrict this to rep movsw/d instructions */
729 if ( rc == VINF_SUCCESS
730 && Cpu.pCurInstr->opcode == OP_MOVSWD
731 && (Cpu.prefix & PREFIX_REP))
732 {
733 CSAMMarkPossibleCodePage(pVM, pvFault);
734 }
735 }
736 }
737# endif /* CSAM_DETECT_NEW_CODE_PAGES */
738
739 /*
740 * Mark this page as safe.
741 */
742 /** @todo not correct for pages that contain both code and data!! */
743 Log2(("CSAMMarkPage %RGv; scanned=%d\n", pvFault, true));
744 CSAMMarkPage(pVM, (RTRCPTR)pvFault, true);
745 }
746 }
747# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0) */
748 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
749 if (RT_SUCCESS(rc))
750 {
751 /* The page was successfully synced, return to the guest. */
752 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeOutOfSync, c);
753 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2OutOfSync; });
754 return VINF_SUCCESS;
755 }
756 }
757 else
758 {
759 /*
760 * A side effect of not flushing global PDEs is out-of-sync pages due
761 * to physically monitored regions that are no longer valid.
762 * Assume for now it only applies to the read/write flag.
763 */
764 if (RT_SUCCESS(rc) && (uErr & X86_TRAP_PF_RW))
765 {
766 if (uErr & X86_TRAP_PF_US)
767 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncUser));
768 else /* supervisor */
769 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));
770
771
772 /*
773 * Note: Do NOT use PGM_SYNC_NR_PAGES here. That only works if the page is not present, which is not true in this case.
774 */
775 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, 1, uErr);
776 if (RT_SUCCESS(rc))
777 {
778 /*
779 * Page was successfully synced, return to guest.
780 */
781# ifdef VBOX_STRICT
782 RTGCPHYS GCPhys;
783 uint64_t fPageGst;
784 rc = PGMGstGetPage(pVM, pvFault, &fPageGst, &GCPhys);
785 Assert(RT_SUCCESS(rc) && fPageGst & X86_PTE_RW);
786 LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, GCPhys, (uint64_t)fPageGst));
787
788 uint64_t fPageShw;
789 rc = PGMShwGetPage(pVM, pvFault, &fPageShw, NULL);
790 AssertMsg(RT_SUCCESS(rc) && fPageShw & X86_PTE_RW, ("rc=%Rrc fPageShw=%RX64\n", rc, fPageShw));
791# endif /* VBOX_STRICT */
792 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeOutOfSync, c);
793 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2OutOfSyncHndObs; });
794 return VINF_SUCCESS;
795 }
796
797 /* Check to see if we need to emulate the instruction as X86_CR0_WP has been cleared. */
798 if ( CPUMGetGuestCPL(pVM, pRegFrame) == 0
799 && ((CPUMGetGuestCR0(pVM) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG)
800 && (uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_P)) == (X86_TRAP_PF_RW | X86_TRAP_PF_P))
801 {
802 uint64_t fPageGst;
803 rc = PGMGstGetPage(pVM, pvFault, &fPageGst, NULL);
804 if ( RT_SUCCESS(rc)
805 && !(fPageGst & X86_PTE_RW))
806 {
807 rc = PGMInterpretInstruction(pVM, pRegFrame, pvFault);
808 if (RT_SUCCESS(rc))
809 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eWPEmulInRZ);
810 else
811 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eWPEmulToR3);
812 return rc;
813 }
814 AssertMsgFailed(("Unexpected r/w page %RGv flag=%x rc=%Rrc\n", pvFault, (uint32_t)fPageGst, rc));
815 }
816 }
817
818# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
819# ifdef VBOX_STRICT
820 /*
821 * Check for VMM page flags vs. Guest page flags consistency.
822 * Currently only for debug purposes.
823 */
824 if (RT_SUCCESS(rc))
825 {
826 /* Get guest page flags. */
827 uint64_t fPageGst;
828 rc = PGMGstGetPage(pVM, pvFault, &fPageGst, NULL);
829 if (RT_SUCCESS(rc))
830 {
831 uint64_t fPageShw;
832 rc = PGMShwGetPage(pVM, pvFault, &fPageShw, NULL);
833
834 /*
835 * Compare page flags.
836 * Note: we have AVL, A, D bits desynched.
837 */
838 AssertMsg((fPageShw & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)) == (fPageGst & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)),
839 ("Page flags mismatch! pvFault=%RGv GCPhys=%RGp fPageShw=%08llx fPageGst=%08llx\n", pvFault, GCPhys, fPageShw, fPageGst));
840 }
841 else
842 AssertMsgFailed(("PGMGstGetPage rc=%Rrc\n", rc));
843 }
844 else
845 AssertMsgFailed(("PGMGCGetPage rc=%Rrc\n", rc));
846# endif /* VBOX_STRICT */
847# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
848 }
849 STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeOutOfSync, c);
850# endif /* PGM_OUT_OF_SYNC_IN_GC */
851 }
852 else
853 {
854 /*
855 * Page not present in Guest OS or invalid page table address.
856 * This is potential virtual page access handler food.
857 *
858 * For the present we'll say that our access handlers don't
859 * work for this case - we've already discarded the page table
860 * not present case which is identical to this.
861 *
862 * When we perchance find we need this, we will probably have AVL
863 * trees (offset based) to operate on and we can measure their speed
864 * against mapping a page table and probably rearrange this handling
865 * a bit. (Like, searching virtual ranges before checking the
866 * physical address.)
867 */
868 }
869 }
870
871
872# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
873 /*
874 * Conclusion, this is a guest trap.
875 */
876 LogFlow(("PGM: Unhandled #PF -> route trap to recompiler!\n"));
877 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eGuestPFUnh);
878 return VINF_EM_RAW_GUEST_TRAP;
879# else
880 /* present, but not a monitored page; perhaps the guest is probing physical memory */
881 return VINF_EM_RAW_EMULATE_INSTR;
882# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
883
884
885# else /* PGM_SHW_TYPE == PGM_TYPE_NESTED || (PGM_SHW_TYPE == PGM_TYPE_EPT && PGM_GST_TYPE != PGM_TYPE_PROT) */
886
887 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE));
888 return VERR_INTERNAL_ERROR;
889# endif /* PGM_SHW_TYPE == PGM_TYPE_NESTED || (PGM_SHW_TYPE == PGM_TYPE_EPT && PGM_GST_TYPE != PGM_TYPE_PROT) */
890}
891#endif /* !IN_RING3 */
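/* Caller-side sketch (illustrative only): the per-mode handler above is
 * reached through a method table, and its status codes are typically consumed
 * along these lines (the pointer name below is an assumption, not this file's):
 */
#if 0
int rc = pVM->pgm.s.pfnTrap0eHandler(pVM, uErr, pRegFrame, pvFault); /* hypothetical name */
if (rc == VINF_SUCCESS)                  { /* shadow tables fixed up - resume guest execution. */ }
else if (rc == VINF_EM_RAW_GUEST_TRAP)   { /* genuine guest #PF - reflect it to the guest. */ }
else if (rc == VINF_PGM_SYNC_CR3)        { /* conflict or OOM - full SyncCR3 in ring-3. */ }
else if (rc == VINF_EM_RAW_EMULATE_INSTR){ /* handler/MMIO access - emulate the instruction. */ }
#endif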
892
893
894/**
895 * Emulation of the invlpg instruction.
896 *
897 *
898 * @returns VBox status code.
899 *
900 * @param pVM VM handle.
901 * @param GCPtrPage Page to invalidate.
902 *
903 * @remark ASSUMES that the guest is updating before invalidating. This order
904 * isn't required by the CPU, so this is speculative and could cause
905 * trouble.
906 *
907 * @todo Flush page or page directory only if necessary!
908 * @todo Add a #define for simply invalidating the page.
909 */
910PGM_BTH_DECL(int, InvalidatePage)(PVM pVM, RTGCPTR GCPtrPage)
911{
912#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) \
913 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
914 && PGM_SHW_TYPE != PGM_TYPE_EPT
915 int rc;
916
917 LogFlow(("InvalidatePage %RGv\n", GCPtrPage));
918 /*
919 * Get the shadow PD entry and skip out if this PD isn't present.
920 * (Guessing that it is frequent for a shadow PDE to not be present, we do this first.)
921 */
922# if PGM_SHW_TYPE == PGM_TYPE_32BIT
923 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
924 PX86PDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
925
926# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
927 /* Fetch the pgm pool shadow descriptor. */
928 PPGMPOOLPAGE pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
929 Assert(pShwPde);
930# endif
931
932# elif PGM_SHW_TYPE == PGM_TYPE_PAE
933 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT);
934 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
935
936 /* If the shadow PDPE isn't present, then skip the invalidate. */
937 if (!pPdptDst->a[iPdpt].n.u1Present)
938 {
939 Assert(!(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING));
940 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
941 return VINF_SUCCESS;
942 }
943
944# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
945 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
946 PPGMPOOLPAGE pShwPde;
947 PX86PDPAE pPDDst;
948
949 /* Fetch the pgm pool shadow descriptor. */
950 rc = pgmShwGetPaePoolPagePD(&pVM->pgm.s, GCPtrPage, &pShwPde);
951 AssertRCSuccessReturn(rc, rc);
952 Assert(pShwPde);
953
954 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
955 PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
956# else
957 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - pool index only atm! */;
958 PX86PDEPAE pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
959# endif
960
961# else /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */
962 /* PML4 */
963 AssertReturn(pVM->pgm.s.pShwRootR3, VERR_INTERNAL_ERROR);
964
965 const unsigned iPml4 = (GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK;
966 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
967 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
968 PX86PDPAE pPDDst;
969 PX86PDPT pPdptDst;
970 PX86PML4E pPml4eDst;
971 rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, &pPml4eDst, &pPdptDst, &pPDDst);
972 if (rc != VINF_SUCCESS)
973 {
974 AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT, ("Unexpected rc=%Rrc\n", rc));
975 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
976 if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
977 PGM_INVL_GUEST_TLBS();
978 return VINF_SUCCESS;
979 }
980 Assert(pPDDst);
981
982 PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
983 PX86PDPE pPdpeDst = &pPdptDst->a[iPdpt];
984
985 if (!pPdpeDst->n.u1Present)
986 {
987 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
988 if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
989 PGM_INVL_GUEST_TLBS();
990 return VINF_SUCCESS;
991 }
992
993# endif /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */
994
995 const SHWPDE PdeDst = *pPdeDst;
996 if (!PdeDst.n.u1Present)
997 {
998 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
999 return VINF_SUCCESS;
1000 }
1001
1002 /*
1003 * Get the guest PD entry and calc big page.
1004 */
1005# if PGM_GST_TYPE == PGM_TYPE_32BIT
1006 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
1007 const unsigned iPDSrc = GCPtrPage >> GST_PD_SHIFT;
1008 GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
1009# else /* PGM_GST_TYPE != PGM_TYPE_32BIT */
1010 unsigned iPDSrc;
1011# if PGM_GST_TYPE == PGM_TYPE_PAE
1012 X86PDPE PdpeSrc;
1013 PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
1014# else /* AMD64 */
1015 PX86PML4E pPml4eSrc;
1016 X86PDPE PdpeSrc;
1017 PX86PDPAE pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
1018# endif
1019 GSTPDE PdeSrc;
1020
1021 if (pPDSrc)
1022 PdeSrc = pPDSrc->a[iPDSrc];
1023 else
1024 PdeSrc.u = 0;
1025# endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
1026
1027# if PGM_GST_TYPE == PGM_TYPE_AMD64
1028 const bool fIsBigPage = PdeSrc.b.u1Size;
1029# else
1030 const bool fIsBigPage = PdeSrc.b.u1Size && (CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
1031# endif
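 /* Note on the asymmetry above: for legacy 32-bit guests a PDE with the PS
  * bit only maps a big page when CR4.PSE is enabled, whereas in long mode the
  * PS bit is always honoured; hence the AMD64 case needs no CR4 check. */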
1032
1033# ifdef IN_RING3
1034 /*
1035 * If a CR3 Sync is pending we may ignore the invalidate page operation
1036 * depending on the kind of sync and if it's a global page or not.
1037 * This doesn't make sense in GC/R0 so we'll skip it entirely there.
1038 */
1039# ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
1040 if ( VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3)
1041 || ( VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)
1042 && fIsBigPage
1043 && PdeSrc.b.u1Global
1044 )
1045 )
1046# else
1047 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL) )
1048# endif
1049 {
1050 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
1051 return VINF_SUCCESS;
1052 }
1053# endif /* IN_RING3 */
1054
1055# if PGM_GST_TYPE == PGM_TYPE_AMD64
1056 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1057
1058 /* Fetch the pgm pool shadow descriptor. */
1059 PPGMPOOLPAGE pShwPdpt = pgmPoolGetPageByHCPhys(pVM, pPml4eDst->u & X86_PML4E_PG_MASK);
1060 Assert(pShwPdpt);
1061
1062 /* Fetch the pgm pool shadow descriptor. */
1063 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpt].u & SHW_PDPE_PG_MASK);
1064 Assert(pShwPde);
1065
1066 Assert(pPml4eDst->n.u1Present && (pPml4eDst->u & SHW_PDPT_MASK));
1067 RTGCPHYS GCPhysPdpt = pPml4eSrc->u & X86_PML4E_PG_MASK;
1068
1069 if ( !pPml4eSrc->n.u1Present
1070 || pShwPdpt->GCPhys != GCPhysPdpt)
1071 {
1072 LogFlow(("InvalidatePage: Out-of-sync PML4E (P/GCPhys) at %RGv GCPhys=%RGp vs %RGp Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
1073 GCPtrPage, pShwPdpt->GCPhys, GCPhysPdpt, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
1074 pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
1075 pPml4eDst->u = 0;
1076 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
1077 PGM_INVL_GUEST_TLBS();
1078 return VINF_SUCCESS;
1079 }
1080 if ( pPml4eSrc->n.u1User != pPml4eDst->n.u1User
1081 || (!pPml4eSrc->n.u1Write && pPml4eDst->n.u1Write))
1082 {
1083 /*
1084 * Mark not present so we can resync the PML4E when it's used.
1085 */
1086 LogFlow(("InvalidatePage: Out-of-sync PML4E at %RGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
1087 GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
1088 pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
1089 pPml4eDst->u = 0;
1090 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
1091 PGM_INVL_GUEST_TLBS();
1092 }
1093 else if (!pPml4eSrc->n.u1Accessed)
1094 {
1095 /*
1096 * Mark not present so we can set the accessed bit.
1097 */
1098 LogFlow(("InvalidatePage: Out-of-sync PML4E (A) at %RGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
1099 GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
1100 pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
1101 pPml4eDst->u = 0;
1102 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
1103 PGM_INVL_GUEST_TLBS();
1104 }
1105
1106 /* Check if the PDPT entry has changed. */
1107 Assert(pPdpeDst->n.u1Present && pPdpeDst->u & SHW_PDPT_MASK);
1108 RTGCPHYS GCPhysPd = PdpeSrc.u & GST_PDPE_PG_MASK;
1109 if ( !PdpeSrc.n.u1Present
1110 || pShwPde->GCPhys != GCPhysPd)
1111 {
1112 LogFlow(("InvalidatePage: Out-of-sync PDPE (P/GCPhys) at %RGv GCPhys=%RGp vs %RGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
1113 GCPtrPage, pShwPde->GCPhys, GCPhysPd, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
1114 pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
1115 pPdpeDst->u = 0;
1116 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
1117 PGM_INVL_GUEST_TLBS();
1118 return VINF_SUCCESS;
1119 }
1120 if ( PdpeSrc.lm.u1User != pPdpeDst->lm.u1User
1121 || (!PdpeSrc.lm.u1Write && pPdpeDst->lm.u1Write))
1122 {
1123 /*
1124 * Mark not present so we can resync the PDPTE when it's used.
1125 */
1126 LogFlow(("InvalidatePage: Out-of-sync PDPE at %RGv PdpeSrc=%RX64 PdpeDst=%RX64\n",
1127 GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
1128 pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
1129 pPdpeDst->u = 0;
1130 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
1131 PGM_INVL_GUEST_TLBS();
1132 }
1133 else if (!PdpeSrc.lm.u1Accessed)
1134 {
1135 /*
1136 * Mark not present so we can set the accessed bit.
1137 */
1138 LogFlow(("InvalidatePage: Out-of-sync PDPE (A) at %RGv PdpeSrc=%RX64 PdpeDst=%RX64\n",
1139 GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
1140 pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
1141 pPdpeDst->u = 0;
1142 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
1143 PGM_INVL_GUEST_TLBS();
1144 }
1145# endif /* PGM_GST_TYPE == PGM_TYPE_AMD64 */
1146
1147# if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
1148 /*
1149 * Update the shadow PDPE and free all the shadow PD entries if the PDPE is marked not present.
1150 * Note: This shouldn't actually be necessary as we monitor the PDPT page for changes.
1151 */
1152 if (!pPDSrc)
1153 {
1154 /* Guest PDPE not present */
1155 PX86PDPAE pPDDst = pgmShwGetPaePDPtr(&pVM->pgm.s, GCPtrPage);
1156 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1157
1158 Assert(!PdpeSrc.n.u1Present);
1159 LogFlow(("InvalidatePage: guest PDPE %d not present; clear shw pdpe\n", iPdpt));
1160
1161 /* for each page directory entry */
1162 for (unsigned iPD = 0; iPD < X86_PG_PAE_ENTRIES; iPD++)
1163 {
1164 if ( pPDDst->a[iPD].n.u1Present
1165 && !(pPDDst->a[iPD].u & PGM_PDFLAGS_MAPPING))
1166 {
1167 pgmPoolFree(pVM, pPDDst->a[iPD].u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPdpt * X86_PG_PAE_ENTRIES + iPD);
1168 pPDDst->a[iPD].u = 0;
1169 }
1170 }
1171 if (!(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
1172 pPdptDst->a[iPdpt].n.u1Present = 0;
1173 PGM_INVL_GUEST_TLBS();
1174 }
1175 AssertMsg(pVM->pgm.s.fMappingsFixed || (PdpeSrc.u & X86_PDPE_PG_MASK) == pVM->pgm.s.aGCPhysGstPaePDsMonitored[iPdpt], ("%RGp vs %RGp (mon)\n", (PdpeSrc.u & X86_PDPE_PG_MASK), pVM->pgm.s.aGCPhysGstPaePDsMonitored[iPdpt]));
1176# endif
1177
1178
1179 /*
1180 * Deal with the Guest PDE.
1181 */
1182 rc = VINF_SUCCESS;
1183 if (PdeSrc.n.u1Present)
1184 {
1185 if (PdeDst.u & PGM_PDFLAGS_MAPPING)
1186 {
1187 /*
1188 * Conflict - Let SyncPT deal with it to avoid duplicate code.
1189 */
1190 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
1191 Assert(PGMGetGuestMode(pVM) <= PGMMODE_PAE);
1192 rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, GCPtrPage);
1193 }
1194 else if ( PdeSrc.n.u1User != PdeDst.n.u1User
1195 || (!PdeSrc.n.u1Write && PdeDst.n.u1Write))
1196 {
1197 /*
1198 * Mark not present so we can resync the PDE when it's used.
1199 */
1200 LogFlow(("InvalidatePage: Out-of-sync at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
1201 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
1202# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
1203 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
1204# else
1205 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
1206# endif
1207 pPdeDst->u = 0;
1208 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
1209 PGM_INVL_GUEST_TLBS();
1210 }
1211 else if (!PdeSrc.n.u1Accessed)
1212 {
1213 /*
1214 * Mark not present so we can set the accessed bit.
1215 */
1216 LogFlow(("InvalidatePage: Out-of-sync (A) at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
1217 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
1218# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
1219 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
1220# else
1221 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
1222# endif
1223 pPdeDst->u = 0;
1224 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
1225 PGM_INVL_GUEST_TLBS();
1226 }
1227 else if (!fIsBigPage)
1228 {
1229 /*
1230 * 4KB - page.
1231 */
1232 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
1233 RTGCPHYS GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
1234# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1235 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
1236 GCPhys |= (iPDDst & 1) * (PAGE_SIZE/2);
1237# endif
1238 if (pShwPage->GCPhys == GCPhys)
1239 {
1240# if 0 /* likely cause of a major performance regression; must be SyncPageWorkerTrackDeref then */
1241 const unsigned iPTEDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1242 PSHWPT pPT = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1243 if (pPT->a[iPTEDst].n.u1Present)
1244 {
1245# ifdef PGMPOOL_WITH_USER_TRACKING
1246 /* This is very unlikely with caching/monitoring enabled. */
1247 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPT->a[iPTEDst].u & SHW_PTE_PG_MASK);
1248# endif
1249 pPT->a[iPTEDst].u = 0;
1250 }
1251# else /* Syncing it here isn't 100% safe and it's probably not worth spending time syncing it. */
1252 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, GCPtrPage, 1, 0);
1253 if (RT_SUCCESS(rc))
1254 rc = VINF_SUCCESS;
1255# endif
1256 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage4KBPages));
1257 PGM_INVL_PG(GCPtrPage);
1258 }
1259 else
1260 {
1261 /*
1262 * The page table address changed.
1263 */
1264 LogFlow(("InvalidatePage: Out-of-sync at %RGp PdeSrc=%RX64 PdeDst=%RX64 ShwGCPhys=%RGp iPDDst=%#x\n",
1265 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, iPDDst));
1266# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
1267 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
1268# else
1269 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
1270# endif
1271 pPdeDst->u = 0;
1272 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
1273 PGM_INVL_GUEST_TLBS();
1274 }
1275 }
1276 else
1277 {
1278 /*
1279 * 2/4MB - page.
1280 */
1281 /* Before freeing the page, check if anything really changed. */
1282 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
1283 RTGCPHYS GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
1284# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1285 /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs. */
1286 GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
1287# endif
1288 if ( pShwPage->GCPhys == GCPhys
1289 && pShwPage->enmKind == BTH_PGMPOOLKIND_PT_FOR_BIG)
1290 {
1291 /* ASSUMES the given bits are identical for 4M and normal PDEs */
1292 /** @todo PAT */
1293 if ( (PdeSrc.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD))
1294 == (PdeDst.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD))
1295 && ( PdeSrc.b.u1Dirty /** @todo rainy day: What about read-only 4M pages? not very common, but still... */
1296 || (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)))
1297 {
1298 LogFlow(("Skipping flush for big page containing %RGv (PD=%X .u=%RX64)-> nothing has changed!\n", GCPtrPage, iPDSrc, PdeSrc.u));
1299 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage4MBPagesSkip));
1300 return VINF_SUCCESS;
1301 }
1302 }
1303
1304 /*
1305 * Ok, the page table is present and it's been changed in the guest.
1306 * If we're in host context, we'll just mark it as not present taking the lazy approach.
1307 * We could do this for some flushes in GC too, but we need an algorithm for
1308 * deciding which 4MB pages contain code likely to be executed very soon.
1309 */
1310 LogFlow(("InvalidatePage: Out-of-sync PD at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
1311 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
1312# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
1313 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
1314# else
1315 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
1316# endif
1317 pPdeDst->u = 0;
1318 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage4MBPages));
1319 PGM_INVL_BIG_PG(GCPtrPage);
1320 }
1321 }
1322 else
1323 {
1324 /*
1325 * Page directory is not present, mark shadow PDE not present.
1326 */
1327 if (!(PdeDst.u & PGM_PDFLAGS_MAPPING))
1328 {
1329# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
1330 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
1331# else
1332 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
1333# endif
1334 pPdeDst->u = 0;
1335 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
1336 PGM_INVL_PG(GCPtrPage);
1337 }
1338 else
1339 {
1340 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
1341 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDMappings));
1342 }
1343 }
1344
1345 return rc;
1346
1347#else /* guest real and protected mode */
1348 /* There's no such thing as InvalidatePage when paging is disabled, so just ignore. */
1349 return VINF_SUCCESS;
1350#endif
1351}
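/* Usage sketch (illustrative only): this is the mode-specific backend behind
 * guest 'invlpg' emulation; a caller would look roughly like this (how the
 * dispatch reaches it is not defined in this file):
 */
#if 0
rc = PGM_BTH_NAME(InvalidatePage)(pVM, GCPtrPage);
AssertRC(rc);
#endif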
1352
1353
1354#ifdef PGMPOOL_WITH_USER_TRACKING
1355/**
1356 * Update the tracking of shadowed pages.
1357 *
1358 * @param pVM The VM handle.
1359 * @param pShwPage The shadow page.
1360 * @param HCPhys The physical page that is being dereferenced.
1361 */
1362DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVM pVM, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys)
1363{
1364# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
1365 STAM_PROFILE_START(&pVM->pgm.s.StatTrackDeref, a);
1366 LogFlow(("SyncPageWorkerTrackDeref: Damn HCPhys=%RHp pShwPage->idx=%#x!!!\n", HCPhys, pShwPage->idx));
1367
1368 /** @todo If this turns out to be a bottleneck (*very* likely), two things can be done:
1369 * 1. have a medium sized HCPhys -> GCPhys TLB (hash?)
1370 * 2. write protect all shadowed pages. I.e. implement caching.
1371 */
1372 /*
1373 * Find the guest address.
1374 */
1375 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1376 pRam;
1377 pRam = pRam->CTX_SUFF(pNext))
1378 {
1379 unsigned iPage = pRam->cb >> PAGE_SHIFT;
1380 while (iPage-- > 0)
1381 {
1382 if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
1383 {
1384 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1385 pgmTrackDerefGCPhys(pPool, pShwPage, &pRam->aPages[iPage]);
1386 pShwPage->cPresent--;
1387 pPool->cPresent--;
1388 STAM_PROFILE_STOP(&pVM->pgm.s.StatTrackDeref, a);
1389 return;
1390 }
1391 }
1392 }
1393
1394 for (;;)
1395 AssertReleaseMsgFailed(("HCPhys=%RHp wasn't found!\n", HCPhys));
1396# else /* !PGMPOOL_WITH_GCPHYS_TRACKING */
1397 pShwPage->cPresent--;
1398 pVM->pgm.s.CTX_SUFF(pPool)->cPresent--;
1399# endif /* !PGMPOOL_WITH_GCPHYS_TRACKING */
1400}
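/* The reverse HCPhys -> PGMPAGE lookup above is a linear scan over all RAM
 * ranges and their pages, i.e. O(cPages) per dereference; the @todo above
 * suggests a small HCPhys-hashed TLB in front of it, conceptually:
 *     iEntry = (HCPhys >> PAGE_SHIFT) % cTlbEntries;
 *     if (aTlb[iEntry].HCPhys == HCPhys) return aTlb[iEntry].pPage;
 * (illustrative only; no such cache exists at this revision). */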
1401
1402
1403/**
1404 * Update the tracking of shadowed pages.
1405 *
1406 * @param pVM The VM handle.
1407 * @param pShwPage The shadow page.
1408 * @param u16 The top 16 bits of pPage->HCPhys.
1409 * @param pPage Pointer to the guest page. This will be modified.
1410 * @param iPTDst The index into the shadow table.
1411 */
1412DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackAddref)(PVM pVM, PPGMPOOLPAGE pShwPage, uint16_t u16, PPGMPAGE pPage, const unsigned iPTDst)
1413{
1414# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
1415 /*
1416 * We're making certain assumptions about the placement of cRef and idx.
1417 */
1418 Assert(MM_RAM_FLAGS_IDX_SHIFT == 48);
1419 Assert(MM_RAM_FLAGS_CREFS_SHIFT > MM_RAM_FLAGS_IDX_SHIFT);
1420
1421 /*
1422 * Just deal with the simple first time here.
1423 */
1424 if (!u16)
1425 {
1426 STAM_COUNTER_INC(&pVM->pgm.s.StatTrackVirgin);
1427 u16 = (1 << PGMPOOL_TD_CREFS_SHIFT) | pShwPage->idx;
1428 }
1429 else
1430 u16 = pgmPoolTrackPhysExtAddref(pVM, u16, pShwPage->idx);
1431
1432 /* write back */
1433 Log2(("SyncPageWorkerTrackAddRef: u16=%#x->%#x iPTDst=%#x\n", u16, PGM_PAGE_GET_TRACKING(pPage), iPTDst));
1434 PGM_PAGE_SET_TRACKING(pPage, u16);
1435
1436# endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
1437
1438 /* update statistics. */
1439 pVM->pgm.s.CTX_SUFF(pPool)->cPresent++;
1440 pShwPage->cPresent++;
1441 if (pShwPage->iFirstPresent > iPTDst)
1442 pShwPage->iFirstPresent = iPTDst;
1443}
1444#endif /* PGMPOOL_WITH_USER_TRACKING */
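/* On the tracking word written above: for a page shadowed exactly once, u16
 * packs a reference count with the owning pool page index,
 * (1 << PGMPOOL_TD_CREFS_SHIFT) | pShwPage->idx; once a page gains further
 * shadow references, pgmPoolTrackPhysExtAddref() switches it over to an
 * external reference-list entry instead of the inline encoding. */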
1445
1446
1447/**
1448 * Creates a 4K shadow page for a guest page.
1449 *
1450 * For 4M pages the caller must convert the PDE4M to a PTE; this includes adjusting the
1451 * physical address. Of the PdeSrc argument only the flags are used. No page structures
1452 * will be mapped in this function.
1453 *
1454 * @param pVM VM handle.
1455 * @param pPteDst Destination page table entry.
1456 * @param PdeSrc Source page directory entry (i.e. Guest OS page directory entry).
1457 * Can safely assume that only the flags are being used.
1458 * @param PteSrc Source page table entry (i.e. Guest OS page table entry).
1459 * @param pShwPage Pointer to the shadow page.
1460 * @param iPTDst The index into the shadow table.
1461 *
1462 * @remark Not used for 2/4MB pages!
1463 */
1464DECLINLINE(void) PGM_BTH_NAME(SyncPageWorker)(PVM pVM, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc, PPGMPOOLPAGE pShwPage, unsigned iPTDst)
1465{
1466 if (PteSrc.n.u1Present)
1467 {
1468 /*
1469 * Find the ram range.
1470 */
1471 PPGMPAGE pPage;
1472 int rc = pgmPhysGetPageEx(&pVM->pgm.s, PteSrc.u & GST_PTE_PG_MASK, &pPage);
1473 if (RT_SUCCESS(rc))
1474 {
1475 /** @todo investigate PWT, PCD and PAT. */
1476 /*
1477 * Make page table entry.
1478 */
1479 const RTHCPHYS HCPhys = pPage->HCPhys; /** @todo FLAGS */
1480 SHWPTE PteDst;
1481 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1482 {
1483 /** @todo r=bird: Are we actually handling dirty and access bits for pages with access handlers correctly? No. */
1484 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
1485 {
1486#if PGM_SHW_TYPE == PGM_TYPE_EPT
1487 PteDst.u = (HCPhys & EPT_PTE_PG_MASK);
1488 PteDst.n.u1Present = 1;
1489 PteDst.n.u1Execute = 1;
1490 PteDst.n.u1IgnorePAT = 1;
1491 PteDst.n.u3EMT = VMX_EPT_MEMTYPE_WB;
1492 /* PteDst.n.u1Write = 0 && PteDst.n.u1Size = 0 */
1493#else
1494 PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
1495 | (HCPhys & X86_PTE_PAE_PG_MASK);
1496#endif
1497 }
1498 else
1499 {
1500 LogFlow(("SyncPageWorker: monitored page (%RHp) -> mark not present\n", HCPhys));
1501 PteDst.u = 0;
1502 }
1503 /** @todo count these two kinds. */
1504 }
1505 else
1506 {
1507#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
1508 /*
1509 * If the page or page directory entry is not marked accessed,
1510 * we mark the page not present.
1511 */
1512 if (!PteSrc.n.u1Accessed || !PdeSrc.n.u1Accessed)
1513 {
1514 LogFlow(("SyncPageWorker: page and or page directory not accessed -> mark not present\n"));
1515 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,AccessedPage));
1516 PteDst.u = 0;
1517 }
1518 else
1519 /*
1520 * If the page is not flagged as dirty and is writable, then make it read-only, so we can set the dirty bit
1521 * when the page is modified.
1522 */
1523 if (!PteSrc.n.u1Dirty && (PdeSrc.n.u1Write & PteSrc.n.u1Write))
1524 {
1525 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPage));
1526 PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
1527 | (HCPhys & X86_PTE_PAE_PG_MASK)
1528 | PGM_PTFLAGS_TRACK_DIRTY;
1529 }
1530 else
1531#endif
1532 {
1533 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageSkipped));
1534#if PGM_SHW_TYPE == PGM_TYPE_EPT
1535 PteDst.u = (HCPhys & EPT_PTE_PG_MASK);
1536 PteDst.n.u1Present = 1;
1537 PteDst.n.u1Write = 1;
1538 PteDst.n.u1Execute = 1;
1539 PteDst.n.u1IgnorePAT = 1;
1540 PteDst.n.u3EMT = VMX_EPT_MEMTYPE_WB;
1541 /* PteDst.n.u1Size = 0 */
1542#else
1543 PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
1544 | (HCPhys & X86_PTE_PAE_PG_MASK);
1545#endif
1546 }
1547 }
1548
1549#ifdef PGMPOOL_WITH_USER_TRACKING
1550 /*
1551 * Keep user track up to date.
1552 */
1553 if (PteDst.n.u1Present)
1554 {
1555 if (!pPteDst->n.u1Present)
1556 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst);
1557 else if ((pPteDst->u & SHW_PTE_PG_MASK) != (PteDst.u & SHW_PTE_PG_MASK))
1558 {
1559 Log2(("SyncPageWorker: deref! *pPteDst=%RX64 PteDst=%RX64\n", (uint64_t)pPteDst->u, (uint64_t)PteDst.u));
1560 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
1561 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst);
1562 }
1563 }
1564 else if (pPteDst->n.u1Present)
1565 {
1566 Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u));
1567 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
1568 }
1569#endif /* PGMPOOL_WITH_USER_TRACKING */
1570
1571 /*
1572 * Update statistics and commit the entry.
1573 */
1574#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
1575 if (!PteSrc.n.u1Global)
1576 pShwPage->fSeenNonGlobal = true;
1577#endif
1578 *pPteDst = PteDst;
1579 }
1580 /* else MMIO or invalid page, we must handle them manually in the #PF handler. */
1581 /** @todo count these. */
1582 }
1583 else
1584 {
1585 /*
1586 * Page not-present.
1587 */
1588 LogFlow(("SyncPageWorker: page not present in Pte\n"));
1589#ifdef PGMPOOL_WITH_USER_TRACKING
1590 /* Keep user track up to date. */
1591 if (pPteDst->n.u1Present)
1592 {
1593 Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u));
1594 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
1595 }
1596#endif /* PGMPOOL_WITH_USER_TRACKING */
1597 pPteDst->u = 0;
1598 /** @todo count these. */
1599 }
1600}
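 /* Worked example of the dirty-bit merge above (illustrative values): a guest
  * PTE with P=1 RW=1 US=1 A=1 D=0 under a writable PDE takes the TRACK_DIRTY
  * branch, e.g. PteSrc.u = 0x00001027 yields a shadow PTE with the RW bit
  * cleared, the host physical address substituted and PGM_PTFLAGS_TRACK_DIRTY
  * set. The first guest write then faults, CheckPageFault() below sets the
  * guest D bit, makes the shadow PTE writable again and restarts the
  * instruction.
  */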
1601
1602
1603/**
1604 * Syncs a guest OS page.
1605 *
1606 * There are no conflicts at this point, neither is there any need for
1607 * page table allocations.
1608 *
1609 * @returns VBox status code.
1610 * @returns VINF_PGM_SYNCPAGE_MODIFIED_PDE if it modifies the PDE in any way.
1611 * @param pVM VM handle.
1612 * @param PdeSrc Page directory entry of the guest.
1613 * @param GCPtrPage Guest context page address.
1614 * @param cPages Number of pages to sync (PGM_SYNC_N_PAGES) (default=1).
1615 * @param uErr Fault error (X86_TRAP_PF_*).
1616 */
1617PGM_BTH_DECL(int, SyncPage)(PVM pVM, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr)
1618{
1619 LogFlow(("SyncPage: GCPtrPage=%RGv cPages=%u uErr=%#x\n", GCPtrPage, cPages, uErr));
1620
1621#if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
1622 || PGM_GST_TYPE == PGM_TYPE_PAE \
1623 || PGM_GST_TYPE == PGM_TYPE_AMD64) \
1624 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
1625 && PGM_SHW_TYPE != PGM_TYPE_EPT
1626
1627# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
1628 bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
1629# endif
1630
1631 /*
1632 * Assert preconditions.
1633 */
1634 Assert(PdeSrc.n.u1Present);
1635 Assert(cPages);
1636 STAM_COUNTER_INC(&pVM->pgm.s.StatSyncPagePD[(GCPtrPage >> GST_PD_SHIFT) & GST_PD_MASK]);
1637
1638 /*
1639 * Get the shadow PDE, find the shadow page table in the pool.
1640 */
1641# if PGM_SHW_TYPE == PGM_TYPE_32BIT
1642 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
1643 PX86PDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
1644
1645# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1646 /* Fetch the pgm pool shadow descriptor. */
1647 PPGMPOOLPAGE pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
1648 Assert(pShwPde);
1649# endif
1650
1651# elif PGM_SHW_TYPE == PGM_TYPE_PAE
1652
1653# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1654 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
1655 PPGMPOOLPAGE pShwPde;
1656 PX86PDPAE pPDDst;
1657
1658 /* Fetch the pgm pool shadow descriptor. */
1659 int rc = pgmShwGetPaePoolPagePD(&pVM->pgm.s, GCPtrPage, &pShwPde);
1660 AssertRCSuccessReturn(rc, rc);
1661 Assert(pShwPde);
1662
1663 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
1664 PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
1665# else
1666 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - only pool index atm! */;
1667 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT);
1668 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s); NOREF(pPdptDst);
1669 PX86PDEPAE pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
1670 AssertReturn(pPdeDst, VERR_INTERNAL_ERROR);
1671# endif
1672# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
1673 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
1674 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1675 PX86PDPAE pPDDst;
1676 PX86PDPT pPdptDst;
1677
1678 int rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, NULL, &pPdptDst, &pPDDst);
1679 AssertRCSuccessReturn(rc, rc);
1680 Assert(pPDDst && pPdptDst);
1681 PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
1682# endif
1683
1684 SHWPDE PdeDst = *pPdeDst;
1685 AssertMsg(PdeDst.n.u1Present, ("%p=%llx\n", pPdeDst, (uint64_t)PdeDst.u));
1686 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
1687
1688# if PGM_GST_TYPE == PGM_TYPE_AMD64
1689 /* Fetch the pgm pool shadow descriptor. */
1690 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
1691 Assert(pShwPde);
1692# endif
1693
1694 /*
1695 * Check that the page is present and that the shadow PDE isn't out of sync.
1696 */
1697# if PGM_GST_TYPE == PGM_TYPE_AMD64
1698 const bool fBigPage = PdeSrc.b.u1Size;
1699# else
1700 const bool fBigPage = PdeSrc.b.u1Size && (CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
1701# endif
1702 RTGCPHYS GCPhys;
1703 if (!fBigPage)
1704 {
1705 GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
1706# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1707 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
1708 GCPhys |= (iPDDst & 1) * (PAGE_SIZE/2);
1709# endif
1710 }
1711 else
1712 {
1713 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
1714# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1715 /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
1716 GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
1717# endif
1718 }
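 /* Example of the sub-page selection above for a 32-bit guest on a PAE shadow
  * (illustrative numbers): a guest 4KB page table covers 4MB with 1024 entries,
  * a PAE shadow page table covers 2MB with 512 entries, so each guest PT is
  * backed by two shadow PTs. For GCPtrPage = 0x00600000, iPDDst = 3 (odd), so
  * the pool key gets GCPhys | 0x800 (PAGE_SIZE / 2), i.e. the second half of
  * the guest PT; for a big page, GCPtrPage & (1 << X86_PD_PAE_SHIFT) likewise
  * selects the upper or lower 2MB half of the 4MB page.
  */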
1719 if ( pShwPage->GCPhys == GCPhys
1720 && PdeSrc.n.u1Present
1721 && (PdeSrc.n.u1User == PdeDst.n.u1User)
1722 && (PdeSrc.n.u1Write == PdeDst.n.u1Write || !PdeDst.n.u1Write)
1723# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
1724 && (!fNoExecuteBitValid || PdeSrc.n.u1NoExecute == PdeDst.n.u1NoExecute)
1725# endif
1726 )
1727 {
1728 /*
1729 * Check that the PDE is marked accessed already.
1730 * Since we set the accessed bit *before* getting here on a #PF, this
1731 * check is only meant for dealing with non-#PF'ing paths.
1732 */
1733 if (PdeSrc.n.u1Accessed)
1734 {
1735 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1736 if (!fBigPage)
1737 {
1738 /*
1739 * 4KB Page - Map the guest page table.
1740 */
1741 PGSTPT pPTSrc;
1742 int rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
1743 if (RT_SUCCESS(rc))
1744 {
1745# ifdef PGM_SYNC_N_PAGES
1746 Assert(cPages == 1 || !(uErr & X86_TRAP_PF_P));
1747 if (cPages > 1 && !(uErr & X86_TRAP_PF_P))
1748 {
1749 /*
1750 * This code path is currently only taken when the caller is PGMTrap0eHandler
1751 * for non-present pages!
1752 *
1753 * We're setting PGM_SYNC_NR_PAGES pages around the faulting page to sync it and
1754 * deal with locality.
1755 */
1756 unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1757# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1758 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
1759 const unsigned offPTSrc = ((GCPtrPage >> SHW_PD_SHIFT) & 1) * 512;
1760# else
1761 const unsigned offPTSrc = 0;
1762# endif
1763 const unsigned iPTDstEnd = RT_MIN(iPTDst + PGM_SYNC_NR_PAGES / 2, RT_ELEMENTS(pPTDst->a));
1764 if (iPTDst < PGM_SYNC_NR_PAGES / 2)
1765 iPTDst = 0;
1766 else
1767 iPTDst -= PGM_SYNC_NR_PAGES / 2;
1768 for (; iPTDst < iPTDstEnd; iPTDst++)
1769 {
1770 if (!pPTDst->a[iPTDst].n.u1Present)
1771 {
1772 GSTPTE PteSrc = pPTSrc->a[offPTSrc + iPTDst];
1773 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(GST_PT_MASK << GST_PT_SHIFT)) | ((offPTSrc + iPTDst) << PAGE_SHIFT);
1774 NOREF(GCPtrCurPage);
1775#ifndef IN_RING0
1776 /*
1777 * Assuming kernel code will be marked as supervisor - and not as user level
1778 * code executed using a conforming code selector - and marked as read-only.
1779 * Also assume that if we're monitoring a page, it's of no interest to CSAM.
1780 */
1781 PPGMPAGE pPage;
1782 if ( ((PdeSrc.u & PteSrc.u) & (X86_PTE_RW | X86_PTE_US))
1783 || iPTDst == ((GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK) /* always sync GCPtrPage */
1784 || !CSAMDoesPageNeedScanning(pVM, (RTRCPTR)GCPtrCurPage)
1785 || ( (pPage = pgmPhysGetPage(&pVM->pgm.s, PteSrc.u & GST_PTE_PG_MASK))
1786 && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1787 )
1788#endif /* else: CSAM not active */
1789 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
1790 Log2(("SyncPage: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx%s\n",
1791 GCPtrCurPage, PteSrc.n.u1Present,
1792 PteSrc.n.u1Write & PdeSrc.n.u1Write,
1793 PteSrc.n.u1User & PdeSrc.n.u1User,
1794 (uint64_t)PteSrc.u,
1795 (uint64_t)pPTDst->a[iPTDst].u,
1796 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1797 }
1798 }
1799 }
1800 else
1801# endif /* PGM_SYNC_N_PAGES */
1802 {
1803 const unsigned iPTSrc = (GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK;
1804 GSTPTE PteSrc = pPTSrc->a[iPTSrc];
1805 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1806 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
1807 Log2(("SyncPage: 4K %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s\n",
1808 GCPtrPage, PteSrc.n.u1Present,
1809 PteSrc.n.u1Write & PdeSrc.n.u1Write,
1810 PteSrc.n.u1User & PdeSrc.n.u1User,
1811 (uint64_t)PteSrc.u,
1812 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1813 }
1814 }
1815 else /* MMIO or invalid page: emulated in #PF handler. */
1816 {
1817 LogFlow(("PGM_GCPHYS_2_PTR %RGp failed with %Rrc\n", GCPhys, rc));
1818 Assert(!pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK].n.u1Present);
1819 }
1820 }
1821 else
1822 {
1823 /*
1824 * 4/2MB page - lazy syncing shadow 4K pages.
1825 * (There are many causes of getting here, it's no longer only CSAM.)
1826 */
1827 /* Calculate the GC physical address of this 4KB shadow page. */
1828 RTGCPHYS GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc) | (GCPtrPage & GST_BIG_PAGE_OFFSET_MASK);
1829 /* Find ram range. */
1830 PPGMPAGE pPage;
1831 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1832 if (RT_SUCCESS(rc))
1833 {
1834 /*
1835 * Make shadow PTE entry.
1836 */
1837 const RTHCPHYS HCPhys = pPage->HCPhys; /** @todo PAGE FLAGS */
1838 SHWPTE PteDst;
1839 PteDst.u = (PdeSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
1840 | (HCPhys & X86_PTE_PAE_PG_MASK);
1841 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1842 {
1843 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
1844 PteDst.n.u1Write = 0;
1845 else
1846 PteDst.u = 0;
1847 }
1848 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1849# ifdef PGMPOOL_WITH_USER_TRACKING
1850 if (PteDst.n.u1Present && !pPTDst->a[iPTDst].n.u1Present)
1851 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst);
1852# endif
1853 pPTDst->a[iPTDst] = PteDst;
1854
1855
1856 /*
1857 * If the page is not flagged as dirty and is writable, then make it read-only
1858 * at PD level, so we can set the dirty bit when the page is modified.
1859 *
1860 * ASSUMES that page access handlers are implemented on page table entry level.
1861 * Thus we will first catch the dirty access and set PDE.D and restart. If
1862 * there is an access handler, we'll trap again and let it work on the problem.
1863 */
1864 /** @todo r=bird: figure out why we need this here, SyncPT should've taken care of this already.
1865 * As for invlpg, it simply frees the whole shadow PT.
1866 * ...It's possibly because the guest clears it and the guest doesn't really tell us... */
1867 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
1868 {
1869 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageBig));
1870 PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
1871 PdeDst.n.u1Write = 0;
1872 }
1873 else
1874 {
1875 PdeDst.au32[0] &= ~PGM_PDFLAGS_TRACK_DIRTY;
1876 PdeDst.n.u1Write = PdeSrc.n.u1Write;
1877 }
1878 *pPdeDst = PdeDst;
1879 Log2(("SyncPage: BIG %RGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx} GCPhys=%RGp%s\n",
1880 GCPtrPage, PdeSrc.n.u1Present, PdeSrc.n.u1Write, PdeSrc.n.u1User, (uint64_t)PdeSrc.u, GCPhys,
1881 PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1882 }
1883 else
1884 LogFlow(("PGM_GCPHYS_2_PTR %RGp (big) failed with %Rrc\n", GCPhys, rc));
1885 }
1886 return VINF_SUCCESS;
1887 }
1888 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPagePDNAs));
1889 }
1890 else
1891 {
1892 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPagePDOutOfSync));
1893 Log2(("SyncPage: Out-Of-Sync PDE at %RGp PdeSrc=%RX64 PdeDst=%RX64 (GCPhys %RGp vs %RGp)\n",
1894 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, GCPhys));
1895 }
1896
1897 /*
1898 * Mark the PDE not present. Restart the instruction and let #PF call SyncPT.
1899 * Yea, I'm lazy.
1900 */
1901 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1902# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
1903 pgmPoolFreeByPage(pPool, pShwPage, pShwPde->idx, iPDDst);
1904# else
1905 pgmPoolFreeByPage(pPool, pShwPage, SHW_POOL_ROOT_IDX, iPDDst);
1906# endif
1907
1908 pPdeDst->u = 0;
1909 PGM_INVL_GUEST_TLBS();
1910 return VINF_PGM_SYNCPAGE_MODIFIED_PDE;
1911
1912#elif (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
1913 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
1914 && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
1915
1916# ifdef PGM_SYNC_N_PAGES
1917 /*
1918 * Get the shadow PDE, find the shadow page table in the pool.
1919 */
1920# if PGM_SHW_TYPE == PGM_TYPE_32BIT
1921 X86PDE PdeDst = pgmShwGet32BitPDE(&pVM->pgm.s, GCPtrPage);
1922
1923# elif PGM_SHW_TYPE == PGM_TYPE_PAE
1924 X86PDEPAE PdeDst = pgmShwGetPaePDE(&pVM->pgm.s, GCPtrPage);
1925
1926# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
1927 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
1928 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; NOREF(iPdpt);
1929 PX86PDPAE pPDDst;
1930 X86PDEPAE PdeDst;
1931 PX86PDPT pPdptDst;
1932
1933 int rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, NULL, &pPdptDst, &pPDDst);
1934 AssertRCSuccessReturn(rc, rc);
1935 Assert(pPDDst && pPdptDst);
1936 PdeDst = pPDDst->a[iPDDst];
1937# elif PGM_SHW_TYPE == PGM_TYPE_EPT
1938 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
1939 PEPTPD pPDDst;
1940 EPTPDE PdeDst;
1941
1942 int rc = pgmShwGetEPTPDPtr(pVM, GCPtrPage, NULL, &pPDDst);
1943 if (rc != VINF_SUCCESS)
1944 {
1945 AssertRC(rc);
1946 return rc;
1947 }
1948 Assert(pPDDst);
1949 PdeDst = pPDDst->a[iPDDst];
1950# endif
1951 AssertMsg(PdeDst.n.u1Present, ("%#llx\n", (uint64_t)PdeDst.u));
1952 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
1953 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1954
1955 Assert(cPages == 1 || !(uErr & X86_TRAP_PF_P));
1956 if (cPages > 1 && !(uErr & X86_TRAP_PF_P))
1957 {
1958 /*
1959 * This code path is currently only taken when the caller is PGMTrap0eHandler
1960 * for non-present pages!
1961 *
1962 * We're setting PGM_SYNC_NR_PAGES pages around the faulting page to sync it and
1963 * deal with locality.
1964 */
1965 unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1966 const unsigned iPTDstEnd = RT_MIN(iPTDst + PGM_SYNC_NR_PAGES / 2, RT_ELEMENTS(pPTDst->a));
1967 if (iPTDst < PGM_SYNC_NR_PAGES / 2)
1968 iPTDst = 0;
1969 else
1970 iPTDst -= PGM_SYNC_NR_PAGES / 2;
1971 for (; iPTDst < iPTDstEnd; iPTDst++)
1972 {
1973 if (!pPTDst->a[iPTDst].n.u1Present)
1974 {
1975 GSTPTE PteSrc;
1976
1977 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
1978
1979 /* Fake the page table entry */
1980 PteSrc.u = GCPtrCurPage;
1981 PteSrc.n.u1Present = 1;
1982 PteSrc.n.u1Dirty = 1;
1983 PteSrc.n.u1Accessed = 1;
1984 PteSrc.n.u1Write = 1;
1985 PteSrc.n.u1User = 1;
1986
1987 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
1988
1989 Log2(("SyncPage: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx%s\n",
1990 GCPtrCurPage, PteSrc.n.u1Present,
1991 PteSrc.n.u1Write & PdeSrc.n.u1Write,
1992 PteSrc.n.u1User & PdeSrc.n.u1User,
1993 (uint64_t)PteSrc.u,
1994 (uint64_t)pPTDst->a[iPTDst].u,
1995 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1996 }
1997 else
1998 Log4(("%RGv iPTDst=%x pPTDst->a[iPTDst] %RX64\n", (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT), iPTDst, pPTDst->a[iPTDst].u));
1999 }
2000 }
2001 else
2002# endif /* PGM_SYNC_N_PAGES */
2003 {
2004 GSTPTE PteSrc;
2005 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
2006 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
2007
2008 /* Fake the page table entry */
2009 PteSrc.u = GCPtrCurPage;
2010 PteSrc.n.u1Present = 1;
2011 PteSrc.n.u1Dirty = 1;
2012 PteSrc.n.u1Accessed = 1;
2013 PteSrc.n.u1Write = 1;
2014 PteSrc.n.u1User = 1;
2015 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
2016
2017 Log2(("SyncPage: 4K %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}PteDst=%08llx%s\n",
2018 GCPtrPage, PteSrc.n.u1Present,
2019 PteSrc.n.u1Write & PdeSrc.n.u1Write,
2020 PteSrc.n.u1User & PdeSrc.n.u1User,
2021 (uint64_t)PteSrc.u,
2022 (uint64_t)pPTDst->a[iPTDst].u,
2023 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
2024 }
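 /* Net effect of the faked entries above (illustrative): without guest paging,
  * guest-virtual equals guest-physical, so for GCPtrCurPage = 0x12345000 the
  * synthesized PteSrc.u works out to 0x12345000 | 0x67 (P, RW, US, A and D
  * set), which SyncPageWorker() then maps to the corresponding host page.
  */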
2025 return VINF_SUCCESS;
2026
2027#else
2028 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE));
2029 return VERR_INTERNAL_ERROR;
2030#endif
2031}
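/* Worked example of the PGM_SYNC_N_PAGES window used in both paths above,
 * assuming PGM_SYNC_NR_PAGES is 8 (see PGMInternal.h for the real value): a
 * not-present fault at iPTDst = 2 clamps the start to 0 and syncs entries
 * [0..6), while one at iPTDst = 100 syncs [96..104). Present faults
 * (X86_TRAP_PF_P set) always sync exactly the one faulting entry.
 */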
2032
2033
2034#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
2035/**
2036 * Investigate page fault and handle write protection page faults caused by
2037 * dirty bit tracking.
2038 *
2039 * @returns VBox status code.
2040 * @param pVM VM handle.
2041 * @param uErr Page fault error code.
2042 * @param pPdeDst Shadow page directory entry.
2043 * @param pPdeSrc Guest page directory entry.
2044 * @param GCPtrPage Guest context page address.
2045 */
2046PGM_BTH_DECL(int, CheckPageFault)(PVM pVM, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage)
2047{
2048 bool fWriteProtect = !!(CPUMGetGuestCR0(pVM) & X86_CR0_WP);
2049 bool fUserLevelFault = !!(uErr & X86_TRAP_PF_US);
2050 bool fWriteFault = !!(uErr & X86_TRAP_PF_RW);
2051# if PGM_GST_TYPE == PGM_TYPE_AMD64
2052 bool fBigPagesSupported = true;
2053# else
2054 bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
2055# endif
2056# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
2057 bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
2058# endif
2059 unsigned uPageFaultLevel;
2060 int rc;
2061
2062 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2063 LogFlow(("CheckPageFault: GCPtrPage=%RGv uErr=%#x PdeSrc=%08x\n", GCPtrPage, uErr, pPdeSrc->u));
2064
2065# if PGM_GST_TYPE == PGM_TYPE_PAE \
2066 || PGM_GST_TYPE == PGM_TYPE_AMD64
2067
2068# if PGM_GST_TYPE == PGM_TYPE_AMD64
2069 PX86PML4E pPml4eSrc;
2070 PX86PDPE pPdpeSrc;
2071
2072 pPdpeSrc = pgmGstGetLongModePDPTPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc);
2073 Assert(pPml4eSrc);
2074
2075 /*
2076 * Real page fault? (PML4E level)
2077 */
2078 if ( (uErr & X86_TRAP_PF_RSVD)
2079 || !pPml4eSrc->n.u1Present
2080 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && pPml4eSrc->n.u1NoExecute)
2081 || (fWriteFault && !pPml4eSrc->n.u1Write && (fUserLevelFault || fWriteProtect))
2082 || (fUserLevelFault && !pPml4eSrc->n.u1User)
2083 )
2084 {
2085 uPageFaultLevel = 0;
2086 goto l_UpperLevelPageFault;
2087 }
2088 Assert(pPdpeSrc);
2089
2090# else /* PAE */
2091 PX86PDPE pPdpeSrc = pgmGstGetPaePDPEPtr(&pVM->pgm.s, GCPtrPage);
2092# endif /* PAE */
2093
2094 /*
2095 * Real page fault? (PDPE level)
2096 */
2097 if ( (uErr & X86_TRAP_PF_RSVD)
2098 || !pPdpeSrc->n.u1Present
2099# if PGM_GST_TYPE == PGM_TYPE_AMD64 /* NX, r/w, u/s bits in the PDPE are long mode only */
2100 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && pPdpeSrc->lm.u1NoExecute)
2101 || (fWriteFault && !pPdpeSrc->lm.u1Write && (fUserLevelFault || fWriteProtect))
2102 || (fUserLevelFault && !pPdpeSrc->lm.u1User)
2103# endif
2104 )
2105 {
2106 uPageFaultLevel = 1;
2107 goto l_UpperLevelPageFault;
2108 }
2109# endif
2110
2111 /*
2112 * Real page fault? (PDE level)
2113 */
2114 if ( (uErr & X86_TRAP_PF_RSVD)
2115 || !pPdeSrc->n.u1Present
2116# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
2117 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && pPdeSrc->n.u1NoExecute)
2118# endif
2119 || (fWriteFault && !pPdeSrc->n.u1Write && (fUserLevelFault || fWriteProtect))
2120 || (fUserLevelFault && !pPdeSrc->n.u1User) )
2121 {
2122 uPageFaultLevel = 2;
2123 goto l_UpperLevelPageFault;
2124 }
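 /* Example of the predicate above: a supervisor write (X86_TRAP_PF_RW set,
  * X86_TRAP_PF_US clear) to a read-only PDE is only a real guest fault when
  * CR0.WP is set; with WP clear the kernel may write to read-only pages, so
  * we fall through to the dirty-bit handling below instead.
  */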
2125
2126 /*
2127 * First check the easy case where the page directory has been marked read-only to track
2128 * the dirty bit of an emulated BIG page.
2129 */
2130 if (pPdeSrc->b.u1Size && fBigPagesSupported)
2131 {
2132 /* Mark guest page directory as accessed */
2133# if PGM_GST_TYPE == PGM_TYPE_AMD64
2134 pPml4eSrc->n.u1Accessed = 1;
2135 pPdpeSrc->lm.u1Accessed = 1;
2136# endif
2137 pPdeSrc->b.u1Accessed = 1;
2138
2139 /*
2140 * Only write protection page faults are relevant here.
2141 */
2142 if (fWriteFault)
2143 {
2144 /* Mark guest page directory as dirty (BIG page only). */
2145 pPdeSrc->b.u1Dirty = 1;
2146
2147 if (pPdeDst->n.u1Present && (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY))
2148 {
2149 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
2150
2151 Assert(pPdeSrc->b.u1Write);
2152
2153 pPdeDst->n.u1Write = 1;
2154 pPdeDst->n.u1Accessed = 1;
2155 pPdeDst->au32[0] &= ~PGM_PDFLAGS_TRACK_DIRTY;
2156 PGM_INVL_BIG_PG(GCPtrPage);
2157 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2158 return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;
2159 }
2160 }
2161 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2162 return VINF_PGM_NO_DIRTY_BIT_TRACKING;
2163 }
2164 /* else: 4KB page table */
2165
2166 /*
2167 * Map the guest page table.
2168 */
2169 PGSTPT pPTSrc;
2170 rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
2171 if (RT_SUCCESS(rc))
2172 {
2173 /*
2174 * Real page fault?
2175 */
2176 PGSTPTE pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
2177 const GSTPTE PteSrc = *pPteSrc;
2178 if ( !PteSrc.n.u1Present
2179# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
2180 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && PteSrc.n.u1NoExecute)
2181# endif
2182 || (fWriteFault && !PteSrc.n.u1Write && (fUserLevelFault || fWriteProtect))
2183 || (fUserLevelFault && !PteSrc.n.u1User)
2184 )
2185 {
2186 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF));
2187 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2188 LogFlow(("CheckPageFault: real page fault at %RGv PteSrc.u=%08x (2)\n", GCPtrPage, PteSrc.u));
2189
2190 /* Check the present bit as the shadow tables can cause different error codes by being out of sync.
2191 * See the 2nd case above as well.
2192 */
2193 if (pPdeSrc->n.u1Present && pPteSrc->n.u1Present)
2194 TRPMSetErrorCode(pVM, uErr | X86_TRAP_PF_P); /* page-level protection violation */
2195
2196 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2197 return VINF_EM_RAW_GUEST_TRAP;
2198 }
2199 LogFlow(("CheckPageFault: page fault at %RGv PteSrc.u=%08x\n", GCPtrPage, PteSrc.u));
2200
2201 /*
2202 * Set the accessed bits in the page directory and the page table.
2203 */
2204# if PGM_GST_TYPE == PGM_TYPE_AMD64
2205 pPml4eSrc->n.u1Accessed = 1;
2206 pPdpeSrc->lm.u1Accessed = 1;
2207# endif
2208 pPdeSrc->n.u1Accessed = 1;
2209 pPteSrc->n.u1Accessed = 1;
2210
2211 /*
2212 * Only write protection page faults are relevant here.
2213 */
2214 if (fWriteFault)
2215 {
2216 /* Write access, so mark guest entry as dirty. */
2217# ifdef VBOX_WITH_STATISTICS
2218 if (!pPteSrc->n.u1Dirty)
2219 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtiedPage));
2220 else
2221 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageAlreadyDirty));
2222# endif
2223
2224 pPteSrc->n.u1Dirty = 1;
2225
2226 if (pPdeDst->n.u1Present)
2227 {
2228#ifndef IN_RING0
2229 /* Bail out here as pgmPoolGetPageByHCPhys will return NULL and we'll crash below.
2230 * Our individual shadow handlers will provide more information and force a fatal exit.
2231 */
2232 if (MMHyperIsInsideArea(pVM, (RTGCPTR)GCPtrPage))
2233 {
2234 LogRel(("CheckPageFault: write to hypervisor region %RGv\n", GCPtrPage));
2235 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2236 return VINF_SUCCESS;
2237 }
2238#endif
2239 /*
2240 * Map shadow page table.
2241 */
2242 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, pPdeDst->u & SHW_PDE_PG_MASK);
2243 if (pShwPage)
2244 {
2245 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2246 PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
2247 if ( pPteDst->n.u1Present /** @todo Optimize accessed bit emulation? */
2248 && (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY))
2249 {
2250 LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
2251# ifdef VBOX_STRICT
2252 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
2253 if (pPage)
2254 AssertMsg(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage),
2255 ("Unexpected dirty bit tracking on monitored page %RGv (phys %RGp)!!!!!!\n", GCPtrPage, pPteSrc->u & X86_PTE_PAE_PG_MASK));
2256# endif
2257 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
2258
2259 Assert(pPteSrc->n.u1Write);
2260
2261 pPteDst->n.u1Write = 1;
2262 pPteDst->n.u1Dirty = 1;
2263 pPteDst->n.u1Accessed = 1;
2264 pPteDst->au32[0] &= ~PGM_PTFLAGS_TRACK_DIRTY;
2265 PGM_INVL_PG(GCPtrPage);
2266
2267 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2268 return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;
2269 }
2270 }
2271 else
2272 AssertMsgFailed(("pgmPoolGetPageByHCPhys %RGp failed!\n", pPdeDst->u & SHW_PDE_PG_MASK));
2273 }
2274 }
2275/** @todo Optimize accessed bit emulation? */
2276# ifdef VBOX_STRICT
2277 /*
2278 * Sanity check.
2279 */
2280 else if ( !pPteSrc->n.u1Dirty
2281 && (pPdeSrc->n.u1Write & pPteSrc->n.u1Write)
2282 && pPdeDst->n.u1Present)
2283 {
2284 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, pPdeDst->u & SHW_PDE_PG_MASK);
2285 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2286 PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
2287 if ( pPteDst->n.u1Present
2288 && pPteDst->n.u1Write)
2289 LogFlow(("Writable present page %RGv not marked for dirty bit tracking!!!\n", GCPtrPage));
2290 }
2291# endif /* VBOX_STRICT */
2292 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2293 return VINF_PGM_NO_DIRTY_BIT_TRACKING;
2294 }
2295 AssertRC(rc);
2296 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2297 return rc;
2298
2299
2300l_UpperLevelPageFault:
2301 /*
2302 * Pagefault detected while checking the PML4E, PDPE or PDE.
2303 * Single exit handler to get rid of duplicate code paths.
2304 */
2305 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF));
2306 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2307 Log(("CheckPageFault: real page fault at %RGv (%d)\n", GCPtrPage, uPageFaultLevel));
2308
2309 if (
2310# if PGM_GST_TYPE == PGM_TYPE_AMD64
2311 pPml4eSrc->n.u1Present &&
2312# endif
2313# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
2314 pPdpeSrc->n.u1Present &&
2315# endif
2316 pPdeSrc->n.u1Present)
2317 {
2318 /* Check the present bit as the shadow tables can cause different error codes by being out of sync. */
2319 if (pPdeSrc->b.u1Size && fBigPagesSupported)
2320 {
2321 TRPMSetErrorCode(pVM, uErr | X86_TRAP_PF_P); /* page-level protection violation */
2322 }
2323 else
2324 {
2325 /*
2326 * Map the guest page table.
2327 */
2328 PGSTPT pPTSrc;
2329 rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
2330 if (RT_SUCCESS(rc))
2331 {
2332 PGSTPTE pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
2333 const GSTPTE PteSrc = *pPteSrc;
2334 if (pPteSrc->n.u1Present)
2335 TRPMSetErrorCode(pVM, uErr | X86_TRAP_PF_P); /* page-level protection violation */
2336 }
2337 AssertRC(rc);
2338 }
2339 }
2340 return VINF_EM_RAW_GUEST_TRAP;
2341}
2342#endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
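/* The dirty-bit emulation above, end to end (a descriptive sketch, no new code):
 *   1. SyncPT/SyncPage install a shadow PTE with RW=0 and PGM_PTFLAGS_TRACK_DIRTY
 *      for a guest page that is writable but not yet dirty.
 *   2. The guest writes and the CPU raises #PF with X86_TRAP_PF_RW.
 *   3. CheckPageFault() finds the marker, sets the guest A/D bits, restores RW
 *      in the shadow PTE, invalidates the TLB entry and returns
 *      VINF_PGM_HANDLED_DIRTY_BIT_FAULT so the instruction is restarted.
 */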
2343
2344
2345/**
2346 * Sync a shadow page table.
2347 *
2348 * The shadow page table is not present. This includes the case where
2349 * there is a conflict with a mapping.
2350 *
2351 * @returns VBox status code.
2352 * @param pVM VM handle.
2353 * @param iPDSrc Page directory index.
2354 * @param pPDSrc Source page directory (i.e. Guest OS page directory).
2355 * Assume this is a temporary mapping.
2356 * @param GCPtrPage GC Pointer of the page that caused the fault.
2357 */
2358PGM_BTH_DECL(int, SyncPT)(PVM pVM, unsigned iPDSrc, PGSTPD pPDSrc, RTGCPTR GCPtrPage)
2359{
2360 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2361 STAM_COUNTER_INC(&pVM->pgm.s.StatSyncPtPD[iPDSrc]);
2362 LogFlow(("SyncPT: GCPtrPage=%RGv\n", GCPtrPage));
2363
2364#if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
2365 || PGM_GST_TYPE == PGM_TYPE_PAE \
2366 || PGM_GST_TYPE == PGM_TYPE_AMD64) \
2367 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
2368 && PGM_SHW_TYPE != PGM_TYPE_EPT
2369
2370 int rc = VINF_SUCCESS;
2371
2372 /*
2373 * Validate input a little bit.
2374 */
2375 AssertMsg(iPDSrc == ((GCPtrPage >> GST_PD_SHIFT) & GST_PD_MASK), ("iPDSrc=%x GCPtrPage=%RGv\n", iPDSrc, GCPtrPage));
2376# if PGM_SHW_TYPE == PGM_TYPE_32BIT
2377 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT;
2378 PSHWPDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
2379
2380# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2381 /* Fetch the pgm pool shadow descriptor. */
2382 PPGMPOOLPAGE pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
2383 Assert(pShwPde);
2384# endif
2385
2386# elif PGM_SHW_TYPE == PGM_TYPE_PAE
2387# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2388 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2389 PPGMPOOLPAGE pShwPde;
2390 PX86PDPAE pPDDst;
2391 PSHWPDE pPdeDst;
2392
2393 /* Fetch the pgm pool shadow descriptor. */
2394 rc = pgmShwGetPaePoolPagePD(&pVM->pgm.s, GCPtrPage, &pShwPde);
2395 AssertRCSuccessReturn(rc, rc);
2396 Assert(pShwPde);
2397
2398 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
2399 pPdeDst = &pPDDst->a[iPDDst];
2400# else
2401 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - only pool index atm! */;
2402 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT); NOREF(iPdpt);
2403 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s); NOREF(pPdptDst);
2404 PSHWPDE pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
2405# endif
2406# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
2407 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
2408 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2409 PX86PDPAE pPDDst;
2410 PX86PDPT pPdptDst;
2411 rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, NULL, &pPdptDst, &pPDDst);
2412 AssertRCSuccessReturn(rc, rc);
2413 Assert(pPDDst);
2414 PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
2415# endif
2416 SHWPDE PdeDst = *pPdeDst;
2417
2418# if PGM_GST_TYPE == PGM_TYPE_AMD64
2419 /* Fetch the pgm pool shadow descriptor. */
2420 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
2421 Assert(pShwPde);
2422# endif
2423
2424# ifndef PGM_WITHOUT_MAPPINGS
2425 /*
2426 * Check for conflicts.
2427 * GC: In case of a conflict we'll go to Ring-3 and do a full SyncCR3.
2428 * HC: Simply resolve the conflict.
2429 */
2430 if (PdeDst.u & PGM_PDFLAGS_MAPPING)
2431 {
2432 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
2433# ifndef IN_RING3
2434 Log(("SyncPT: Conflict at %RGv\n", GCPtrPage));
2435 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2436 return VERR_ADDRESS_CONFLICT;
2437# else
2438 PPGMMAPPING pMapping = pgmGetMapping(pVM, (RTGCPTR)GCPtrPage);
2439 Assert(pMapping);
2440# if PGM_GST_TYPE == PGM_TYPE_32BIT
2441 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
2442# elif PGM_GST_TYPE == PGM_TYPE_PAE
2443 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
2444# else
2445 AssertFailed(); /* can't happen for amd64 */
2446# endif
2447 if (RT_FAILURE(rc))
2448 {
2449 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2450 return rc;
2451 }
2452 PdeDst = *pPdeDst;
2453# endif
2454 }
2455# else /* PGM_WITHOUT_MAPPINGS */
2456 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
2457# endif /* PGM_WITHOUT_MAPPINGS */
2458 Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
2459
2460# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2461 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
2462 PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
2463# endif
2464
2465 /*
2466 * Sync page directory entry.
2467 */
2468 GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
2469 if (PdeSrc.n.u1Present)
2470 {
2471 /*
2472 * Allocate & map the page table.
2473 */
2474 PSHWPT pPTDst;
2475# if PGM_GST_TYPE == PGM_TYPE_AMD64
2476 const bool fPageTable = !PdeSrc.b.u1Size;
2477# else
2478 const bool fPageTable = !PdeSrc.b.u1Size || !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
2479# endif
2480 PPGMPOOLPAGE pShwPage;
2481 RTGCPHYS GCPhys;
2482 if (fPageTable)
2483 {
2484 GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
2485# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
2486 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
2487 GCPhys |= (iPDDst & 1) * (PAGE_SIZE / 2);
2488# endif
2489# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2490 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage);
2491# else
2492 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
2493# endif
2494 }
2495 else
2496 {
2497 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
2498# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
2499 /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
2500 GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
2501# endif
2502# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2503 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, pShwPde->idx, iPDDst, &pShwPage);
2504# else
2505 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
2506# endif
2507 }
2508 if (rc == VINF_SUCCESS)
2509 pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2510 else if (rc == VINF_PGM_CACHED_PAGE)
2511 {
2512 /*
2513 * The PT was cached, just hook it up.
2514 */
2515 if (fPageTable)
2516 PdeDst.u = pShwPage->Core.Key
2517 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2518 else
2519 {
2520 PdeDst.u = pShwPage->Core.Key
2521 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2522 /* (see explanation and assumptions further down.) */
2523 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
2524 {
2525 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageBig));
2526 PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
2527 PdeDst.b.u1Write = 0;
2528 }
2529 }
2530 *pPdeDst = PdeDst;
2531# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2532 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
2533# endif
2534 return VINF_SUCCESS;
2535 }
2536 else if (rc == VERR_PGM_POOL_FLUSHED)
2537 {
2538 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
2539# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2540 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
2541# endif
2542 return VINF_PGM_SYNC_CR3;
2543 }
2544 else
2545 AssertMsgFailedReturn(("rc=%Rrc\n", rc), VERR_INTERNAL_ERROR);
2546 PdeDst.u &= X86_PDE_AVL_MASK;
2547 PdeDst.u |= pShwPage->Core.Key;
2548
2549 /*
2550 * Page directory has been accessed (this is a fault situation, remember).
2551 */
2552 pPDSrc->a[iPDSrc].n.u1Accessed = 1;
2553 if (fPageTable)
2554 {
2555 /*
2556 * Page table - 4KB.
2557 *
2558 * Sync all or just a few entries depending on PGM_SYNC_N_PAGES.
2559 */
2560 Log2(("SyncPT: 4K %RGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx}\n",
2561 GCPtrPage, PdeSrc.b.u1Present, PdeSrc.b.u1Write, PdeSrc.b.u1User, (uint64_t)PdeSrc.u));
2562 PGSTPT pPTSrc;
2563 rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
2564 if (RT_SUCCESS(rc))
2565 {
2566 /*
2567 * Start by syncing the page directory entry so CSAM's TLB trick works.
2568 */
2569 PdeDst.u = (PdeDst.u & (SHW_PDE_PG_MASK | X86_PDE_AVL_MASK))
2570 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2571 *pPdeDst = PdeDst;
2572# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2573 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
2574# endif
2575
2576 /*
2577 * Directory/page user or supervisor privilege: (same goes for read/write)
2578 *
2579 * Directory Page Combined
2580 * U/S U/S U/S
2581 * 0 0 0
2582 * 0 1 0
2583 * 1 0 0
2584 * 1 1 1
2585 *
2586 * Simple AND operation. Table listed for completeness.
2587 *
2588 */
2589 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT4K));
2590# ifdef PGM_SYNC_N_PAGES
2591 unsigned iPTBase = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
2592 unsigned iPTDst = iPTBase;
2593 const unsigned iPTDstEnd = RT_MIN(iPTDst + PGM_SYNC_NR_PAGES / 2, RT_ELEMENTS(pPTDst->a));
2594 if (iPTDst <= PGM_SYNC_NR_PAGES / 2)
2595 iPTDst = 0;
2596 else
2597 iPTDst -= PGM_SYNC_NR_PAGES / 2;
2598# else /* !PGM_SYNC_N_PAGES */
2599 unsigned iPTDst = 0;
2600 const unsigned iPTDstEnd = RT_ELEMENTS(pPTDst->a);
2601# endif /* !PGM_SYNC_N_PAGES */
2602# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
2603 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
2604 const unsigned offPTSrc = ((GCPtrPage >> SHW_PD_SHIFT) & 1) * 512;
2605# else
2606 const unsigned offPTSrc = 0;
2607# endif
2608 for (; iPTDst < iPTDstEnd; iPTDst++)
2609 {
2610 const unsigned iPTSrc = iPTDst + offPTSrc;
2611 const GSTPTE PteSrc = pPTSrc->a[iPTSrc];
2612
2613 if (PteSrc.n.u1Present) /* we've already cleared it above */
2614 {
2615# ifndef IN_RING0
2616 /*
2617 * Assuming kernel code will be marked as supervisor - and not as user level
2618 * code executed using a conforming code selector - and marked as read-only.
2619 * Also assume that if we're monitoring a page, it's of no interest to CSAM.
2620 */
2621 PPGMPAGE pPage;
2622 if ( ((PdeSrc.u & pPTSrc->a[iPTSrc].u) & (X86_PTE_RW | X86_PTE_US))
2623 || !CSAMDoesPageNeedScanning(pVM, (RTRCPTR)((iPDSrc << GST_PD_SHIFT) | (iPTSrc << PAGE_SHIFT)))
2624 || ( (pPage = pgmPhysGetPage(&pVM->pgm.s, PteSrc.u & GST_PTE_PG_MASK))
2625 && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2626 )
2627# endif
2628 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
2629 Log2(("SyncPT: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s dst.raw=%08llx iPTSrc=%x PdeSrc.u=%x physpte=%RGp\n",
2630 (RTGCPTR)((iPDSrc << GST_PD_SHIFT) | (iPTSrc << PAGE_SHIFT)),
2631 PteSrc.n.u1Present,
2632 PteSrc.n.u1Write & PdeSrc.n.u1Write,
2633 PteSrc.n.u1User & PdeSrc.n.u1User,
2634 (uint64_t)PteSrc.u,
2635 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : "", pPTDst->a[iPTDst].u, iPTSrc, PdeSrc.au32[0],
2636 (RTGCPHYS)((PdeSrc.u & GST_PDE_PG_MASK) + iPTSrc*sizeof(PteSrc)) ));
2637 }
2638 } /* for PTEs */
2639 }
2640 }
2641 else
2642 {
2643 /*
2644 * Big page - 2/4MB.
2645 *
2646 * We'll walk the ram range list in parallel and optimize lookups.
2647 * We will only sync one shadow page table at a time.
2648 */
2649 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT4M));
2650
2651 /**
2652 * @todo It might be more efficient to sync only a part of the 4MB page (similar to what we do for 4kb PDs).
2653 */
2654
2655 /*
2656 * Start by syncing the page directory entry.
2657 */
2658 PdeDst.u = (PdeDst.u & (SHW_PDE_PG_MASK | (X86_PDE_AVL_MASK & ~PGM_PDFLAGS_TRACK_DIRTY)))
2659 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2660
2661 /*
2662 * If the page is not flagged as dirty and is writable, then make it read-only
2663 * at PD level, so we can set the dirty bit when the page is modified.
2664 *
2665 * ASSUMES that page access handlers are implemented on page table entry level.
2666 * Thus we will first catch the dirty access and set PDE.D and restart. If
2667 * there is an access handler, we'll trap again and let it work on the problem.
2668 */
2669 /** @todo move the above stuff to a section in the PGM documentation. */
2670 Assert(!(PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY));
2671 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
2672 {
2673 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageBig));
2674 PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
2675 PdeDst.b.u1Write = 0;
2676 }
2677 *pPdeDst = PdeDst;
2678# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2679 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
2680# endif
2681
2682 /*
2683 * Fill the shadow page table.
2684 */
2685 /* Get address and flags from the source PDE. */
2686 SHWPTE PteDstBase;
2687 PteDstBase.u = PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT);
2688
2689 /* Loop thru the entries in the shadow PT. */
2690 const RTGCPTR GCPtr = (GCPtrPage >> SHW_PD_SHIFT) << SHW_PD_SHIFT; NOREF(GCPtr);
2691 Log2(("SyncPT: BIG %RGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx} Shw=%RGv GCPhys=%RGp %s\n",
2692 GCPtrPage, PdeSrc.b.u1Present, PdeSrc.b.u1Write, PdeSrc.b.u1User, (uint64_t)PdeSrc.u, GCPtr,
2693 GCPhys, PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
2694 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2695 unsigned iPTDst = 0;
2696 while (iPTDst < RT_ELEMENTS(pPTDst->a))
2697 {
2698 /* Advance ram range list. */
2699 while (pRam && GCPhys > pRam->GCPhysLast)
2700 pRam = pRam->CTX_SUFF(pNext);
2701 if (pRam && GCPhys >= pRam->GCPhys)
2702 {
2703 unsigned iHCPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2704 do
2705 {
2706 /* Make shadow PTE. */
2707 PPGMPAGE pPage = &pRam->aPages[iHCPage];
2708 SHWPTE PteDst;
2709
2710 /* Make sure the RAM has already been allocated. */
2711 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) /** @todo PAGE FLAGS */
2712 {
2713 if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(pPage)))
2714 {
2715# ifdef IN_RING3
2716 int rc = pgmr3PhysGrowRange(pVM, GCPhys);
2717# else
2718 int rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2719# endif
2720 if (rc != VINF_SUCCESS)
2721 return rc;
2722 }
2723 }
2724
2725 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2726 {
2727 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
2728 {
2729 PteDst.u = PGM_PAGE_GET_HCPHYS(pPage) | PteDstBase.u;
2730 PteDst.n.u1Write = 0;
2731 }
2732 else
2733 PteDst.u = 0;
2734 }
2735# ifndef IN_RING0
2736 /*
2737 * Assuming kernel code will be marked as supervisor and not as user level and executed
2738 * using a conforming code selector. Don't check for readonly, as that implies the whole
2739 * 4MB can be code or readonly data. Linux enables write access for its large pages.
2740 */
2741 else if ( !PdeSrc.n.u1User
2742 && CSAMDoesPageNeedScanning(pVM, (RTRCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT))))
2743 PteDst.u = 0;
2744# endif
2745 else
2746 PteDst.u = PGM_PAGE_GET_HCPHYS(pPage) | PteDstBase.u;
2747# ifdef PGMPOOL_WITH_USER_TRACKING
2748 if (PteDst.n.u1Present)
2749 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, pPage->HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst); /** @todo PAGE FLAGS */
2750# endif
2751 /* commit it */
2752 pPTDst->a[iPTDst] = PteDst;
2753 Log4(("SyncPT: BIG %RGv PteDst:{P=%d RW=%d U=%d raw=%08llx}%s\n",
2754 (RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT)), PteDst.n.u1Present, PteDst.n.u1Write, PteDst.n.u1User, (uint64_t)PteDst.u,
2755 PteDst.u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
2756
2757 /* advance */
2758 GCPhys += PAGE_SIZE;
2759 iHCPage++;
2760 iPTDst++;
2761 } while ( iPTDst < RT_ELEMENTS(pPTDst->a)
2762 && GCPhys <= pRam->GCPhysLast);
2763 }
2764 else if (pRam)
2765 {
2766 Log(("Invalid pages at %RGp\n", GCPhys));
2767 do
2768 {
2769 pPTDst->a[iPTDst].u = 0; /* MMIO or invalid page, we must handle them manually. */
2770 GCPhys += PAGE_SIZE;
2771 iPTDst++;
2772 } while ( iPTDst < RT_ELEMENTS(pPTDst->a)
2773 && GCPhys < pRam->GCPhys);
2774 }
2775 else
2776 {
2777 Log(("Invalid pages at %RGp (2)\n", GCPhys));
2778 for ( ; iPTDst < RT_ELEMENTS(pPTDst->a); iPTDst++)
2779 pPTDst->a[iPTDst].u = 0; /* MMIO or invalid page, we must handle them manually. */
2780 }
2781 } /* while more PTEs */
2782 } /* 4KB / 4MB */
2783 }
2784 else
2785 AssertRelease(!PdeDst.n.u1Present);
2786
2787 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2788 if (RT_FAILURE(rc))
2789 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPTFailed));
2790 return rc;
2791
2792#elif (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
2793 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
2794 && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
2795
2796
2797 /*
2798 * Validate input a little bit.
2799 */
2800 int rc = VINF_SUCCESS;
2801# if PGM_SHW_TYPE == PGM_TYPE_32BIT
2802 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2803 PSHWPDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
2804
2805# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2806 /* Fetch the pgm pool shadow descriptor. */
2807 PPGMPOOLPAGE pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
2808 Assert(pShwPde);
2809# endif
2810
2811# elif PGM_SHW_TYPE == PGM_TYPE_PAE
2812# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2813 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2814 PPGMPOOLPAGE pShwPde;
2815 PX86PDPAE pPDDst;
2816 PSHWPDE pPdeDst;
2817
2818 /* Fetch the pgm pool shadow descriptor. */
2819 rc = pgmShwGetPaePoolPagePD(&pVM->pgm.s, GCPtrPage, &pShwPde);
2820 AssertRCSuccessReturn(rc, rc);
2821 Assert(pShwPde);
2822
2823 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
2824 pPdeDst = &pPDDst->a[iPDDst];
2825# else
2826 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - only pool index atm!*/;
2827 PX86PDEPAE pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
2828# endif
2829
2830# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
2831 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
2832 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2833 PX86PDPAE pPDDst;
2834 PX86PDPT pPdptDst;
2835 rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, NULL, &pPdptDst, &pPDDst);
2836 AssertRCSuccessReturn(rc, rc);
2837 Assert(pPDDst);
2838 PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
2839
2840 /* Fetch the pgm pool shadow descriptor. */
2841 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
2842 Assert(pShwPde);
2843
2844# elif PGM_SHW_TYPE == PGM_TYPE_EPT
2845 const unsigned iPdpt = (GCPtrPage >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
2846 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
2847 PEPTPD pPDDst;
2848 PEPTPDPT pPdptDst;
2849
2850 rc = pgmShwGetEPTPDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
2851 if (rc != VINF_SUCCESS)
2852 {
2853 AssertRC(rc);
2854 return rc;
2855 }
2856 Assert(pPDDst);
2857 PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
2858
2859 /* Fetch the pgm pool shadow descriptor. */
2860 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpt].u & EPT_PDPTE_PG_MASK);
2861 Assert(pShwPde);
2862# endif
2863 SHWPDE PdeDst = *pPdeDst;
2864
2865 Assert(!(PdeDst.u & PGM_PDFLAGS_MAPPING));
2866 Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
2867
2868 GSTPDE PdeSrc;
2869 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
2870 PdeSrc.n.u1Present = 1;
2871 PdeSrc.n.u1Write = 1;
2872 PdeSrc.n.u1Accessed = 1;
2873 PdeSrc.n.u1User = 1;
2874
2875 /*
2876 * Allocate & map the page table.
2877 */
2878 PSHWPT pPTDst;
2879 PPGMPOOLPAGE pShwPage;
2880 RTGCPHYS GCPhys;
2881
2882 /* Virtual address = physical address */
2883 GCPhys = GCPtrPage & X86_PAGE_4K_BASE_MASK;
2884# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_EPT || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2885 rc = pgmPoolAlloc(pVM, GCPhys & ~(RT_BIT_64(SHW_PD_SHIFT) - 1), BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage);
2886# else
2887 rc = pgmPoolAlloc(pVM, GCPhys & ~(RT_BIT_64(SHW_PD_SHIFT) - 1), BTH_PGMPOOLKIND_PT_FOR_PT, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
2888# endif
2889
2890 if ( rc == VINF_SUCCESS
2891 || rc == VINF_PGM_CACHED_PAGE)
2892 pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2893 else
2894 AssertMsgFailedReturn(("rc=%Rrc\n", rc), VERR_INTERNAL_ERROR);
2895
2896 PdeDst.u &= X86_PDE_AVL_MASK;
2897 PdeDst.u |= pShwPage->Core.Key;
2898 PdeDst.n.u1Present = 1;
2899 PdeDst.n.u1Write = 1;
2900# if PGM_SHW_TYPE == PGM_TYPE_EPT
2901 PdeDst.n.u1Execute = 1;
2902# else
2903 PdeDst.n.u1User = 1;
2904 PdeDst.n.u1Accessed = 1;
2905# endif
2906 *pPdeDst = PdeDst;
2907
2908 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, GCPtrPage, PGM_SYNC_NR_PAGES, 0 /* page not present */);
2909 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2910 return rc;
2911
2912#else
2913 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE));
2914 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2915 return VERR_INTERNAL_ERROR;
2916#endif
2917}
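/* Illustration of the no-paging path above (real/protected mode guest,
 * illustrative numbers): for a fault at GCPtrPage = 0x00403000 under a 32-bit
 * shadow (SHW_PD_SHIFT = 22) the pool page is keyed on
 * GCPhys & ~(RT_BIT_64(22) - 1) = 0x00400000, and the faked all-access PdeSrc
 * makes SyncPage() establish guest-physical == guest-virtual mappings.
 */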
2918
2919
2920
2921/**
2922 * Prefetch a page/set of pages.
2923 *
2924 * Typically used to sync commonly used pages before entering raw mode
2925 * after a CR3 reload.
2926 *
2927 * @returns VBox status code.
2928 * @param pVM VM handle.
2929 * @param GCPtrPage Page to prefetch.
2930 */
2931PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, RTGCPTR GCPtrPage)
2932{
2933#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
2934 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
2935 /*
2936 * Check that all Guest levels thru the PDE are present, getting the
2937 * PD and PDE in the process.
2938 */
2939 int rc = VINF_SUCCESS;
2940# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
2941# if PGM_GST_TYPE == PGM_TYPE_32BIT
2942 const unsigned iPDSrc = GCPtrPage >> GST_PD_SHIFT;
2943 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
2944# elif PGM_GST_TYPE == PGM_TYPE_PAE
2945 unsigned iPDSrc;
2946# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2947 X86PDPE PdpeSrc;
2948 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
2949# else
2950 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, NULL);
2951# endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
2952 if (!pPDSrc)
2953 return VINF_SUCCESS; /* not present */
2954# elif PGM_GST_TYPE == PGM_TYPE_AMD64
2955 unsigned iPDSrc;
2956 PX86PML4E pPml4eSrc;
2957 X86PDPE PdpeSrc;
2958 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
2959 if (!pPDSrc)
2960 return VINF_SUCCESS; /* not present */
2961# endif
2962 const GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
2963# else
2964 PGSTPD pPDSrc = NULL;
2965 const unsigned iPDSrc = 0;
2966 GSTPDE PdeSrc;
2967
2968 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
2969 PdeSrc.n.u1Present = 1;
2970 PdeSrc.n.u1Write = 1;
2971 PdeSrc.n.u1Accessed = 1;
2972 PdeSrc.n.u1User = 1;
2973# endif
2974
2975 if (PdeSrc.n.u1Present && PdeSrc.n.u1Accessed)
2976 {
2977# if PGM_SHW_TYPE == PGM_TYPE_32BIT
2978 const X86PDE PdeDst = pgmShwGet32BitPDE(&pVM->pgm.s, GCPtrPage);
2979# elif PGM_SHW_TYPE == PGM_TYPE_PAE
2980# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2981 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
2982 PX86PDPAE pPDDst;
2983 X86PDEPAE PdeDst;
2984# if PGM_GST_TYPE != PGM_TYPE_PAE
2985 X86PDPE PdpeSrc;
2986
2987 /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
2988 PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
2989# endif
2990 int rc = pgmShwSyncPaePDPtr(pVM, GCPtrPage, &PdpeSrc, &pPDDst);
2991 if (rc != VINF_SUCCESS)
2992 {
2993 AssertRC(rc);
2994 return rc;
2995 }
2996 Assert(pPDDst);
2997 PdeDst = pPDDst->a[iPDDst];
2998# else
2999 const X86PDEPAE PdeDst = pgmShwGetPaePDE(&pVM->pgm.s, GCPtrPage);
3000# endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
3001
3002# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
3003 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
3004 PX86PDPAE pPDDst;
3005 X86PDEPAE PdeDst;
3006
3007# if PGM_GST_TYPE == PGM_TYPE_PROT
3008 /* AMD-V nested paging */
3009 X86PML4E Pml4eSrc;
3010 X86PDPE PdpeSrc;
3011 PX86PML4E pPml4eSrc = &Pml4eSrc;
3012
3013 /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
3014 Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
3015 PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
3016# endif
3017
3018 rc = pgmShwSyncLongModePDPtr(pVM, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
3019 if (rc != VINF_SUCCESS)
3020 {
3021 AssertRC(rc);
3022 return rc;
3023 }
3024 Assert(pPDDst);
3025 PdeDst = pPDDst->a[iPDDst];
3026# endif
3027 if (!(PdeDst.u & PGM_PDFLAGS_MAPPING))
3028 {
3029 if (!PdeDst.n.u1Present)
3030 /** r=bird: This guy will set the A bit on the PDE, probably harmless. */
3031 rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, GCPtrPage);
3032 else
3033 {
3034 /** @note We used to sync PGM_SYNC_NR_PAGES pages, which triggered assertions in CSAM, because
3035 * R/W attributes of nearby pages were reset. Not sure how that could happen. Anyway, it
3036 * makes no sense to prefetch more than one page.
3037 */
3038 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, GCPtrPage, 1, 0);
3039 if (RT_SUCCESS(rc))
3040 rc = VINF_SUCCESS;
3041 }
3042 }
3043 }
3044 return rc;
3045
3046#elif PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
3047 return VINF_SUCCESS; /* ignore */
3048#endif
3049}
3050
3051
3052
3053
3054/**
3055 * Syncs a page during a PGMVerifyAccess() call.
3056 *
3057 * @returns VBox status code (informational statuses included).
     * @param pVM VM handle.
3058 * @param GCPtrPage The address of the page to sync.
3059 * @param fPage The effective guest page flags.
3060 * @param uErr The trap error code.
3061 */
3062PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVM pVM, RTGCPTR GCPtrPage, unsigned fPage, unsigned uErr)
3063{
3064 LogFlow(("VerifyAccessSyncPage: GCPtrPage=%RGv fPage=%#x uErr=%#x\n", GCPtrPage, fPage, uErr));
3065
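    /* Rough flow (a summary of the code below, not a spec): mark supervisor
       pages as scanned for CSAM, walk the guest PD, make sure the shadow
       PDE/PT is present, handle a pending dirty-bit fault, then sync the
       one page. */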
3066 Assert(!HWACCMIsNestedPagingActive(pVM));
3067#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
3068 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
3069
3070# ifndef IN_RING0
3071 if (!(fPage & X86_PTE_US))
3072 {
3073 /*
3074 * Mark this page as safe.
3075 */
3076 /** @todo not correct for pages that contain both code and data!! */
3077 Log(("CSAMMarkPage %RGv; scanned=%d\n", GCPtrPage, true));
3078 CSAMMarkPage(pVM, (RTRCPTR)GCPtrPage, true);
3079 }
3080# endif
3081
3082 /*
3083 * Get guest PD and index.
3084 */
3085# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
3086# if PGM_GST_TYPE == PGM_TYPE_32BIT
3087 const unsigned iPDSrc = GCPtrPage >> GST_PD_SHIFT;
3088 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
3089# elif PGM_GST_TYPE == PGM_TYPE_PAE
3090 unsigned iPDSrc;
3091# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
3092 X86PDPE PdpeSrc;
3093 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
3094# else
3095 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, NULL);
3096# endif
3097
3098 if (!pPDSrc)
3099 {
3100 Log(("PGMVerifyAccess: access violation for %RGv due to non-present PDPTR\n", GCPtrPage));
3101 return VINF_EM_RAW_GUEST_TRAP;
3102 }
3103# elif PGM_GST_TYPE == PGM_TYPE_AMD64
3104 unsigned iPDSrc;
3105 PX86PML4E pPml4eSrc;
3106 X86PDPE PdpeSrc;
3107 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
3108 if (!pPDSrc)
3109 {
3110 Log(("PGMVerifyAccess: access violation for %RGv due to non-present PDPTR\n", GCPtrPage));
3111 return VINF_EM_RAW_GUEST_TRAP;
3112 }
3113# endif
3114# else
3115 PGSTPD pPDSrc = NULL;
3116 const unsigned iPDSrc = 0;
3117# endif
3118 int rc = VINF_SUCCESS;
3119
3120 /*
3121 * First check if the shadow PD is present.
3122 */
3123# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3124 PX86PDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
3125# elif PGM_SHW_TYPE == PGM_TYPE_PAE
3126 PX86PDEPAE pPdeDst;
3127# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
3128 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
3129 PX86PDPAE pPDDst;
3130# if PGM_GST_TYPE != PGM_TYPE_PAE
3131 X86PDPE PdpeSrc;
3132
3133 /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
3134 PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
3135# endif
3136 rc = pgmShwSyncPaePDPtr(pVM, GCPtrPage, &PdpeSrc, &pPDDst);
3137 if (rc != VINF_SUCCESS)
3138 {
3139 AssertRC(rc);
3140 return rc;
3141 }
3142 Assert(pPDDst);
3143 pPdeDst = &pPDDst->a[iPDDst];
3144# else
3145 pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
3146# endif
3147# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
3148 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
3149 PX86PDPAE pPDDst;
3150 PX86PDEPAE pPdeDst;
3151
3152# if PGM_GST_TYPE == PGM_TYPE_PROT
3153 /* AMD-V nested paging */
3154 X86PML4E Pml4eSrc;
3155 X86PDPE PdpeSrc;
3156 PX86PML4E pPml4eSrc = &Pml4eSrc;
3157
3158 /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
3159 Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
3160 PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
3161# endif
3162
3163 rc = pgmShwSyncLongModePDPtr(pVM, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
3164 if (rc != VINF_SUCCESS)
3165 {
3166 AssertRC(rc);
3167 return rc;
3168 }
3169 Assert(pPDDst);
3170 pPdeDst = &pPDDst->a[iPDDst];
3171# endif
3172 if (!pPdeDst->n.u1Present)
3173 {
3174 rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, GCPtrPage);
3175 AssertRC(rc);
3176 if (rc != VINF_SUCCESS)
3177 return rc;
3178 }
3179
3180# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
3181 /* Check for dirty bit fault */
3182 rc = PGM_BTH_NAME(CheckPageFault)(pVM, uErr, pPdeDst, &pPDSrc->a[iPDSrc], GCPtrPage);
3183 if (rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT)
3184 Log(("PGMVerifyAccess: success (dirty)\n"));
3185 else
3186 {
3187 GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
3188#else
3189 {
3190 GSTPDE PdeSrc;
3191 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
3192 PdeSrc.n.u1Present = 1;
3193 PdeSrc.n.u1Write = 1;
3194 PdeSrc.n.u1Accessed = 1;
3195 PdeSrc.n.u1User = 1;
3196
3197#endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
3198 Assert(rc != VINF_EM_RAW_GUEST_TRAP);
3199 if (uErr & X86_TRAP_PF_US)
3200 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncUser));
3201 else /* supervisor */
3202 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));
3203
3204 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, GCPtrPage, 1, 0);
3205 if (RT_SUCCESS(rc))
3206 {
3207 /* Page was successfully synced */
3208 Log2(("PGMVerifyAccess: success (sync)\n"));
3209 rc = VINF_SUCCESS;
3210 }
3211 else
3212 {
3213 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", GCPtrPage, rc));
3214 return VINF_EM_RAW_GUEST_TRAP;
3215 }
3216 }
3217 return rc;
3218
3219#else /* unsupported shadow+guest paging combination */
3220
3221 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE));
3222 return VERR_INTERNAL_ERROR;
3223#endif /* unsupported shadow+guest paging combination */
3224}
3225
3226
3227#if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
3228# if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
3229/**
3230 * Figures out which kind of shadow page this guest PDE warrants.
3231 *
3232 * @returns Shadow page kind.
3233 * @param pPdeSrc The guest PDE in question.
3234 * @param cr4 The current guest cr4 value.
3235 */
3236DECLINLINE(PGMPOOLKIND) PGM_BTH_NAME(CalcPageKind)(const GSTPDE *pPdeSrc, uint32_t cr4)
3237{
3238# if PGM_GST_TYPE == PGM_TYPE_AMD64
3239 if (!pPdeSrc->n.u1Size)
3240# else
3241 if (!pPdeSrc->n.u1Size || !(cr4 & X86_CR4_PSE))
3242# endif
3243 return BTH_PGMPOOLKIND_PT_FOR_PT;
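    /* Note: the RW/US(/NX)-differentiated big-page pool kinds in the switch
       below are disabled, so every big page currently gets
       BTH_PGMPOOLKIND_PT_FOR_BIG regardless of its attributes. */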
3244 //switch (pPdeSrc->u & (X86_PDE4M_RW | X86_PDE4M_US /*| X86_PDE4M_PAE_NX*/))
3245 //{
3246 // case 0:
3247 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RO;
3248 // case X86_PDE4M_RW:
3249 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RW;
3250 // case X86_PDE4M_US:
3251 // return BTH_PGMPOOLKIND_PT_FOR_BIG_US;
3252 // case X86_PDE4M_RW | X86_PDE4M_US:
3253 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RW_US;
3254# if 0
3255 // case X86_PDE4M_PAE_NX:
3256 // return BTH_PGMPOOLKIND_PT_FOR_BIG_NX;
3257 // case X86_PDE4M_RW | X86_PDE4M_PAE_NX:
3258 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RW_NX;
3259 // case X86_PDE4M_US | X86_PDE4M_PAE_NX:
3260 // return BTH_PGMPOOLKIND_PT_FOR_BIG_US_NX;
3261 // case X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PAE_NX:
3262 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RW_US_NX;
3263# endif
3264 return BTH_PGMPOOLKIND_PT_FOR_BIG;
3265 //}
3266}
3267# endif
3268#endif
3269
3270#undef MY_STAM_COUNTER_INC
3271#define MY_STAM_COUNTER_INC(a) do { } while (0)
3272
3273
3274/**
3275 * Syncs the paging hierarchy starting at CR3.
3276 *
3277 * @returns VBox status code, no specials.
3278 * @param pVM The virtual machine.
3279 * @param cr0 Guest context CR0 register
3280 * @param cr3 Guest context CR3 register
3281 * @param cr4 Guest context CR4 register
3282 * @param fGlobal Including global page directories or not
3283 */
3284PGM_BTH_DECL(int, SyncCR3)(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
3285{
3286 if (VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
3287 fGlobal = true; /* Change this CR3 reload to be a global one. */
3288
3289 LogFlow(("SyncCR3 %d\n", fGlobal));
3290
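    /* Informal overview: update the access handlers; with nested paging / EPT
       shadowing just flush the TLB; with an AMD64 shadow there is nothing to
       sync here; otherwise walk the guest PD(s), resolving hypervisor mapping
       conflicts and freeing shadow PDEs that no longer match the guest. */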
3291#if PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
3292 /*
3293 * Update page access handlers.
3294 * Virtual handlers are always flushed, while physical ones are only flushed on demand.
3295 * WARNING: We are incorrectly not doing global flushing on virtual handler updates. We'll
3296 * have to look into that later because it has a bad influence on performance.
3297 * @note SvL: There's no need for that. Just invalidate the virtual range(s).
3298 * bird: Yes, but that won't work for aliases.
3299 */
3300 /** @todo this MUST go away. See #1557. */
3301 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3Handlers), h);
3302 PGM_GST_NAME(HandlerVirtualUpdate)(pVM, cr4);
3303 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3Handlers), h);
3304#endif
3305
3306#if PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
3307 /*
3308 * Nested / EPT - almost no work.
3309 */
3310 /** @todo check if this is really necessary; the call does it as well... */
3311 HWACCMFlushTLB(pVM);
3312 return VINF_SUCCESS;
3313
3314#elif PGM_SHW_TYPE == PGM_TYPE_AMD64
3315 /*
3316 * AMD64 (Shw & Gst) - No need to check all paging levels; we zero
3317 * out the shadow parts when the guest modifies its tables.
3318 */
3319 return VINF_SUCCESS;
3320
3321#else /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */
3322
3323# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
3324# ifdef PGM_WITHOUT_MAPPINGS
3325 Assert(pVM->pgm.s.fMappingsFixed);
3326 return VINF_SUCCESS;
3327# else
3328 /* Nothing to do when mappings are fixed. */
3329 if (pVM->pgm.s.fMappingsFixed)
3330 return VINF_SUCCESS;
3331
3332 int rc = PGMMapResolveConflicts(pVM);
3333 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);
3334 if (rc == VINF_PGM_SYNC_CR3)
3335 {
3336 LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
3337 return VINF_PGM_SYNC_CR3;
3338 }
3339# endif
3340 return VINF_SUCCESS;
3341# else
3342 /*
3343 * PAE and 32-bit legacy mode (shadow).
3344 * (Guest PAE, 32-bit legacy, protected and real modes.)
3345 */
3346 Assert(fGlobal || (cr4 & X86_CR4_PGE));
3347 MY_STAM_COUNTER_INC(fGlobal ? &pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3Global) : &pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3NotGlobal));
3348
3349# if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
3350 bool const fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
3351
3352 /*
3353 * Get page directory addresses.
3354 */
3355# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3356 PX86PDE pPDEDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, 0);
3357# else /* PGM_SHW_TYPE == PGM_TYPE_PAE */
3358# if PGM_GST_TYPE == PGM_TYPE_32BIT
3359 PX86PDEPAE pPDEDst = NULL;
3360# endif
3361# endif
3362
3363# if PGM_GST_TYPE == PGM_TYPE_32BIT
3364 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
3365 Assert(pPDSrc);
3366# if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
3367 Assert(PGMPhysGCPhys2R3PtrAssert(pVM, (RTGCPHYS)(cr3 & GST_CR3_PAGE_MASK), sizeof(*pPDSrc)) == (RTR3PTR)pPDSrc);
3368# endif
3369# endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */
3370
3371 /*
3372 * Iterate the CR3 page.
3373 */
3374 PPGMMAPPING pMapping;
3375 unsigned iPdNoMapping;
3376 const bool fRawR0Enabled = EMIsRawRing0Enabled(pVM);
3377 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3378
3379 /* Only check mappings if they are supposed to be put into the shadow page table. */
3380 if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
3381 {
3382 pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
3383 iPdNoMapping = (pMapping) ? (pMapping->GCPtr >> GST_PD_SHIFT) : ~0U;
3384 }
3385 else
3386 {
3387 pMapping = 0;
3388 iPdNoMapping = ~0U;
3389 }
3390
3391# if PGM_GST_TYPE == PGM_TYPE_PAE
3392 for (uint64_t iPdpt = 0; iPdpt < GST_PDPE_ENTRIES; iPdpt++)
3393 {
3394 unsigned iPDSrc;
3395 X86PDPE PdpeSrc;
3396 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, iPdpt << X86_PDPT_SHIFT, &iPDSrc, &PdpeSrc);
3397 PX86PDEPAE pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, iPdpt << X86_PDPT_SHIFT);
3398 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
3399
3400 if (pPDSrc == NULL)
3401 {
3402 /* PDPE not present */
3403 if (pPdptDst->a[iPdpt].n.u1Present)
3404 {
3405 LogFlow(("SyncCR3: guest PDPE %lld not present; clear shw pdpe\n", iPdpt));
3406 /* for each page directory entry */
3407 for (unsigned iPD = 0; iPD < RT_ELEMENTS(pPDSrc->a); iPD++)
3408 {
3409 if ( pPDEDst[iPD].n.u1Present
3410 && !(pPDEDst[iPD].u & PGM_PDFLAGS_MAPPING))
3411 {
3412 pgmPoolFree(pVM, pPDEDst[iPD].u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPdpt * X86_PG_PAE_ENTRIES + iPD);
3413 pPDEDst[iPD].u = 0;
3414 }
3415 }
3416 }
3417 if (!(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
3418 pPdptDst->a[iPdpt].n.u1Present = 0;
3419 continue;
3420 }
3421# else /* PGM_GST_TYPE != PGM_TYPE_PAE */
3422 {
3423# endif /* PGM_GST_TYPE != PGM_TYPE_PAE */
3424 for (unsigned iPD = 0; iPD < RT_ELEMENTS(pPDSrc->a); iPD++)
3425 {
3426# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3427 if ((iPD & 255) == 0) /* Start of new PD. */
3428 pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, (uint32_t)iPD << GST_PD_SHIFT);
3429# endif
3430# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3431 Assert(pgmShwGet32BitPDEPtr(&pVM->pgm.s, (uint32_t)iPD << SHW_PD_SHIFT) == pPDEDst);
3432# elif PGM_SHW_TYPE == PGM_TYPE_PAE
3433# if defined(VBOX_STRICT) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) /* Unfortunately not reliable with PGMR0DynMap and multiple VMs. */
3434 RTGCPTR GCPtrStrict = (uint32_t)iPD << GST_PD_SHIFT;
3435# if PGM_GST_TYPE == PGM_TYPE_PAE
3436 GCPtrStrict |= iPdpt << X86_PDPT_SHIFT;
3437# endif
3438 AssertMsg(pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrStrict) == pPDEDst, ("%p vs %p (%RGv)\n", pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrStrict), pPDEDst, GCPtrStrict));
3439# endif /* VBOX_STRICT */
3440# endif
3441 GSTPDE PdeSrc = pPDSrc->a[iPD];
3442 if ( PdeSrc.n.u1Present
3443 && (PdeSrc.n.u1User || fRawR0Enabled))
3444 {
3445# if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
3446 || PGM_GST_TYPE == PGM_TYPE_PAE) \
3447 && !defined(PGM_WITHOUT_MAPPINGS)
3448
3449 /*
3450 * Check for conflicts with GC mappings.
3451 */
3452# if PGM_GST_TYPE == PGM_TYPE_PAE
3453 if (iPD + iPdpt * X86_PG_PAE_ENTRIES == iPdNoMapping)
3454# else
3455 if (iPD == iPdNoMapping)
3456# endif
3457 {
3458 if (pVM->pgm.s.fMappingsFixed)
3459 {
3460 /* It's fixed, just skip the mapping. */
3461 const unsigned cPTs = pMapping->cb >> GST_PD_SHIFT;
3462 Assert(PGM_GST_TYPE == PGM_TYPE_32BIT || (iPD + cPTs - 1) / X86_PG_PAE_ENTRIES == iPD / X86_PG_PAE_ENTRIES);
3463 iPD += cPTs - 1;
3464# if PGM_SHW_TYPE != PGM_GST_TYPE /* SHW==PAE && GST==32BIT */
3465 pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, (uint32_t)(iPD + 1) << GST_PD_SHIFT);
3466# else
3467 pPDEDst += cPTs;
3468# endif
3469 pMapping = pMapping->CTX_SUFF(pNext);
3470 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
3471 continue;
3472 }
3473# ifdef IN_RING3
3474# if PGM_GST_TYPE == PGM_TYPE_32BIT
3475 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT);
3476# elif PGM_GST_TYPE == PGM_TYPE_PAE
3477 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPdpt << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));
3478# endif
3479 if (RT_FAILURE(rc))
3480 return rc;
3481
3482 /*
3483 * Update iPdNoMapping and pMapping.
3484 */
3485 pMapping = pVM->pgm.s.pMappingsR3;
3486 while (pMapping && pMapping->GCPtr < (iPD << GST_PD_SHIFT))
3487 pMapping = pMapping->pNextR3;
3488 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
3489# else /* !IN_RING3 */
3490 LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
3491 return VINF_PGM_SYNC_CR3;
3492# endif /* !IN_RING3 */
3493 }
3494# else /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
3495 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
3496# endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
3497
3498 /*
3499 * Sync page directory entry.
3500 *
3501 * The current approach is to allocate the page table but set
3502 * the entry to not-present, postponing the page table sync until
3503 * it's actually used.
3504 */
3505# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3506 for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
3507# elif PGM_GST_TYPE == PGM_TYPE_PAE
3508 const unsigned iPdShw = iPD + iPdpt * X86_PG_PAE_ENTRIES; NOREF(iPdShw);
3509# else
3510 const unsigned iPdShw = iPD; NOREF(iPdShw);
3511# endif
3512 {
3513 SHWPDE PdeDst = *pPDEDst;
3514 if (PdeDst.n.u1Present)
3515 {
3516 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK);
3517 RTGCPHYS GCPhys;
3518 if ( !PdeSrc.b.u1Size
3519 || !fBigPagesSupported)
3520 {
3521 GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
3522# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3523 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
3524 GCPhys |= i * (PAGE_SIZE / 2);
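                            /* Worked example: a 32-bit guest PT is one 4KB page holding 1024
                               entries, while a PAE shadow PT holds only 512; shadow PT i=0
                               therefore shadows guest PTEs 0..511 (the first 2KB of the guest
                               PT) and i=1 shadows PTEs 512..1023 (the second 2KB), hence the
                               i * 2KB offset. */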
3525# endif
3526 }
3527 else
3528 {
3529 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
3530# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3531                         /* Select the right PDE as we're emulating a 4MB page with two 2MB shadow PDEs. */
3532 GCPhys |= i * X86_PAGE_2M_SIZE;
3533# endif
3534 }
3535
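                        /* The existing shadow PT is kept if (roughly): it shadows the same
                           guest physical range, is of the kind this PDE warrants, and is
                           cached or survives a non-global flush (a global 2/4MB page, or a
                           PT that never saw a non-global mapping), and the US/RW attributes
                           still match (modulo big-page dirty tracking). */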
3536 if ( pShwPage->GCPhys == GCPhys
3537 && pShwPage->enmKind == PGM_BTH_NAME(CalcPageKind)(&PdeSrc, cr4)
3538 && ( pShwPage->fCached
3539 || ( !fGlobal
3540 && ( false
3541# ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
3542 || ( (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
3543 && (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE)) /* global 2/4MB page. */
3544 || ( !pShwPage->fSeenNonGlobal
3545 && (cr4 & X86_CR4_PGE))
3546# endif
3547 )
3548 )
3549 )
3550 && ( (PdeSrc.u & (X86_PDE_US | X86_PDE_RW)) == (PdeDst.u & (X86_PDE_US | X86_PDE_RW))
3551 || ( fBigPagesSupported
3552 && ((PdeSrc.u & (X86_PDE_US | X86_PDE4M_PS | X86_PDE4M_D)) | PGM_PDFLAGS_TRACK_DIRTY)
3553 == ((PdeDst.u & (X86_PDE_US | X86_PDE_RW | PGM_PDFLAGS_TRACK_DIRTY)) | X86_PDE4M_PS))
3554 )
3555 )
3556 {
3557# ifdef VBOX_WITH_STATISTICS
3558 if ( !fGlobal
3559 && (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
3560 && (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE))
3561 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstSkippedGlobalPD));
3562 else if (!fGlobal && !pShwPage->fSeenNonGlobal && (cr4 & X86_CR4_PGE))
3563 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstSkippedGlobalPT));
3564 else
3565 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstCacheHit));
3566# endif /* VBOX_WITH_STATISTICS */
3567 /** @todo a replacement strategy isn't really needed unless we're using a very small pool < 512 pages.
3568 * The whole ageing stuff should be put in yet another set of #ifdefs. For now, let's just skip it. */
3569 //# ifdef PGMPOOL_WITH_CACHE
3570 // pgmPoolCacheUsed(pPool, pShwPage);
3571 //# endif
3572 }
3573 else
3574 {
3575 pgmPoolFreeByPage(pPool, pShwPage, SHW_POOL_ROOT_IDX, iPdShw);
3576 pPDEDst->u = 0;
3577 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstFreed));
3578 }
3579 }
3580 else
3581 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstNotPresent));
3582
3583 /* advance */
3584 pPDEDst++;
3585 } /* foreach 2MB PAE PDE in 4MB guest PDE */
3586 }
3587# if PGM_GST_TYPE == PGM_TYPE_PAE
3588 else if (iPD + iPdpt * X86_PG_PAE_ENTRIES != iPdNoMapping)
3589# else
3590 else if (iPD != iPdNoMapping)
3591# endif
3592 {
3593 /*
3594 * Check if there is any page directory to mark not present here.
3595 */
3596# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3597 for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
3598# elif PGM_GST_TYPE == PGM_TYPE_PAE
3599 const unsigned iPdShw = iPD + iPdpt * X86_PG_PAE_ENTRIES;
3600# else
3601 const unsigned iPdShw = iPD;
3602# endif
3603 {
3604 if (pPDEDst->n.u1Present)
3605 {
3606 pgmPoolFree(pVM, pPDEDst->u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPdShw);
3607 pPDEDst->u = 0;
3608 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstFreedSrcNP));
3609 }
3610 pPDEDst++;
3611 }
3612 }
3613 else
3614 {
3615# if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
3616 || PGM_GST_TYPE == PGM_TYPE_PAE) \
3617 && !defined(PGM_WITHOUT_MAPPINGS)
3618
3619 const unsigned cPTs = pMapping->cb >> GST_PD_SHIFT;
3620
3621 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
3622 if (pVM->pgm.s.fMappingsFixed)
3623 {
3624 /* It's fixed, just skip the mapping. */
3625 pMapping = pMapping->CTX_SUFF(pNext);
3626 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
3627 }
3628 else
3629 {
3630 /*
3631 * Check for conflicts for subsequent pagetables
3632 * and advance to the next mapping.
3633 */
3634 iPdNoMapping = ~0U;
3635 unsigned iPT = cPTs;
3636 while (iPT-- > 1)
3637 {
3638 if ( pPDSrc->a[iPD + iPT].n.u1Present
3639 && (pPDSrc->a[iPD + iPT].n.u1User || fRawR0Enabled))
3640 {
3641# ifdef IN_RING3
3642# if PGM_GST_TYPE == PGM_TYPE_32BIT
3643 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT);
3644# elif PGM_GST_TYPE == PGM_TYPE_PAE
3645 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPdpt << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));
3646# endif
3647 if (RT_FAILURE(rc))
3648 return rc;
3649
3650 /*
3651 * Update iPdNoMapping and pMapping.
3652 */
3653 pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
3654 while (pMapping && pMapping->GCPtr < (iPD << GST_PD_SHIFT))
3655 pMapping = pMapping->CTX_SUFF(pNext);
3656 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
3657 break;
3658# else
3659 LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
3660 return VINF_PGM_SYNC_CR3;
3661# endif
3662 }
3663 }
3664 if (iPdNoMapping == ~0U && pMapping)
3665 {
3666 pMapping = pMapping->CTX_SUFF(pNext);
3667 if (pMapping)
3668 iPdNoMapping = pMapping->GCPtr >> GST_PD_SHIFT;
3669 }
3670 }
3671
3672 /* advance. */
3673 Assert(PGM_GST_TYPE == PGM_TYPE_32BIT || (iPD + cPTs - 1) / X86_PG_PAE_ENTRIES == iPD / X86_PG_PAE_ENTRIES);
3674 iPD += cPTs - 1;
3675# if PGM_SHW_TYPE != PGM_GST_TYPE /* SHW==PAE && GST==32BIT */
3676 pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, (uint32_t)(iPD + 1) << GST_PD_SHIFT);
3677# else
3678 pPDEDst += cPTs;
3679# endif
3680# if PGM_GST_TYPE != PGM_SHW_TYPE
3681 AssertCompile(PGM_GST_TYPE == PGM_TYPE_32BIT && PGM_SHW_TYPE == PGM_TYPE_PAE);
3682# endif
3683# else /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
3684 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
3685# endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
3686 }
3687
3688 } /* for iPD */
3689 } /* for each PDPTE (PAE) */
3690 return VINF_SUCCESS;
3691
3692# else /* guest real and protected mode */
3693 return VINF_SUCCESS;
3694# endif
3695#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
3696#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */
3697}
3698
3699
3700
3701
3702#ifdef VBOX_STRICT
3703#ifdef IN_RC
3704# undef AssertMsgFailed
3705# define AssertMsgFailed Log
3706#endif
3707#ifdef IN_RING3
3708# include <VBox/dbgf.h>
3709
3710/**
3711 * Dumps a page table hierarchy using only physical addresses and cr4/lm flags.
3712 *
3713 * @returns VBox status code (VINF_SUCCESS).
3714 * @param pVM The VM handle.
3715 * @param cr3 The root of the hierarchy.
3716 * @param cr4 The cr4 register value; only the PAE and PSE flags are currently used.
3717 * @param fLongMode Set if long mode, false if not long mode.
3718 * @param cMaxDepth Number of levels to dump.
3719 * @param pHlp Pointer to the output functions.
3720 */
3721__BEGIN_DECLS
3722VMMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint32_t cr3, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp);
3723__END_DECLS
3724
3725#endif
3726
3727/**
3728 * Checks that the shadow page table is in sync with the guest one.
3729 *
3730 * @returns The number of errors.
3731 * @param pVM The virtual machine.
3732 * @param cr3 Guest context CR3 register
3733 * @param cr4 Guest context CR4 register
3734 * @param GCPtr Where to start. Defaults to 0.
3735 * @param cb How much to check. Defaults to everything.
3736 */
3737PGM_BTH_DECL(unsigned, AssertCR3)(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb)
3738{
3739#if PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
3740 return 0;
3741#else
3742 unsigned cErrors = 0;
3743
3744#if PGM_GST_TYPE == PGM_TYPE_PAE
3745 /** @todo currently broken; crashes below somewhere */
3746 AssertFailed();
3747#endif
3748
3749#if PGM_GST_TYPE == PGM_TYPE_32BIT \
3750 || PGM_GST_TYPE == PGM_TYPE_PAE \
3751 || PGM_GST_TYPE == PGM_TYPE_AMD64
3752
3753# if PGM_GST_TYPE == PGM_TYPE_AMD64
3754 bool fBigPagesSupported = true;
3755# else
3756 bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
3757# endif
3758 PPGM pPGM = &pVM->pgm.s;
3759 RTGCPHYS GCPhysGst; /* page address derived from the guest page tables. */
3760 RTHCPHYS HCPhysShw; /* page address derived from the shadow page tables. */
3761# ifndef IN_RING0
3762 RTHCPHYS HCPhys; /* general usage. */
3763# endif
3764 int rc;
3765
3766 /*
3767 * Check that the Guest CR3 and all its mappings are correct.
3768 */
3769 AssertMsgReturn(pPGM->GCPhysCR3 == (cr3 & GST_CR3_PAGE_MASK),
3770 ("Invalid GCPhysCR3=%RGp cr3=%RGp\n", pPGM->GCPhysCR3, (RTGCPHYS)cr3),
3771 false);
3772# if !defined(IN_RING0) && PGM_GST_TYPE != PGM_TYPE_AMD64
3773# if PGM_GST_TYPE == PGM_TYPE_32BIT
3774 rc = PGMShwGetPage(pVM, (RTGCPTR)pPGM->pGst32BitPdRC, NULL, &HCPhysShw);
3775# else
3776 rc = PGMShwGetPage(pVM, (RTGCPTR)pPGM->pGstPaePdptRC, NULL, &HCPhysShw);
3777# endif
3778 AssertRCReturn(rc, 1);
3779 HCPhys = NIL_RTHCPHYS;
3780 rc = pgmRamGCPhys2HCPhys(pPGM, cr3 & GST_CR3_PAGE_MASK, &HCPhys);
3781 AssertMsgReturn(HCPhys == HCPhysShw, ("HCPhys=%RHp HCPhysShw=%RHp (cr3)\n", HCPhys, HCPhysShw), false);
3782# if PGM_GST_TYPE == PGM_TYPE_32BIT && defined(IN_RING3)
3783 RTGCPHYS GCPhys;
3784 rc = PGMR3DbgR3Ptr2GCPhys(pVM, pPGM->pGst32BitPdR3, &GCPhys);
3785 AssertRCReturn(rc, 1);
3786 AssertMsgReturn((cr3 & GST_CR3_PAGE_MASK) == GCPhys, ("GCPhys=%RGp cr3=%RGp\n", GCPhys, (RTGCPHYS)cr3), false);
3787# endif
3788# endif /* !IN_RING0 */
3789
3790 /*
3791 * Get and check the Shadow CR3.
3792 */
3793# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3794 unsigned cPDEs = X86_PG_ENTRIES;
3795 unsigned cIncrement = X86_PG_ENTRIES * PAGE_SIZE;
3796# elif PGM_SHW_TYPE == PGM_TYPE_PAE
3797# if PGM_GST_TYPE == PGM_TYPE_32BIT
3798 unsigned cPDEs = X86_PG_PAE_ENTRIES * 4; /* treat it as a 2048 entry table. */
3799# else
3800 unsigned cPDEs = X86_PG_PAE_ENTRIES;
3801# endif
3802 unsigned cIncrement = X86_PG_PAE_ENTRIES * PAGE_SIZE;
3803# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
3804 unsigned cPDEs = X86_PG_PAE_ENTRIES;
3805 unsigned cIncrement = X86_PG_PAE_ENTRIES * PAGE_SIZE;
3806# endif
3807 if (cb != ~(RTGCPTR)0)
3808 cPDEs = RT_MIN(RT_MAX(cb >> SHW_PD_SHIFT, 1U), cPDEs); /* check at least one PDE, at most the whole table */
3809
3810/** @todo call the other two PGMAssert*() functions. */
3811
3812# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
3813 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3814# endif
3815
3816# if PGM_GST_TYPE == PGM_TYPE_AMD64
3817 unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3818
3819 for (; iPml4 < X86_PG_PAE_ENTRIES; iPml4++)
3820 {
3821 PPGMPOOLPAGE pShwPdpt = NULL;
3822 PX86PML4E pPml4eSrc;
3823 PX86PML4E pPml4eDst;
3824 RTGCPHYS GCPhysPdptSrc;
3825
3826 pPml4eSrc = pgmGstGetLongModePML4EPtr(&pVM->pgm.s, iPml4);
3827 pPml4eDst = pgmShwGetLongModePML4EPtr(&pVM->pgm.s, iPml4);
3828
3829 /* Fetch the pgm pool shadow descriptor if the shadow pml4e is present. */
3830 if (!pPml4eDst->n.u1Present)
3831 {
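            /* One PML4E covers 512 PDPTEs x 512 PDEs x 2MB = 512GB of address space. */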
3832 GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
3833 continue;
3834 }
3835
3836 pShwPdpt = pgmPoolGetPage(pPool, pPml4eDst->u & X86_PML4E_PG_MASK);
3837 GCPhysPdptSrc = pPml4eSrc->u & X86_PML4E_PG_MASK_FULL;
3838
3839 if (pPml4eSrc->n.u1Present != pPml4eDst->n.u1Present)
3840 {
3841 AssertMsgFailed(("Present bit doesn't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
3842 GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
3843 cErrors++;
3844 continue;
3845 }
3846
3847 if (GCPhysPdptSrc != pShwPdpt->GCPhys)
3848 {
3849 AssertMsgFailed(("Physical address doesn't match! iPml4 %d pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4, pPml4eDst->u, pPml4eSrc->u, pShwPdpt->GCPhys, GCPhysPdptSrc));
3850 GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
3851 cErrors++;
3852 continue;
3853 }
3854
3855 if ( pPml4eDst->n.u1User != pPml4eSrc->n.u1User
3856 || pPml4eDst->n.u1Write != pPml4eSrc->n.u1Write
3857 || pPml4eDst->n.u1NoExecute != pPml4eSrc->n.u1NoExecute)
3858 {
3859 AssertMsgFailed(("User/Write/NoExec bits don't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
3860 GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
3861 cErrors++;
3862 continue;
3863 }
3864# else /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
3865 {
3866# endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
3867
3868# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
3869 /*
3870 * Check the PDPTEs too.
3871 */
3872 unsigned iPdpt = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
3873
3874 for (;iPdpt <= SHW_PDPT_MASK; iPdpt++)
3875 {
3876 unsigned iPDSrc;
3877 PPGMPOOLPAGE pShwPde = NULL;
3878 PX86PDPE pPdpeDst;
3879 RTGCPHYS GCPhysPdeSrc;
3880# if PGM_GST_TYPE == PGM_TYPE_PAE
3881 X86PDPE PdpeSrc;
3882 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtr, &iPDSrc, &PdpeSrc);
3883 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
3884# else
3885 PX86PML4E pPml4eSrc;
3886 X86PDPE PdpeSrc;
3887 PX86PDPT pPdptDst;
3888 PX86PDPAE pPDDst;
3889 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtr, &pPml4eSrc, &PdpeSrc, &iPDSrc);
3890
3891 rc = pgmShwGetLongModePDPtr(pVM, GCPtr, NULL, &pPdptDst, &pPDDst);
3892 if (rc != VINF_SUCCESS)
3893 {
3894 AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT, ("Unexpected rc=%Rrc\n", rc));
3895 GCPtr += 512 * _2M;
3896 continue; /* next PDPTE */
3897 }
3898 Assert(pPDDst);
3899# endif
3900 Assert(iPDSrc == 0);
3901
3902 pPdpeDst = &pPdptDst->a[iPdpt];
3903
3904 if (!pPdpeDst->n.u1Present)
3905 {
3906 GCPtr += 512 * _2M;
3907 continue; /* next PDPTE */
3908 }
3909
3910 pShwPde = pgmPoolGetPage(pPool, pPdpeDst->u & X86_PDPE_PG_MASK);
3911 GCPhysPdeSrc = PdpeSrc.u & X86_PDPE_PG_MASK;
3912
3913 if (pPdpeDst->n.u1Present != PdpeSrc.n.u1Present)
3914 {
3915 AssertMsgFailed(("Present bit doesn't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));
3916 GCPtr += 512 * _2M;
3917 cErrors++;
3918 continue;
3919 }
3920
3921 if (GCPhysPdeSrc != pShwPde->GCPhys)
3922 {
3923# if PGM_GST_TYPE == PGM_TYPE_AMD64
3924 AssertMsgFailed(("Physical address doesn't match! iPml4 %d iPdpt %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4, iPdpt, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
3925# else
3926 AssertMsgFailed(("Physical address doesn't match! iPdpt %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPdpt, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
3927# endif
3928 GCPtr += 512 * _2M;
3929 cErrors++;
3930 continue;
3931 }
3932
3933# if PGM_GST_TYPE == PGM_TYPE_AMD64
3934 if ( pPdpeDst->lm.u1User != PdpeSrc.lm.u1User
3935 || pPdpeDst->lm.u1Write != PdpeSrc.lm.u1Write
3936 || pPdpeDst->lm.u1NoExecute != PdpeSrc.lm.u1NoExecute)
3937 {
3938 AssertMsgFailed(("User/Write/NoExec bits don't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));
3939 GCPtr += 512 * _2M;
3940 cErrors++;
3941 continue;
3942 }
3943# endif
3944
3945# else /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
3946 {
3947# endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
3948# if PGM_GST_TYPE == PGM_TYPE_32BIT
3949 GSTPD const *pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
3950# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3951 PCX86PD pPDDst = pgmShwGet32BitPDPtr(&pVM->pgm.s);
3952# endif
3953# endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */
3954 /*
3955 * Iterate the shadow page directory.
3956 */
3957 GCPtr = (GCPtr >> SHW_PD_SHIFT) << SHW_PD_SHIFT;
3958 unsigned iPDDst = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
3959
3960 for (;
3961 iPDDst < cPDEs;
3962 iPDDst++, GCPtr += cIncrement)
3963 {
3964# if PGM_SHW_TYPE == PGM_TYPE_PAE
3965 const SHWPDE PdeDst = *pgmShwGetPaePDEPtr(pPGM, GCPtr);
3966# else
3967 const SHWPDE PdeDst = pPDDst->a[iPDDst];
3968# endif
3969 if (PdeDst.u & PGM_PDFLAGS_MAPPING)
3970 {
3971 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
3972 if ((PdeDst.u & X86_PDE_AVL_MASK) != PGM_PDFLAGS_MAPPING)
3973 {
3974 AssertMsgFailed(("Mapping shall only have PGM_PDFLAGS_MAPPING set! PdeDst.u=%#RX64\n", (uint64_t)PdeDst.u));
3975 cErrors++;
3976 continue;
3977 }
3978 }
3979 else if ( (PdeDst.u & X86_PDE_P)
3980 || ((PdeDst.u & (X86_PDE_P | PGM_PDFLAGS_TRACK_DIRTY)) == (X86_PDE_P | PGM_PDFLAGS_TRACK_DIRTY))
3981 )
3982 {
3983 HCPhysShw = PdeDst.u & SHW_PDE_PG_MASK;
3984 PPGMPOOLPAGE pPoolPage = pgmPoolGetPageByHCPhys(pVM, HCPhysShw);
3985 if (!pPoolPage)
3986 {
3987 AssertMsgFailed(("Invalid page table address %RHp at %RGv! PdeDst=%#RX64\n",
3988 HCPhysShw, GCPtr, (uint64_t)PdeDst.u));
3989 cErrors++;
3990 continue;
3991 }
3992 const SHWPT *pPTDst = (const SHWPT *)PGMPOOL_PAGE_2_PTR(pVM, pPoolPage);
3993
3994 if (PdeDst.u & (X86_PDE4M_PWT | X86_PDE4M_PCD))
3995 {
3996 AssertMsgFailed(("PDE flags PWT and/or PCD is set at %RGv! These flags are not virtualized! PdeDst=%#RX64\n",
3997 GCPtr, (uint64_t)PdeDst.u));
3998 cErrors++;
3999 }
4000
4001 if (PdeDst.u & (X86_PDE4M_G | X86_PDE4M_D))
4002 {
4003 AssertMsgFailed(("4K PDE reserved flags at %RGv! PdeDst=%#RX64\n",
4004 GCPtr, (uint64_t)PdeDst.u));
4005 cErrors++;
4006 }
4007
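                    /* Note: when the shadow PD is finer grained than the guest PD (PAE shadow
                       over a 32-bit guest) two shadow PDEs share one guest PDE, so the shadow
                       index is scaled down before indexing pPDSrc. */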
4008 const GSTPDE PdeSrc = pPDSrc->a[(iPDDst >> (GST_PD_SHIFT - SHW_PD_SHIFT)) & GST_PD_MASK];
4009 if (!PdeSrc.n.u1Present)
4010 {
4011 AssertMsgFailed(("Guest PDE at %RGv is not present! PdeDst=%#RX64 PdeSrc=%#RX64\n",
4012 GCPtr, (uint64_t)PdeDst.u, (uint64_t)PdeSrc.u));
4013 cErrors++;
4014 continue;
4015 }
4016
4017 if ( !PdeSrc.b.u1Size
4018 || !fBigPagesSupported)
4019 {
4020 GCPhysGst = PdeSrc.u & GST_PDE_PG_MASK;
4021# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
4022 GCPhysGst |= (iPDDst & 1) * (PAGE_SIZE / 2);
4023# endif
4024 }
4025 else
4026 {
4027# if PGM_GST_TYPE == PGM_TYPE_32BIT
4028 if (PdeSrc.u & X86_PDE4M_PG_HIGH_MASK)
4029 {
4030 AssertMsgFailed(("Guest PDE at %RGv is using PSE36 or similar! PdeSrc=%#RX64\n",
4031 GCPtr, (uint64_t)PdeSrc.u));
4032 cErrors++;
4033 continue;
4034 }
4035# endif
4036 GCPhysGst = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
4037# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
4038 GCPhysGst |= GCPtr & RT_BIT(X86_PAGE_2M_SHIFT);
4039# endif
4040 }
4041
4042 if ( pPoolPage->enmKind
4043 != (!PdeSrc.b.u1Size || !fBigPagesSupported ? BTH_PGMPOOLKIND_PT_FOR_PT : BTH_PGMPOOLKIND_PT_FOR_BIG))
4044 {
4045 AssertMsgFailed(("Invalid shadow page table kind %d at %RGv! PdeSrc=%#RX64\n",
4046 pPoolPage->enmKind, GCPtr, (uint64_t)PdeSrc.u));
4047 cErrors++;
4048 }
4049
4050 PPGMPAGE pPhysPage = pgmPhysGetPage(pPGM, GCPhysGst);
4051 if (!pPhysPage)
4052 {
4053 AssertMsgFailed(("Cannot find guest physical address %RGp in the PDE at %RGv! PdeSrc=%#RX64\n",
4054 GCPhysGst, GCPtr, (uint64_t)PdeSrc.u));
4055 cErrors++;
4056 continue;
4057 }
4058
4059 if (GCPhysGst != pPoolPage->GCPhys)
4060 {
4061 AssertMsgFailed(("GCPhysGst=%RGp != pPage->GCPhys=%RGp at %RGv\n",
4062 GCPhysGst, pPoolPage->GCPhys, GCPtr));
4063 cErrors++;
4064 continue;
4065 }
4066
4067 if ( !PdeSrc.b.u1Size
4068 || !fBigPagesSupported)
4069 {
4070 /*
4071 * Page Table.
4072 */
4073 const GSTPT *pPTSrc;
4074 rc = PGM_GCPHYS_2_PTR(pVM, GCPhysGst & ~(RTGCPHYS)(PAGE_SIZE - 1), &pPTSrc);
4075 if (RT_FAILURE(rc))
4076 {
4077 AssertMsgFailed(("Cannot map/convert guest physical address %RGp in the PDE at %RGv! PdeSrc=%#RX64\n",
4078 GCPhysGst, GCPtr, (uint64_t)PdeSrc.u));
4079 cErrors++;
4080 continue;
4081 }
4082 if ( (PdeSrc.u & (X86_PDE_P | X86_PDE_US | X86_PDE_RW/* | X86_PDE_A*/))
4083 != (PdeDst.u & (X86_PDE_P | X86_PDE_US | X86_PDE_RW/* | X86_PDE_A*/)))
4084 {
4085 /// @todo We get here a lot on out-of-sync CR3 entries. The access handler should zap them to avoid false alarms here!
4086 // (This problem will go away when/if we shadow multiple CR3s.)
4087 AssertMsgFailed(("4K PDE flags mismatch at %RGv! PdeSrc=%#RX64 PdeDst=%#RX64\n",
4088 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4089 cErrors++;
4090 continue;
4091 }
4092 if (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)
4093 {
4094 AssertMsgFailed(("4K PDEs cannot have PGM_PDFLAGS_TRACK_DIRTY set! GCPtr=%RGv PdeDst=%#RX64\n",
4095 GCPtr, (uint64_t)PdeDst.u));
4096 cErrors++;
4097 continue;
4098 }
4099
4100 /* iterate the page table. */
4101# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
4102 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
4103 const unsigned offPTSrc = ((GCPtr >> SHW_PD_SHIFT) & 1) * 512;
4104# else
4105 const unsigned offPTSrc = 0;
4106# endif
4107 for (unsigned iPT = 0, off = 0;
4108 iPT < RT_ELEMENTS(pPTDst->a);
4109 iPT++, off += PAGE_SIZE)
4110 {
4111 const SHWPTE PteDst = pPTDst->a[iPT];
4112
4113 /* skip not-present entries. */
4114 if (!(PteDst.u & (X86_PTE_P | PGM_PTFLAGS_TRACK_DIRTY))) /** @todo deal with ALL handlers and CSAM !P pages! */
4115 continue;
4116 Assert(PteDst.n.u1Present);
4117
4118 const GSTPTE PteSrc = pPTSrc->a[iPT + offPTSrc];
4119 if (!PteSrc.n.u1Present)
4120 {
4121# ifdef IN_RING3
4122 PGMAssertHandlerAndFlagsInSync(pVM);
4123 PGMR3DumpHierarchyGC(pVM, cr3, cr4, (PdeSrc.u & GST_PDE_PG_MASK));
4124# endif
4125 AssertMsgFailed(("Out of sync (!P) PTE at %RGv! PteSrc=%#RX64 PteDst=%#RX64 pPTSrc=%RGv iPTSrc=%x PdeSrc=%x physpte=%RGp\n",
4126 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u, pPTSrc, iPT + offPTSrc, PdeSrc.au32[0],
4127 (PdeSrc.u & GST_PDE_PG_MASK) + (iPT + offPTSrc)*sizeof(PteSrc)));
4128 cErrors++;
4129 continue;
4130 }
4131
4132 uint64_t fIgnoreFlags = GST_PTE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_G | X86_PTE_D | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT;
4133# if 1 /** @todo sync accessed bit properly... */
4134 fIgnoreFlags |= X86_PTE_A;
4135# endif
4136
4137 /* match the physical addresses */
4138 HCPhysShw = PteDst.u & SHW_PTE_PG_MASK;
4139 GCPhysGst = PteSrc.u & GST_PTE_PG_MASK;
4140
4141# ifdef IN_RING3
4142 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysGst, &HCPhys);
4143 if (RT_FAILURE(rc))
4144 {
4145 if (HCPhysShw != MMR3PageDummyHCPhys(pVM))
4146 {
4147 AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PteSrc=%#RX64 PteDst=%#RX64\n",
4148 GCPhysGst, GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4149 cErrors++;
4150 continue;
4151 }
4152 }
4153 else if (HCPhysShw != (HCPhys & SHW_PTE_PG_MASK))
4154 {
4155 AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
4156 GCPtr + off, HCPhysShw, HCPhys, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4157 cErrors++;
4158 continue;
4159 }
4160# endif
4161
4162 pPhysPage = pgmPhysGetPage(pPGM, GCPhysGst);
4163 if (!pPhysPage)
4164 {
4165# ifdef IN_RING3 /** @todo make MMR3PageDummyHCPhys an 'All' function! */
4166 if (HCPhysShw != MMR3PageDummyHCPhys(pVM))
4167 {
4168 AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PteSrc=%#RX64 PteDst=%#RX64\n",
4169 GCPhysGst, GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4170 cErrors++;
4171 continue;
4172 }
4173# endif
4174 if (PteDst.n.u1Write)
4175 {
4176 AssertMsgFailed(("Invalid guest page at %RGv is writable! GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
4177 GCPtr + off, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4178 cErrors++;
4179 }
4180 fIgnoreFlags |= X86_PTE_RW;
4181 }
4182 else if (HCPhysShw != (PGM_PAGE_GET_HCPHYS(pPhysPage) & SHW_PTE_PG_MASK))
4183 {
4184 AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
4185 GCPtr + off, HCPhysShw, pPhysPage->HCPhys, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4186 cErrors++;
4187 continue;
4188 }
4189
4190 /* flags */
4191 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPhysPage))
4192 {
4193 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPhysPage))
4194 {
4195 if (PteDst.n.u1Write)
4196 {
4197 AssertMsgFailed(("WRITE access flagged at %RGv but the page is writable! HCPhys=%RHp PteSrc=%#RX64 PteDst=%#RX64\n",
4198 GCPtr + off, pPhysPage->HCPhys, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4199 cErrors++;
4200 continue;
4201 }
4202 fIgnoreFlags |= X86_PTE_RW;
4203 }
4204 else
4205 {
4206 if (PteDst.n.u1Present)
4207 {
4208 AssertMsgFailed(("ALL access flagged at %RGv but the page is present! HCPhys=%RHp PteSrc=%#RX64 PteDst=%#RX64\n",
4209 GCPtr + off, pPhysPage->HCPhys, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4210 cErrors++;
4211 continue;
4212 }
4213 fIgnoreFlags |= X86_PTE_P;
4214 }
4215 }
4216 else
4217 {
4218 if (!PteSrc.n.u1Dirty && PteSrc.n.u1Write)
4219 {
4220 if (PteDst.n.u1Write)
4221 {
4222 AssertMsgFailed(("!DIRTY page at %RGv is writable! PteSrc=%#RX64 PteDst=%#RX64\n",
4223 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4224 cErrors++;
4225 continue;
4226 }
4227 if (!(PteDst.u & PGM_PTFLAGS_TRACK_DIRTY))
4228 {
4229 AssertMsgFailed(("!DIRTY page at %RGv is not marked TRACK_DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
4230 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4231 cErrors++;
4232 continue;
4233 }
4234 if (PteDst.n.u1Dirty)
4235 {
4236 AssertMsgFailed(("!DIRTY page at %RGv is marked DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
4237 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4238 cErrors++;
4239 }
4240# if 0 /** @todo sync access bit properly... */
4241 if (PteDst.n.u1Accessed != PteSrc.n.u1Accessed)
4242 {
4243 AssertMsgFailed(("!DIRTY page at %RGv is has mismatching accessed bit! PteSrc=%#RX64 PteDst=%#RX64\n",
4244 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4245 cErrors++;
4246 }
4247 fIgnoreFlags |= X86_PTE_RW;
4248# else
4249 fIgnoreFlags |= X86_PTE_RW | X86_PTE_A;
4250# endif
4251 }
4252 else if (PteDst.u & PGM_PTFLAGS_TRACK_DIRTY)
4253 {
4254 /* access bit emulation (not implemented). */
4255 if (PteSrc.n.u1Accessed || PteDst.n.u1Present)
4256 {
4257 AssertMsgFailed(("PGM_PTFLAGS_TRACK_DIRTY set at %RGv but no accessed bit emulation! PteSrc=%#RX64 PteDst=%#RX64\n",
4258 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4259 cErrors++;
4260 continue;
4261 }
4262 if (!PteDst.n.u1Accessed)
4263 {
4264 AssertMsgFailed(("!ACCESSED page at %RGv is has the accessed bit set! PteSrc=%#RX64 PteDst=%#RX64\n",
4265 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4266 cErrors++;
4267 }
4268 fIgnoreFlags |= X86_PTE_P;
4269 }
4270# ifdef DEBUG_sandervl
4271 fIgnoreFlags |= X86_PTE_D | X86_PTE_A;
4272# endif
4273 }
4274
4275 if ( (PteSrc.u & ~fIgnoreFlags) != (PteDst.u & ~fIgnoreFlags)
4276 && (PteSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (PteDst.u & ~fIgnoreFlags)
4277 )
4278 {
4279 AssertMsgFailed(("Flags mismatch at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PteSrc=%#RX64 PteDst=%#RX64\n",
4280 GCPtr + off, (uint64_t)PteSrc.u & ~fIgnoreFlags, (uint64_t)PteDst.u & ~fIgnoreFlags,
4281 fIgnoreFlags, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
4282 cErrors++;
4283 continue;
4284 }
4285 } /* foreach PTE */
4286 }
4287 else
4288 {
4289 /*
4290 * Big Page.
4291 */
4292 uint64_t fIgnoreFlags = X86_PDE_AVL_MASK | GST_PDE_PG_MASK | X86_PDE4M_G | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_PWT | X86_PDE4M_PCD;
4293 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
4294 {
4295 if (PdeDst.n.u1Write)
4296 {
4297 AssertMsgFailed(("!DIRTY page at %RGv is writable! PdeSrc=%#RX64 PdeDst=%#RX64\n",
4298 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4299 cErrors++;
4300 continue;
4301 }
4302 if (!(PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY))
4303 {
4304 AssertMsgFailed(("!DIRTY page at %RGv is not marked TRACK_DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
4305 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4306 cErrors++;
4307 continue;
4308 }
4309# if 0 /** @todo sync access bit properly... */
4310 if (PdeDst.n.u1Accessed != PdeSrc.b.u1Accessed)
4311 {
4312 AssertMsgFailed(("!DIRTY page at %RGv is has mismatching accessed bit! PteSrc=%#RX64 PteDst=%#RX64\n",
4313 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4314 cErrors++;
4315 }
4316 fIgnoreFlags |= X86_PTE_RW;
4317# else
4318 fIgnoreFlags |= X86_PTE_RW | X86_PTE_A;
4319# endif
4320 }
4321 else if (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)
4322 {
4323 /* access bit emulation (not implemented). */
4324 if (PdeSrc.b.u1Accessed || PdeDst.n.u1Present)
4325 {
4326 AssertMsgFailed(("PGM_PDFLAGS_TRACK_DIRTY set at %RGv but no accessed bit emulation! PdeSrc=%#RX64 PdeDst=%#RX64\n",
4327 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4328 cErrors++;
4329 continue;
4330 }
4331 if (!PdeDst.n.u1Accessed)
4332 {
4333 AssertMsgFailed(("!ACCESSED page at %RGv is has the accessed bit set! PdeSrc=%#RX64 PdeDst=%#RX64\n",
4334 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4335 cErrors++;
4336 }
4337 fIgnoreFlags |= X86_PTE_P;
4338 }
4339
4340 if ((PdeSrc.u & ~fIgnoreFlags) != (PdeDst.u & ~fIgnoreFlags))
4341 {
4342 AssertMsgFailed(("Flags mismatch (B) at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PdeDst=%#RX64\n",
4343 GCPtr, (uint64_t)PdeSrc.u & ~fIgnoreFlags, (uint64_t)PdeDst.u & ~fIgnoreFlags,
4344 fIgnoreFlags, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4345 cErrors++;
4346 }
4347
4348 /* iterate the page table. */
4349 for (unsigned iPT = 0, off = 0;
4350 iPT < RT_ELEMENTS(pPTDst->a);
4351 iPT++, off += PAGE_SIZE, GCPhysGst += PAGE_SIZE)
4352 {
4353 const SHWPTE PteDst = pPTDst->a[iPT];
4354
4355 if (PteDst.u & PGM_PTFLAGS_TRACK_DIRTY)
4356 {
4357 AssertMsgFailed(("The PTE at %RGv emulating a 2/4M page is marked TRACK_DIRTY! PdeSrc=%#RX64 PteDst=%#RX64\n",
4358 GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4359 cErrors++;
4360 }
4361
4362 /* skip not-present entries. */
4363 if (!PteDst.n.u1Present) /** @todo deal with ALL handlers and CSAM !P pages! */
4364 continue;
4365
4366 fIgnoreFlags = X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT | X86_PTE_D | X86_PTE_A | X86_PTE_G | X86_PTE_PAE_NX;
4367
4368 /* match the physical addresses */
4369 HCPhysShw = PteDst.u & X86_PTE_PAE_PG_MASK;
4370
4371# ifdef IN_RING3
4372 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysGst, &HCPhys);
4373 if (RT_FAILURE(rc))
4374 {
4375 if (HCPhysShw != MMR3PageDummyHCPhys(pVM))
4376 {
4377 AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PdeSrc=%#RX64 PteDst=%#RX64\n",
4378 GCPhysGst, GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4379 cErrors++;
4380 }
4381 }
4382 else if (HCPhysShw != (HCPhys & X86_PTE_PAE_PG_MASK))
4383 {
4384 AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
4385 GCPtr + off, HCPhysShw, HCPhys, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4386 cErrors++;
4387 continue;
4388 }
4389# endif
4390 pPhysPage = pgmPhysGetPage(pPGM, GCPhysGst);
4391 if (!pPhysPage)
4392 {
4393# ifdef IN_RING3 /** @todo make MMR3PageDummyHCPhys an 'All' function! */
4394 if (HCPhysShw != MMR3PageDummyHCPhys(pVM))
4395 {
4396 AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PdeSrc=%#RX64 PteDst=%#RX64\n",
4397 GCPhysGst, GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4398 cErrors++;
4399 continue;
4400 }
4401# endif
4402 if (PteDst.n.u1Write)
4403 {
4404 AssertMsgFailed(("Invalid guest page at %RGv is writable! GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
4405 GCPtr + off, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4406 cErrors++;
4407 }
4408 fIgnoreFlags |= X86_PTE_RW;
4409 }
4410 else if (HCPhysShw != (pPhysPage->HCPhys & X86_PTE_PAE_PG_MASK))
4411 {
4412 AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
4413 GCPtr + off, HCPhysShw, pPhysPage->HCPhys, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4414 cErrors++;
4415 continue;
4416 }
4417
4418 /* flags */
4419 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPhysPage))
4420 {
4421 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPhysPage))
4422 {
4423 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPhysPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
4424 {
4425 if (PteDst.n.u1Write)
4426 {
4427 AssertMsgFailed(("WRITE access flagged at %RGv but the page is writable! HCPhys=%RHp PdeSrc=%#RX64 PteDst=%#RX64\n",
4428 GCPtr + off, pPhysPage->HCPhys, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4429 cErrors++;
4430 continue;
4431 }
4432 fIgnoreFlags |= X86_PTE_RW;
4433 }
4434 }
4435 else
4436 {
4437 if (PteDst.n.u1Present)
4438 {
4439 AssertMsgFailed(("ALL access flagged at %RGv but the page is present! HCPhys=%RHp PdeSrc=%#RX64 PteDst=%#RX64\n",
4440 GCPtr + off, pPhysPage->HCPhys, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4441 cErrors++;
4442 continue;
4443 }
4444 fIgnoreFlags |= X86_PTE_P;
4445 }
4446 }
4447
4448 if ( (PdeSrc.u & ~fIgnoreFlags) != (PteDst.u & ~fIgnoreFlags)
4449 && (PdeSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (PteDst.u & ~fIgnoreFlags) /* lazy phys handler dereg. */
4450 )
4451 {
4452 AssertMsgFailed(("Flags mismatch (BT) at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PteDst=%#RX64\n",
4453 GCPtr + off, (uint64_t)PdeSrc.u & ~fIgnoreFlags, (uint64_t)PteDst.u & ~fIgnoreFlags,
4454 fIgnoreFlags, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4455 cErrors++;
4456 continue;
4457 }
4458 } /* for each PTE */
4459 }
4460 }
4461 /* not present */
4462
4463 } /* for each PDE */
4464
4465 } /* for each PDPTE */
4466
4467 } /* for each PML4E */
4468
4469# ifdef DEBUG
4470 if (cErrors)
4471 LogFlow(("AssertCR3: cErrors=%d\n", cErrors));
4472# endif
4473
4474#endif /* GST == 32BIT, PAE or AMD64 */
4475 return cErrors;
4476
4477#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT */
4478}
4479#endif /* VBOX_STRICT */
4480
4481
4482/**
4483 * Sets up the CR3 for shadow paging
4484 *
4485 * @returns Strict VBox status code.
4486 * @retval VINF_SUCCESS.
4487 *
4488 * @param pVM VM handle.
4489 * @param GCPhysCR3 The physical address in the CR3 register.
4490 */
4491PGM_BTH_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
4492{
4493 /* Update guest paging info. */
4494#if PGM_GST_TYPE == PGM_TYPE_32BIT \
4495 || PGM_GST_TYPE == PGM_TYPE_PAE \
4496 || PGM_GST_TYPE == PGM_TYPE_AMD64
4497
4498 LogFlow(("MapCR3: %RGp\n", GCPhysCR3));
4499
4500 /*
4501 * Map the page CR3 points at.
4502 */
4503 RTHCPHYS HCPhysGuestCR3;
4504 RTHCPTR HCPtrGuestCR3;
4505 int rc = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhysCR3 & GST_CR3_PAGE_MASK, &HCPtrGuestCR3, &HCPhysGuestCR3);
4506 if (RT_SUCCESS(rc))
4507 {
4508 rc = PGMMap(pVM, (RTGCPTR)pVM->pgm.s.GCPtrCR3Mapping, HCPhysGuestCR3, PAGE_SIZE, 0);
4509 if (RT_SUCCESS(rc))
4510 {
4511# ifdef IN_RC
4512 PGM_INVL_PG(pVM->pgm.s.GCPtrCR3Mapping);
4513# endif
4514# if PGM_GST_TYPE == PGM_TYPE_32BIT
4515 pVM->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
4516# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4517 pVM->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
4518# endif
4519 pVM->pgm.s.pGst32BitPdRC = (RCPTRTYPE(PX86PD))pVM->pgm.s.GCPtrCR3Mapping;
4520
4521# elif PGM_GST_TYPE == PGM_TYPE_PAE
4522 unsigned off = GCPhysCR3 & GST_CR3_PAGE_MASK & PAGE_OFFSET_MASK;
4523 pVM->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
4524# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4525 pVM->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
4526# endif
4527 pVM->pgm.s.pGstPaePdptRC = (RCPTRTYPE(PX86PDPT))((RCPTRTYPE(uint8_t *))pVM->pgm.s.GCPtrCR3Mapping + off);
4528 Log(("Cached mapping %RRv\n", pVM->pgm.s.pGstPaePdptRC));
4529
4530 /*
4531 * Map the 4 PDs too.
4532 */
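            /* Background: a PAE PDPT has 4 entries, each pointing to a page
               directory; the loop below maps every present PD into the
               hypervisor area right after the PDPT mapping, i.e. at
               GCPtrCR3Mapping + (1 + i) * PAGE_SIZE. */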
4533 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
4534 RTGCPTR GCPtr = pVM->pgm.s.GCPtrCR3Mapping + PAGE_SIZE;
4535 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++, GCPtr += PAGE_SIZE)
4536 {
4537 if (pGuestPDPT->a[i].n.u1Present)
4538 {
4539 RTHCPTR HCPtr;
4540 RTHCPHYS HCPhys;
4541 RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;
4542 int rc2 = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhys, &HCPtr, &HCPhys);
4543 if (RT_SUCCESS(rc2))
4544 {
4545 rc = PGMMap(pVM, GCPtr, HCPhys & X86_PTE_PAE_PG_MASK, PAGE_SIZE, 0);
4546 AssertRCReturn(rc, rc);
4547
4548 pVM->pgm.s.apGstPaePDsR3[i] = (R3PTRTYPE(PX86PDPAE))HCPtr;
4549# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4550 pVM->pgm.s.apGstPaePDsR0[i] = (R0PTRTYPE(PX86PDPAE))HCPtr;
4551# endif
4552 pVM->pgm.s.apGstPaePDsRC[i] = (RCPTRTYPE(PX86PDPAE))GCPtr;
4553 pVM->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
4554 PGM_INVL_PG(GCPtr); /** @todo This ends up calling HWACCMInvalidatePage, is that correct? */
4555 continue;
4556 }
4557 AssertMsgFailed(("pgmR3Gst32BitMapCR3: rc2=%d GCPhys=%RGp i=%d\n", rc2, GCPhys, i));
4558 }
4559
4560 pVM->pgm.s.apGstPaePDsR3[i] = 0;
4561# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4562 pVM->pgm.s.apGstPaePDsR0[i] = 0;
4563# endif
4564 pVM->pgm.s.apGstPaePDsRC[i] = 0;
4565 pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
4566 PGM_INVL_PG(GCPtr); /** @todo this shouldn't be necessary? */
4567 }
4568
4569# elif PGM_GST_TYPE == PGM_TYPE_AMD64
4570 pVM->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
4571# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4572 pVM->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
4573# endif
4574# ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
4575 if (!HWACCMIsNestedPagingActive(pVM))
4576 {
4577 /*
4578 * Update the shadow root page as well since that's not fixed.
4579 */
4580 /** @todo Move this into PGMAllBth.h. */
4581 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
4582 if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
4583 {
4584 /* It might have been freed already by a pool flush (see e.g. PGMR3MappingsUnfix). */
4585 /** @todo Coordinate this better with the pool. */
4586 if (pVM->pgm.s.CTX_SUFF(pShwPageCR3)->enmKind != PGMPOOLKIND_FREE)
4587 pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->GCPhys >> PAGE_SHIFT);
4588 pVM->pgm.s.pShwPageCR3R3 = 0;
4589 pVM->pgm.s.pShwPageCR3R0 = 0;
4590 pVM->pgm.s.pShwRootR3 = 0;
4591# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4592 pVM->pgm.s.pShwRootR0 = 0;
4593# endif
4594 pVM->pgm.s.HCPhysShwCR3 = 0;
4595 }
4596
4597 Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
4598 rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_64BIT_PML4, PGMPOOL_IDX_AMD64_CR3, GCPhysCR3 >> PAGE_SHIFT, &pVM->pgm.s.CTX_SUFF(pShwPageCR3));
4599 if (rc == VERR_PGM_POOL_FLUSHED)
4600 {
4601 Log(("MapCR3: PGM pool flushed -> signal sync cr3\n"));
4602 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
4603 return VINF_PGM_SYNC_CR3;
4604 }
4605 AssertRCReturn(rc, rc);
4606# ifdef IN_RING0
4607 pVM->pgm.s.pShwPageCR3R3 = MMHyperCCToR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
4608# else
4609 pVM->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
4610# endif
4611 pVM->pgm.s.pShwRootR3 = (R3PTRTYPE(void *))pVM->pgm.s.CTX_SUFF(pShwPageCR3)->pvPageR3;
4612 Assert(pVM->pgm.s.pShwRootR3);
4613# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4614 pVM->pgm.s.pShwRootR0 = (R0PTRTYPE(void *))PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.CTX_SUFF(pShwPageCR3));
4615# endif
4616 pVM->pgm.s.HCPhysShwCR3 = pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
4617 rc = VINF_SUCCESS; /* clear it - pgmPoolAlloc returns hints. */
            }
# endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
# endif
        }
        else
            AssertMsgFailed(("rc=%Rrc GCPhysCR3=%RGp\n", rc, GCPhysCR3));
    }
    else
        AssertMsgFailed(("rc=%Rrc GCPhysCR3=%RGp\n", rc, GCPhysCR3));

#else /* prot/real stub */
    int rc = VINF_SUCCESS;
#endif

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    /* Update shadow paging info for guest modes with paging (32, pae, 64). */
# if (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
          || PGM_SHW_TYPE == PGM_TYPE_PAE \
          || PGM_SHW_TYPE == PGM_TYPE_AMD64) \
      && (   PGM_GST_TYPE != PGM_TYPE_REAL \
          && PGM_GST_TYPE != PGM_TYPE_PROT))

    Assert(!HWACCMIsNestedPagingActive(pVM));

    /*
     * Update the shadow root page as well since that's not fixed.
     */
    PPGMPOOL     pPool            = pVM->pgm.s.CTX_SUFF(pPool);
    PPGMPOOLPAGE pOldShwPageCR3   = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
    uint32_t     iOldShwUserTable = pVM->pgm.s.iShwUserTable;
    uint32_t     iOldShwUser      = pVM->pgm.s.iShwUser;
    PPGMPOOLPAGE pNewShwPageCR3;

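    /* Allocate the new shadow root page from the pool, keyed on the guest CR3.
       BTH_PGMPOOLKIND_ROOT selects the pool page kind matching the current
       shadow mode (PD/PDPT/PML4; the kind constants are defined earlier in
       this template). */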
    Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
    rc = pgmPoolAlloc(pVM, GCPhysCR3 & GST_CR3_PAGE_MASK, BTH_PGMPOOLKIND_ROOT, SHW_POOL_ROOT_IDX, GCPhysCR3 >> PAGE_SHIFT, &pNewShwPageCR3);
    if (rc == VERR_PGM_POOL_FLUSHED)
    {
        Log(("MapCR3: PGM pool flushed -> signal sync cr3\n"));
        Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
        return VINF_PGM_SYNC_CR3;
    }
    AssertRCReturn(rc, rc);
    rc = VINF_SUCCESS;

# ifdef IN_RC
    /* Note: We can't deal with jumps to ring 3 here as we're now in an inconsistent state! */
    VMMGCLogDisable(pVM);
# endif
    /* Mark the page as locked; disallow flushing. */
    pgmPoolLockPage(pPool, pNewShwPageCR3);

    pVM->pgm.s.iShwUser      = SHW_POOL_ROOT_IDX;
    pVM->pgm.s.iShwUserTable = GCPhysCR3 >> PAGE_SHIFT;
    pVM->pgm.s.CTX_SUFF(pShwPageCR3) = pNewShwPageCR3;
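    /* The current-context pointer was assigned just above; derive the pointers
       for the other contexts via the MMHyper conversions. */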
# ifdef IN_RING0
    pVM->pgm.s.pShwPageCR3R3 = MMHyperCCToR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    pVM->pgm.s.pShwPageCR3RC = MMHyperCCToRC(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# elif defined(IN_RC)
    pVM->pgm.s.pShwPageCR3R3 = MMHyperCCToR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    pVM->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# else
    pVM->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    pVM->pgm.s.pShwPageCR3RC = MMHyperCCToRC(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif
    pVM->pgm.s.pShwRootR3 = (R3PTRTYPE(void *))pVM->pgm.s.CTX_SUFF(pShwPageCR3)->pvPageR3;
    Assert(pVM->pgm.s.pShwRootR3);
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->pgm.s.pShwRootR0 = (R0PTRTYPE(void *))PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif
    pVM->pgm.s.HCPhysShwCR3 = pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
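    /* Core.Key of a pool page is (by the pool's AVL keying) the page's host
       physical address, which thus becomes the new shadow CR3 value. */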

# ifndef PGM_WITHOUT_MAPPINGS
    /*
     * Apply all hypervisor mappings to the new CR3.
     * Note that SyncCR3 will be executed whenever CR3 is changed in a paged
     * guest mode, so conflicts in the new CR3 root will be checked for there.
     */
# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL));
# endif
    rc = pgmMapActivateCR3(pVM, pNewShwPageCR3);
    AssertRCReturn(rc, rc);
# endif

    /* Set the current hypervisor CR3. */
    CPUMSetHyperCR3(pVM, PGMGetHyperCR3(pVM));

# ifdef IN_RC
    VMMGCLogEnable(pVM);
# endif

    /* Clean up the old CR3 root. */
    if (pOldShwPageCR3)
    {
        Assert(pOldShwPageCR3->enmKind != PGMPOOLKIND_FREE);
# ifndef PGM_WITHOUT_MAPPINGS
        /* Remove the hypervisor mappings from the shadow page table. */
        pgmMapDeactivateCR3(pVM, pOldShwPageCR3);
# endif
        /* Mark the page as unlocked; allow flushing again. */
        pgmPoolUnlockPage(pPool, pOldShwPageCR3);

        pgmPoolFreeByPage(pPool, pOldShwPageCR3, iOldShwUser, iOldShwUserTable);
    }

# endif
#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */

    return rc;
}

/**
 * Unmaps the shadow CR3.
 *
 * @returns VBox status, no specials.
 * @param   pVM     VM handle.
 */
PGM_BTH_DECL(int, UnmapCR3)(PVM pVM)
{
    LogFlow(("UnmapCR3\n"));

    int rc = VINF_SUCCESS;

    /* Update guest paging info. */
#if PGM_GST_TYPE == PGM_TYPE_32BIT
    pVM->pgm.s.pGst32BitPdR3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->pgm.s.pGst32BitPdR0 = 0;
# endif
    pVM->pgm.s.pGst32BitPdRC = 0;

#elif PGM_GST_TYPE == PGM_TYPE_PAE
    pVM->pgm.s.pGstPaePdptR3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->pgm.s.pGstPaePdptR0 = 0;
# endif
    pVM->pgm.s.pGstPaePdptRC = 0;
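    /* Drop the cached mappings of the (up to four) guest PAE page directories
       that MapCR3 established. */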
    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    {
        pVM->pgm.s.apGstPaePDsR3[i] = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
        pVM->pgm.s.apGstPaePDsR0[i] = 0;
# endif
        pVM->pgm.s.apGstPaePDsRC[i] = 0;
        pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
    }

#elif PGM_GST_TYPE == PGM_TYPE_AMD64
    pVM->pgm.s.pGstAmd64Pml4R3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->pgm.s.pGstAmd64Pml4R0 = 0;
# endif
# ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    if (!HWACCMIsNestedPagingActive(pVM))
    {
        pVM->pgm.s.pShwRootR3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
        pVM->pgm.s.pShwRootR0 = 0;
# endif
        pVM->pgm.s.HCPhysShwCR3 = 0;
        if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        {
            PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
            pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->GCPhys >> PAGE_SHIFT);
            pVM->pgm.s.pShwPageCR3R3 = 0;
            pVM->pgm.s.pShwPageCR3R0 = 0;
        }
    }
# endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */

#else /* prot/real mode stub */
    /* nothing to do */
#endif

#if defined(VBOX_WITH_PGMPOOL_PAGING_ONLY) && !defined(IN_RC) /* In RC we rely on MapCR3 to do the shadow part for us at a safe time. */
    /* Update shadow paging info. */
# if (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
          || PGM_SHW_TYPE == PGM_TYPE_PAE \
          || PGM_SHW_TYPE == PGM_TYPE_AMD64))

# if PGM_GST_TYPE != PGM_TYPE_REAL
    Assert(!HWACCMIsNestedPagingActive(pVM));
# endif

# ifndef PGM_WITHOUT_MAPPINGS
    if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        /* Remove the hypervisor mappings from the shadow page table. */
        pgmMapDeactivateCR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

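    /* Clear the cached shadow root and return the shadow CR3 page to the pool. */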
    pVM->pgm.s.pShwRootR3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->pgm.s.pShwRootR0 = 0;
# endif
    pVM->pgm.s.HCPhysShwCR3 = 0;
    if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

        /* Mark the page as unlocked; allow flushing again. */
        pgmPoolUnlockPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3));

        pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pVM->pgm.s.iShwUser, pVM->pgm.s.iShwUserTable);
        pVM->pgm.s.pShwPageCR3R3 = 0;
        pVM->pgm.s.pShwPageCR3R0 = 0;
        pVM->pgm.s.iShwUser      = 0;
        pVM->pgm.s.iShwUserTable = 0;
    }
# endif
#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY && !IN_RC */

    return rc;
}
