VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllBth.h@ 17142

Last change on this file since 17142 was 17142, checked in by vboxsync, 16 years ago

VBOX_WITH_PGMPOOL_PAGING_ONLY: fixed saved state restore

/* $Id: PGMAllBth.h 17142 2009-02-25 17:10:23Z vboxsync $ */
/** @file
 * VBox - Page Manager, Shadow+Guest Paging Template - All context code.
 *
 * This file is a big challenge!
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
__BEGIN_DECLS
PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
PGM_BTH_DECL(int, InvalidatePage)(PVM pVM, RTGCPTR GCPtrPage);
PGM_BTH_DECL(int, SyncPage)(PVM pVM, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
PGM_BTH_DECL(int, CheckPageFault)(PVM pVM, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage);
PGM_BTH_DECL(int, SyncPT)(PVM pVM, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVM pVM, RTGCPTR Addr, unsigned fPage, unsigned uErr);
PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, RTGCPTR GCPtrPage);
PGM_BTH_DECL(int, SyncCR3)(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
#ifdef VBOX_STRICT
PGM_BTH_DECL(unsigned, AssertCR3)(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
#endif
#ifdef PGMPOOL_WITH_USER_TRACKING
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVM pVM, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys);
#endif
PGM_BTH_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_BTH_DECL(int, UnmapCR3)(PVM pVM);
__END_DECLS

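/*
 * Illustrative note (added; not part of the original source): this header is a
 * template that gets instantiated once per guest/shadow paging mode pair. A
 * hypothetical instantiation site looks roughly like this (the exact macro
 * names are assumptions for illustration):
 *
 *     #define PGM_GST_TYPE        PGM_TYPE_32BIT
 *     #define PGM_SHW_TYPE        PGM_TYPE_PAE
 *     #define PGM_BTH_NAME(name)  PGM_BTH_NAME_PAE_32BIT(name)
 *     #include "PGMAllBth.h"
 *     #undef PGM_GST_TYPE
 *     #undef PGM_SHW_TYPE
 *     #undef PGM_BTH_NAME
 *
 * The #if/#error checks below reject mode pairs that make no sense.
 */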
/* Filter out some illegal combinations of guest and shadow paging, so we can remove redundant checks inside functions. */
#if PGM_GST_TYPE == PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
# error "Invalid combination; PAE guest implies PAE shadow"
#endif

#if     (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
    &&  !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT)
# error "Invalid combination; real or protected mode without paging implies 32 bits or PAE shadow paging."
#endif

#if     (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE) \
    &&  !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT)
# error "Invalid combination; 32 bits guest paging or PAE implies 32 bits or PAE shadow paging."
#endif

#if     (PGM_GST_TYPE == PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT) \
    ||  (PGM_SHW_TYPE == PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PROT)
# error "Invalid combination; AMD64 guest implies AMD64 shadow and vice versa"
#endif

#ifdef IN_RING0 /* no mappings in VT-x and AMD-V mode */
# define PGM_WITHOUT_MAPPINGS
#endif


#ifndef IN_RING3
/**
 * #PF Handler for raw-mode guest execution.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErr        The trap error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address.
 */
PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    PGMDynCheckLocks(pVM);
# endif

# if    (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
    &&  PGM_SHW_TYPE != PGM_TYPE_NESTED \
    &&  (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)

#  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE != PGM_TYPE_PAE
    /*
     * Hide the instruction fetch trap indicator for now.
     */
    /** @todo NXE will change this and we must fix NXE in the switcher too! */
    if (uErr & X86_TRAP_PF_ID)
    {
        uErr &= ~X86_TRAP_PF_ID;
        TRPMSetErrorCode(pVM, uErr);
    }
#  endif
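    /* Added note: the #PF error code bits tested throughout this function are
       the architectural ones: X86_TRAP_PF_P (clear = page not present, set =
       protection violation), X86_TRAP_PF_RW (write access), X86_TRAP_PF_US
       (user-mode access) and X86_TRAP_PF_ID (instruction fetch). */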

    /*
     * Get PDs.
     */
    int rc;
#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
#   if PGM_GST_TYPE == PGM_TYPE_32BIT
    const unsigned  iPDSrc = pvFault >> GST_PD_SHIFT;
    PGSTPD          pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);

#   elif PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64

#    if PGM_GST_TYPE == PGM_TYPE_PAE
    unsigned        iPDSrc;
#     ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    X86PDPE         PdpeSrc;
    PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, pvFault, &iPDSrc, &PdpeSrc);
#     else
    PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, pvFault, &iPDSrc, NULL);
#     endif

#    elif PGM_GST_TYPE == PGM_TYPE_AMD64
    unsigned        iPDSrc;
    PX86PML4E       pPml4eSrc;
    X86PDPE         PdpeSrc;
    PGSTPD          pPDSrc;

    pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, pvFault, &pPml4eSrc, &PdpeSrc, &iPDSrc);
    Assert(pPml4eSrc);
#    endif

    /* Quick check for a valid guest trap. (PAE & AMD64) */
    if (!pPDSrc)
    {
#    if PGM_GST_TYPE == PGM_TYPE_AMD64 && GC_ARCH_BITS == 64
        LogFlow(("Trap0eHandler: guest PML4 %d not present CR3=%RGp\n", (int)((pvFault >> X86_PML4_SHIFT) & X86_PML4_MASK), CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK));
#    else
        LogFlow(("Trap0eHandler: guest iPDSrc=%u not present CR3=%RGp\n", iPDSrc, CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK));
#    endif
        STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2GuestTrap; });
        TRPMSetErrorCode(pVM, uErr);
        return VINF_EM_RAW_GUEST_TRAP;
    }
#   endif

#  else /* !PGM_WITH_PAGING */
    PGSTPD          pPDSrc = NULL;
    const unsigned  iPDSrc = 0;
#  endif /* !PGM_WITH_PAGING */


#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
    const unsigned  iPDDst = pvFault >> SHW_PD_SHIFT;
    PX86PD          pPDDst = pgmShwGet32BitPDPtr(&pVM->pgm.s);

#  elif PGM_SHW_TYPE == PGM_TYPE_PAE
    const unsigned  iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK;   /* pPDDst index, not used with the pool. */

#   ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    PX86PDPAE       pPDDst;
#    if PGM_GST_TYPE != PGM_TYPE_PAE
    X86PDPE         PdpeSrc;

    /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
    PdpeSrc.u = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
#    endif
    rc = pgmShwSyncPaePDPtr(pVM, pvFault, &PdpeSrc, &pPDDst);
    if (rc != VINF_SUCCESS)
    {
        AssertRC(rc);
        return rc;
    }
    Assert(pPDDst);

#   else
    PX86PDPAE       pPDDst = pgmShwGetPaePDPtr(&pVM->pgm.s, pvFault);

    /* Did we mark the PDPT as not present in SyncCR3? */
    unsigned        iPdpt = (pvFault >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
    PX86PDPT        pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    if (!pPdptDst->a[iPdpt].n.u1Present)
        pPdptDst->a[iPdpt].n.u1Present = 1;
#   endif

#  elif PGM_SHW_TYPE == PGM_TYPE_AMD64
    const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
    PX86PDPAE       pPDDst;
#   if PGM_GST_TYPE == PGM_TYPE_PROT
    /* AMD-V nested paging */
    X86PML4E        Pml4eSrc;
    X86PDPE         PdpeSrc;
    PX86PML4E       pPml4eSrc = &Pml4eSrc;

    /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
    Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
    PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
#   endif

    rc = pgmShwSyncLongModePDPtr(pVM, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
    if (rc != VINF_SUCCESS)
    {
        AssertRC(rc);
        return rc;
    }
    Assert(pPDDst);

#  elif PGM_SHW_TYPE == PGM_TYPE_EPT
    const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
    PEPTPD          pPDDst;

    rc = pgmShwGetEPTPDPtr(pVM, pvFault, NULL, &pPDDst);
    if (rc != VINF_SUCCESS)
    {
        AssertRC(rc);
        return rc;
    }
    Assert(pPDDst);
#  endif
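    /* Added note: at this point iPDDst/pPDDst identify the shadow page
       directory entry covering pvFault for whichever shadow mode this
       template instance was compiled for; the guest side (iPDSrc/pPDSrc)
       was resolved above (NULL/0 when the guest runs without paging). */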

#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    /*
     * If we successfully correct the write protection fault due to dirty bit
     * tracking, or this page fault is a genuine one, then return immediately.
     */
    STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
    rc = PGM_BTH_NAME(CheckPageFault)(pVM, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
    STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
    if (    rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
        ||  rc == VINF_EM_RAW_GUEST_TRAP)
    {
        STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
                     = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVM->pgm.s.StatRZTrap0eTime2DirtyAndAccessed : &pVM->pgm.s.StatRZTrap0eTime2GuestTrap; });
        LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
        return rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? VINF_SUCCESS : rc;
    }

    STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0ePD[iPDSrc]);
#  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */

    /*
     * A common case is the not-present error caused by lazy page table syncing.
     *
     * It is IMPORTANT that we weed out any access to non-present shadow PDEs here
     * so we can safely assume that the shadow PT is present when calling SyncPage later.
     *
     * On failure, we ASSUME that SyncPT is out of memory or detected some kind
     * of mapping conflict and defer to SyncCR3 in R3.
     * (Again, we do NOT support access handlers for non-present guest pages.)
     */
#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
#  else
    GSTPDE PdeSrc;
    PdeSrc.au32[0]      = 0; /* faked so we don't have to #ifdef everything */
    PdeSrc.n.u1Present  = 1;
    PdeSrc.n.u1Write    = 1;
    PdeSrc.n.u1Accessed = 1;
    PdeSrc.n.u1User     = 1;
#  endif
    if (    !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
        &&  !pPDDst->a[iPDDst].n.u1Present
        &&  PdeSrc.n.u1Present)
    {
        STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2SyncPT; });
        STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0eTimeSyncPT, f);
        LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
        rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, pvFault);
        if (RT_SUCCESS(rc))
        {
            STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeSyncPT, f);
            return rc;
        }
        Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
        STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeSyncPT, f);
        return VINF_PGM_SYNC_CR3;
    }

#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    /*
     * Check if this address is within any of our mappings.
     *
     * This is *very* fast and it's gonna save us a bit of effort below and prevent
     * us from screwing ourselves with MMIO2 pages which have a GC Mapping (VRam).
     * (BTW, it's impossible to have physical access handlers in a mapping.)
     */
    if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
    {
        STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0eTimeMapping, a);
        PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
        for ( ; pMapping; pMapping = pMapping->CTX_SUFF(pNext))
        {
            if (pvFault < pMapping->GCPtr)
                break;
            if (pvFault - pMapping->GCPtr < pMapping->cb)
            {
                /*
                 * The first thing we check is if we've got an undetected conflict.
                 */
                if (!pVM->pgm.s.fMappingsFixed)
                {
                    unsigned iPT = pMapping->cb >> GST_PD_SHIFT;
                    while (iPT-- > 0)
                        if (pPDSrc->a[iPDSrc + iPT].n.u1Present)
                        {
                            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eConflicts);
                            Log(("Trap0e: Detected Conflict %RGv-%RGv\n", pMapping->GCPtr, pMapping->GCPtrLast));
                            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
                            STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeMapping, a);
                            return VINF_PGM_SYNC_CR3;
                        }
                }

                /*
                 * Check if the fault address is in a virtual page access handler range.
                 */
                PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->HyperVirtHandlers, pvFault);
                if (    pCur
                    &&  pvFault - pCur->Core.Key < pCur->cb
                    &&  uErr & X86_TRAP_PF_RW)
                {
#   ifdef IN_RC
                    STAM_PROFILE_START(&pCur->Stat, h);
                    rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
                    STAM_PROFILE_STOP(&pCur->Stat, h);
#   else
                    AssertFailed();
                    rc = VINF_EM_RAW_EMULATE_INSTR; /* can't happen with VMX */
#   endif
                    STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersMapping);
                    STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeMapping, a);
                    return rc;
                }

                /*
                 * Pretend we're not here and let the guest handle the trap.
                 */
                TRPMSetErrorCode(pVM, uErr & ~X86_TRAP_PF_P);
                STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eGuestPFMapping);
                LogFlow(("PGM: Mapping access -> route trap to recompiler!\n"));
                STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeMapping, a);
                return VINF_EM_RAW_GUEST_TRAP;
            }
        }
        STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeMapping, a);
    } /* pgmAreMappingsEnabled(&pVM->pgm.s) */
#  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */

    /*
     * Check if this fault address is flagged for special treatment,
     * which means we'll have to figure out the physical address and
     * check flags associated with it.
     *
     * ASSUME that we can limit any special access handling to pages
     * in page tables which the guest believes to be present.
     */
    if (PdeSrc.n.u1Present)
    {
        RTGCPHYS GCPhys = NIL_RTGCPHYS;

#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
#   if PGM_GST_TYPE == PGM_TYPE_AMD64
        bool fBigPagesSupported = true;
#   else
        bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
#   endif
        if (    PdeSrc.b.u1Size
            &&  fBigPagesSupported)
            GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc)
                   | ((RTGCPHYS)pvFault & (GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK));
        else
        {
            PGSTPT pPTSrc;
            rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
            if (RT_SUCCESS(rc))
            {
                unsigned iPTESrc = (pvFault >> GST_PT_SHIFT) & GST_PT_MASK;
                if (pPTSrc->a[iPTESrc].n.u1Present)
                    GCPhys = pPTSrc->a[iPTESrc].u & GST_PTE_PG_MASK;
            }
        }
#  else
        /* No paging so the fault address is the physical address */
        GCPhys = (RTGCPHYS)(pvFault & ~PAGE_OFFSET_MASK);
#  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
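        /* Worked example (added, assuming the 32-bit guest constants
           GST_BIG_PAGE_OFFSET_MASK = 0x3fffff and PAGE_OFFSET_MASK = 0xfff):
           the XOR above yields 0x3ff000, so for a 4MB page based at 0x400000
           and pvFault = 0x473123 we get
           GCPhys = 0x400000 | (0x473123 & 0x3ff000) = 0x473000, i.e. the
           4KB-aligned physical address of the page containing the fault. */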

        /*
         * If we have a GC address we'll check if it has any flags set.
         */
        if (GCPhys != NIL_RTGCPHYS)
        {
            STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);

            PPGMPAGE pPage;
            rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
            if (RT_SUCCESS(rc))
            {
                if (    PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage)
                    ||  PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage))
                {
                    if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
                    {
                        /*
                         * Physical page access handler.
                         */
                        const RTGCPHYS GCPhysFault = GCPhys | (pvFault & PAGE_OFFSET_MASK);
                        PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysFault);
                        if (pCur)
                        {
#  ifdef PGM_SYNC_N_PAGES
                            /*
                             * If the region is write protected and we got a page not present fault, then sync
                             * the pages. If the fault was caused by a read, then restart the instruction.
                             * In case of write access continue to the GC write handler.
                             *
                             * ASSUMES that there is only one handler per page or that they have similar write properties.
                             */
                            if (    pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
                                &&  !(uErr & X86_TRAP_PF_P))
                            {
                                rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
                                if (    RT_FAILURE(rc)
                                    ||  !(uErr & X86_TRAP_PF_RW)
                                    ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
                                {
                                    AssertRC(rc);
                                    STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersOutOfSync);
                                    STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
                                    STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2OutOfSyncHndPhys; });
                                    return rc;
                                }
                            }
#  endif

                            AssertMsg(   pCur->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
                                      || (pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE && (uErr & X86_TRAP_PF_RW)),
                                      ("Unexpected trap for physical handler: %08X (phys=%08x) HCPhys=%X uErr=%X, enum=%d\n", pvFault, GCPhys, pPage->HCPhys, uErr, pCur->enmType));

#  if defined(IN_RC) || defined(IN_RING0)
                            if (pCur->CTX_SUFF(pfnHandler))
                            {
                                STAM_PROFILE_START(&pCur->Stat, h);
                                rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, GCPhysFault, pCur->CTX_SUFF(pvUser));
                                STAM_PROFILE_STOP(&pCur->Stat, h);
                            }
                            else
#  endif
                                rc = VINF_EM_RAW_EMULATE_INSTR;
                            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersPhysical);
                            STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
                            STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2HndPhys; });
                            return rc;
                        }
                    }
#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
                    else
                    {
#   ifdef PGM_SYNC_N_PAGES
                        /*
                         * If the region is write protected and we got a page not present fault, then sync
                         * the pages. If the fault was caused by a read, then restart the instruction.
                         * In case of write access continue to the GC write handler.
                         */
                        if (    PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < PGM_PAGE_HNDL_PHYS_STATE_ALL
                            &&  !(uErr & X86_TRAP_PF_P))
                        {
                            rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
                            if (    RT_FAILURE(rc)
                                ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
                                ||  !(uErr & X86_TRAP_PF_RW))
                            {
                                AssertRC(rc);
                                STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersOutOfSync);
                                STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
                                STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2OutOfSyncHndVirt; });
                                return rc;
                            }
                        }
#   endif
                        /*
                         * Ok, it's a virtual page access handler.
                         *
                         * Since it's faster to search by address, we'll do that first
                         * and then retry by GCPhys if that fails.
                         */
                        /** @todo r=bird: perhaps we should consider looking up by physical address directly now? */
                        /** @note r=svl: true, but lookup on virtual address should remain as a fallback as phys & virt trees might be
                         *        out of sync, because the page was changed without us noticing it (not-present -> present
                         *        without invlpg or mov cr3, xxx).
                         */
                        PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
                        if (pCur)
                        {
                            AssertMsg(   !(pvFault - pCur->Core.Key < pCur->cb)
                                      || (    pCur->enmType != PGMVIRTHANDLERTYPE_WRITE
                                          ||  !(uErr & X86_TRAP_PF_P)
                                          ||  (pCur->enmType == PGMVIRTHANDLERTYPE_WRITE && (uErr & X86_TRAP_PF_RW))),
                                      ("Unexpected trap for virtual handler: %RGv (phys=%RGp) HCPhys=%HGp uErr=%X, enum=%d\n", pvFault, GCPhys, pPage->HCPhys, uErr, pCur->enmType));

                            if (    pvFault - pCur->Core.Key < pCur->cb
                                &&  (    uErr & X86_TRAP_PF_RW
                                     ||  pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
                            {
#   ifdef IN_RC
                                STAM_PROFILE_START(&pCur->Stat, h);
                                rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
                                STAM_PROFILE_STOP(&pCur->Stat, h);
#   else
                                rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
#   endif
                                STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersVirtual);
                                STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
                                STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2HndVirt; });
                                return rc;
                            }
                            /* Unhandled part of a monitored page */
                        }
                        else
                        {
                            /* Check by physical address. */
                            PPGMVIRTHANDLER pCur;
                            unsigned        iPage;
                            rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys + (pvFault & PAGE_OFFSET_MASK),
                                                                 &pCur, &iPage);
                            Assert(RT_SUCCESS(rc) || !pCur);
                            if (    pCur
                                &&  (    uErr & X86_TRAP_PF_RW
                                     ||  pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
                            {
                                Assert((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == GCPhys);
#   ifdef IN_RC
                                RTGCPTR off = (iPage << PAGE_SHIFT) + (pvFault & PAGE_OFFSET_MASK) - (pCur->Core.Key & PAGE_OFFSET_MASK);
                                Assert(off < pCur->cb);
                                STAM_PROFILE_START(&pCur->Stat, h);
                                rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, off);
                                STAM_PROFILE_STOP(&pCur->Stat, h);
#   else
                                rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
#   endif
                                STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersVirtualByPhys);
                                STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
                                STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2HndVirt; });
                                return rc;
                            }
                        }
                    }
#  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */

                    /*
                     * There is a handled area of the page, but this fault doesn't belong to it.
                     * We must emulate the instruction.
                     *
                     * To avoid crashing (non-fatally) in the interpreter and going back to the recompiler,
                     * we first check if this was a page-not-present fault for a page with only
                     * write access handlers. Restart the instruction if it wasn't a write access.
                     */
                    STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersUnhandled);

                    if (    !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
                        &&  !(uErr & X86_TRAP_PF_P))
                    {
                        rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
                        if (    RT_FAILURE(rc)
                            ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
                            ||  !(uErr & X86_TRAP_PF_RW))
                        {
                            AssertRC(rc);
                            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersOutOfSync);
                            STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
                            STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2OutOfSyncHndPhys; });
                            return rc;
                        }
                    }

                    /** @todo This particular case can cause quite a lot of overhead. E.g. early stage of kernel booting in Ubuntu 6.06.
                     *        It's writing to an unhandled part of the LDT page several million times.
                     */
                    rc = PGMInterpretInstruction(pVM, pRegFrame, pvFault);
                    LogFlow(("PGM: PGMInterpretInstruction -> rc=%d HCPhys=%RHp%s%s\n",
                             rc, pPage->HCPhys,
                             PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage) ? " phys" : "",
                             PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage) ? " virt" : ""));
                    STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
                    STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2HndUnhandled; });
                    return rc;
                } /* if any kind of handler */

#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
                if (uErr & X86_TRAP_PF_P)
                {
                    /*
                     * The page isn't marked, but it might still be monitored by a virtual page access handler.
                     * (ASSUMES no temporary disabling of virtual handlers.)
                     */
                    /** @todo r=bird: Since the purpose is to catch out of sync pages with virtual handler(s) here,
                     *        we should correct both the shadow page table and physical memory flags, and not only check for
                     *        accesses within the handler region but for access to pages with virtual handlers. */
                    PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
                    if (pCur)
                    {
                        AssertMsg(   !(pvFault - pCur->Core.Key < pCur->cb)
                                  || (    pCur->enmType != PGMVIRTHANDLERTYPE_WRITE
                                      ||  !(uErr & X86_TRAP_PF_P)
                                      ||  (pCur->enmType == PGMVIRTHANDLERTYPE_WRITE && (uErr & X86_TRAP_PF_RW))),
                                  ("Unexpected trap for virtual handler: %08X (phys=%08x) HCPhys=%X uErr=%X, enum=%d\n", pvFault, GCPhys, pPage->HCPhys, uErr, pCur->enmType));

                        if (    pvFault - pCur->Core.Key < pCur->cb
                            &&  (    uErr & X86_TRAP_PF_RW
                                 ||  pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
                        {
#   ifdef IN_RC
                            STAM_PROFILE_START(&pCur->Stat, h);
                            rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
                            STAM_PROFILE_STOP(&pCur->Stat, h);
#   else
                            rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
#   endif
                            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersVirtualUnmarked);
                            STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
                            STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2HndVirt; });
                            return rc;
                        }
                    }
                }
#  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
            }
            else
            {
                /* When the guest accesses invalid physical memory (e.g. probing of RAM or accessing a remapped MMIO range),
                 * we'll fall back to the recompiler to emulate the instruction.
                 */
                LogFlow(("pgmPhysGetPageEx %RGp failed with %Rrc\n", GCPhys, rc));
                STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eHandlersInvalid);
                STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeHandlers, b);
#  ifdef PGM_OUT_OF_SYNC_IN_GC
            /*
             * We are here only if the page is present in the guest page tables and
             * the trap is not handled by our handlers.
             * Check it for a page out-of-sync situation.
             */
            STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0eTimeOutOfSync, c);

            if (!(uErr & X86_TRAP_PF_P))
            {
                /*
                 * Page is not present in our page tables.
                 * Try to sync it!
                 * BTW, fPageShw is invalid in this branch!
                 */
                if (uErr & X86_TRAP_PF_US)
                    STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncUser));
                else /* supervisor */
                    STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));

#   if defined(LOG_ENABLED) && !defined(IN_RING0)
                RTGCPHYS GCPhys;
                uint64_t fPageGst;
                PGMGstGetPage(pVM, pvFault, &fPageGst, &GCPhys);
                Log(("Page out of sync: %RGv eip=%08x PdeSrc.n.u1User=%d fPageGst=%08llx GCPhys=%RGp scan=%d\n",
                     pvFault, pRegFrame->eip, PdeSrc.n.u1User, fPageGst, GCPhys, CSAMDoesPageNeedScanning(pVM, (RTRCPTR)pRegFrame->eip)));
#   endif /* LOG_ENABLED */

#   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0)
                if (CPUMGetGuestCPL(pVM, pRegFrame) == 0)
                {
                    uint64_t fPageGst;
                    rc = PGMGstGetPage(pVM, pvFault, &fPageGst, NULL);
                    if (    RT_SUCCESS(rc)
                        &&  !(fPageGst & X86_PTE_US))
                    {
                        /* Note: can't check for X86_TRAP_ID bit, because that requires execute disable support on the CPU */
                        if (    pvFault == (RTGCPTR)pRegFrame->eip
                            ||  pvFault - pRegFrame->eip < 8    /* instruction crossing a page boundary */
#    ifdef CSAM_DETECT_NEW_CODE_PAGES
                            ||  (   !PATMIsPatchGCAddr(pVM, (RTGCPTR)pRegFrame->eip)
                                 && CSAMDoesPageNeedScanning(pVM, (RTRCPTR)pRegFrame->eip)) /* any new code we encounter here */
#    endif /* CSAM_DETECT_NEW_CODE_PAGES */
                           )
                        {
                            LogFlow(("CSAMExecFault %RX32\n", pRegFrame->eip));
                            rc = CSAMExecFault(pVM, (RTRCPTR)pRegFrame->eip);
                            if (rc != VINF_SUCCESS)
                            {
                                /*
                                 * CSAM needs to perform a job in ring 3.
                                 *
                                 * Sync the page before going to the host context; otherwise we'll end up in a loop if
                                 * CSAM fails (e.g. instruction crosses a page boundary and the next page is not present)
                                 */
                                LogFlow(("CSAM ring 3 job\n"));
                                int rc2 = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, 1, uErr);
                                AssertRC(rc2);

                                STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeOutOfSync, c);
                                STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2CSAM; });
                                return rc;
                            }
                        }
#    ifdef CSAM_DETECT_NEW_CODE_PAGES
                        else if (    uErr == X86_TRAP_PF_RW
                                 &&  pRegFrame->ecx >= 0x100    /* early check for movswd count */
                                 &&  pRegFrame->ecx < 0x10000)
                        {
                            /* In case of a write to a non-present supervisor shadow page, we'll take special precautions
                             * to detect loading of new code pages.
                             */

                            /*
                             * Decode the instruction.
                             */
                            RTGCPTR PC;
                            rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &PC);
                            if (rc == VINF_SUCCESS)
                            {
                                DISCPUSTATE Cpu;
                                uint32_t    cbOp;
                                rc = EMInterpretDisasOneEx(pVM, PC, pRegFrame, &Cpu, &cbOp);

                                /* For now we'll restrict this to rep movsw/d instructions */
                                if (    rc == VINF_SUCCESS
                                    &&  Cpu.pCurInstr->opcode == OP_MOVSWD
                                    &&  (Cpu.prefix & PREFIX_REP))
                                {
                                    CSAMMarkPossibleCodePage(pVM, pvFault);
                                }
                            }
                        }
#    endif /* CSAM_DETECT_NEW_CODE_PAGES */

                        /*
                         * Mark this page as safe.
                         */
                        /** @todo not correct for pages that contain both code and data!! */
                        Log2(("CSAMMarkPage %RGv; scanned=%d\n", pvFault, true));
                        CSAMMarkPage(pVM, (RTRCPTR)pvFault, true);
                    }
                }
#   endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0) */
                rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
                if (RT_SUCCESS(rc))
                {
                    /* The page was successfully synced, return to the guest. */
                    STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeOutOfSync, c);
                    STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2OutOfSync; });
                    return VINF_SUCCESS;
                }
            }
            else
            {
                /*
                 * A side effect of not flushing global PDEs is that out-of-sync pages may remain
                 * for physically monitored regions that are no longer valid.
                 * Assume for now that it only applies to the read/write flag.
                 */
                if (RT_SUCCESS(rc) && (uErr & X86_TRAP_PF_RW))
                {
                    if (uErr & X86_TRAP_PF_US)
                        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncUser));
                    else /* supervisor */
                        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));

                    /*
                     * Note: Do NOT use PGM_SYNC_NR_PAGES here. That only works if the page is not present, which is not true in this case.
                     */
                    rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, pvFault, 1, uErr);
                    if (RT_SUCCESS(rc))
                    {
                        /*
                         * Page was successfully synced, return to guest.
                         */
#   ifdef VBOX_STRICT
                        RTGCPHYS GCPhys;
                        uint64_t fPageGst;
                        rc = PGMGstGetPage(pVM, pvFault, &fPageGst, &GCPhys);
                        Assert(RT_SUCCESS(rc) && fPageGst & X86_PTE_RW);
                        LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, GCPhys, (uint64_t)fPageGst));

                        uint64_t fPageShw;
                        rc = PGMShwGetPage(pVM, pvFault, &fPageShw, NULL);
                        AssertMsg(RT_SUCCESS(rc) && fPageShw & X86_PTE_RW, ("rc=%Rrc fPageShw=%RX64\n", rc, fPageShw));
#   endif /* VBOX_STRICT */
                        STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeOutOfSync, c);
                        STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2OutOfSyncHndObs; });
                        return VINF_SUCCESS;
                    }

                    /* Check to see if we need to emulate the instruction as X86_CR0_WP has been cleared. */
                    if (    CPUMGetGuestCPL(pVM, pRegFrame) == 0
                        &&  ((CPUMGetGuestCR0(pVM) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG)
                        &&  (uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_P)) == (X86_TRAP_PF_RW | X86_TRAP_PF_P))
                    {
                        uint64_t fPageGst;
                        rc = PGMGstGetPage(pVM, pvFault, &fPageGst, NULL);
                        if (    RT_SUCCESS(rc)
                            &&  !(fPageGst & X86_PTE_RW))
                        {
                            rc = PGMInterpretInstruction(pVM, pRegFrame, pvFault);
                            if (RT_SUCCESS(rc))
                                STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eWPEmulInRZ);
                            else
                                STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eWPEmulToR3);
                            return rc;
                        }
                        AssertMsgFailed(("Unexpected r/w page %RGv flag=%x rc=%Rrc\n", pvFault, (uint32_t)fPageGst, rc));
                    }
                }

#   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
#    ifdef VBOX_STRICT
                /*
                 * Check for VMM page flags vs. Guest page flags consistency.
                 * Currently only for debug purposes.
                 */
                if (RT_SUCCESS(rc))
                {
                    /* Get guest page flags. */
                    uint64_t fPageGst;
                    rc = PGMGstGetPage(pVM, pvFault, &fPageGst, NULL);
                    if (RT_SUCCESS(rc))
                    {
                        uint64_t fPageShw;
                        rc = PGMShwGetPage(pVM, pvFault, &fPageShw, NULL);

                        /*
                         * Compare page flags.
                         * Note: we have AVL, A, D bits desynched.
                         */
                        AssertMsg((fPageShw & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)) == (fPageGst & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)),
                                  ("Page flags mismatch! pvFault=%RGv GCPhys=%RGp fPageShw=%08llx fPageGst=%08llx\n", pvFault, GCPhys, fPageShw, fPageGst));
                    }
                    else
                        AssertMsgFailed(("PGMGstGetPage rc=%Rrc\n", rc));
                }
                else
                    AssertMsgFailed(("PGMGCGetPage rc=%Rrc\n", rc));
#    endif /* VBOX_STRICT */
#   endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
            }
            STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0eTimeOutOfSync, c);
#  endif /* PGM_OUT_OF_SYNC_IN_GC */
        }
        else
        {
            /*
             * Page not present in Guest OS or invalid page table address.
             * This is potential virtual page access handler food.
             *
             * For the present we'll say that our access handlers don't
             * work for this case - we've already discarded the page table
             * not present case which is identical to this.
             *
             * When we perchance find we need this, we will probably have AVL
             * trees (offset based) to operate on and we can measure their speed
             * against mapping a page table and probably rearrange this handling
             * a bit. (Like, searching virtual ranges before checking the
             * physical address.)
             */
        }
    }


#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    /*
     * Conclusion, this is a guest trap.
     */
    LogFlow(("PGM: Unhandled #PF -> route trap to recompiler!\n"));
    STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eGuestPFUnh);
    return VINF_EM_RAW_GUEST_TRAP;
#  else
    /* present, but not a monitored page; perhaps the guest is probing physical memory */
    return VINF_EM_RAW_EMULATE_INSTR;
#  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */


# else /* PGM_GST_TYPE != PGM_TYPE_32BIT */

    AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE));
    return VERR_INTERNAL_ERROR;
# endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
}
#endif /* !IN_RING3 */
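/*
 * Added summary (derived from the code above): Trap0eHandler resolves a #PF by
 * either lazily syncing the missing shadow paging structures (VINF_SUCCESS or
 * VINF_PGM_SYNC_CR3), dispatching to a physical/virtual access handler,
 * emulating the faulting instruction (VINF_EM_RAW_EMULATE_INSTR), or
 * reflecting the fault back to the guest (VINF_EM_RAW_GUEST_TRAP).
 */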


/**
 * Emulation of the invlpg instruction.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM handle.
 * @param   GCPtrPage   Page to invalidate.
 *
 * @remark  ASSUMES that the guest is updating before invalidating. This order
 *          isn't required by the CPU, so this is speculative and could cause
 *          trouble.
 *
 * @todo    Flush page or page directory only if necessary!
 * @todo    Add a #define for simply invalidating the page.
 */
PGM_BTH_DECL(int, InvalidatePage)(PVM pVM, RTGCPTR GCPtrPage)
{
#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) \
    && PGM_SHW_TYPE != PGM_TYPE_NESTED \
    && PGM_SHW_TYPE != PGM_TYPE_EPT
    int rc;

    LogFlow(("InvalidatePage %RGv\n", GCPtrPage));
    /*
     * Get the shadow PD entry and skip out if this PD isn't present.
     * (Guessing that it is frequent for a shadow PDE to not be present, do this first.)
     */
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
    const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
    PX86PDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);

#  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
    Assert(pShwPde);
#  endif

# elif PGM_SHW_TYPE == PGM_TYPE_PAE
    const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT);
    PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s);

    /* If the shadow PDPE isn't present, then skip the invalidate. */
    if (!pPdptDst->a[iPdpt].n.u1Present)
    {
        Assert(!(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING));
        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
        return VINF_SUCCESS;
    }

#  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
    PPGMPOOLPAGE pShwPde;
    PX86PDPAE pPDDst;

    /* Fetch the pgm pool shadow descriptor. */
    rc = pgmShwGetPaePoolPagePD(&pVM->pgm.s, GCPtrPage, &pShwPde);
    AssertRCSuccessReturn(rc, rc);
    Assert(pShwPde);

    pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
    PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
#  else
    const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - pool index only atm! */;
    PX86PDEPAE pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
#  endif

# else /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */
    /* PML4 */
    AssertReturn(pVM->pgm.s.pShwRootR3, VERR_INTERNAL_ERROR);

    const unsigned iPml4  = (GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK;
    const unsigned iPdpt  = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
    PX86PDPAE  pPDDst;
    PX86PDPT   pPdptDst;
    PX86PML4E  pPml4eDst;
    rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, &pPml4eDst, &pPdptDst, &pPDDst);
    if (rc != VINF_SUCCESS)
    {
        AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT, ("Unexpected rc=%Rrc\n", rc));
        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
        if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
            PGM_INVL_GUEST_TLBS();
        return VINF_SUCCESS;
    }
    Assert(pPDDst);

    PX86PDEPAE pPdeDst  = &pPDDst->a[iPDDst];
    PX86PDPE   pPdpeDst = &pPdptDst->a[iPdpt];

    if (!pPdpeDst->n.u1Present)
    {
        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
        if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
            PGM_INVL_GUEST_TLBS();
        return VINF_SUCCESS;
    }

# endif /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */

    const SHWPDE PdeDst = *pPdeDst;
    if (!PdeDst.n.u1Present)
    {
        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
        return VINF_SUCCESS;
    }

    /*
     * Get the guest PD entry and calc big page.
     */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
    PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
    const unsigned iPDSrc = GCPtrPage >> GST_PD_SHIFT;
    GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
# else /* PGM_GST_TYPE != PGM_TYPE_32BIT */
    unsigned iPDSrc;
#  if PGM_GST_TYPE == PGM_TYPE_PAE
    X86PDPE PdpeSrc;
    PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
#  else /* AMD64 */
    PX86PML4E pPml4eSrc;
    X86PDPE PdpeSrc;
    PX86PDPAE pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
#  endif
    GSTPDE PdeSrc;

    if (pPDSrc)
        PdeSrc = pPDSrc->a[iPDSrc];
    else
        PdeSrc.u = 0;
# endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */

# if PGM_GST_TYPE == PGM_TYPE_AMD64
    const bool fIsBigPage = PdeSrc.b.u1Size;
# else
    const bool fIsBigPage = PdeSrc.b.u1Size && (CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
# endif
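    /* Added note: the PS (big page) bit is honoured unconditionally in long
       mode, which is why the CR4.PSE test is skipped for AMD64 guests; for
       32-bit guests PS only takes effect when CR4.PSE is set (the same test
       is applied to PAE guests above). */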

# ifdef IN_RING3
    /*
     * If a CR3 Sync is pending we may ignore the invalidate page operation
     * depending on the kind of sync and if it's a global page or not.
     * This doesn't make sense in GC/R0 so we'll skip it entirely there.
     */
#  ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
    if (    VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3)
        ||  (   VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)
             && fIsBigPage
             && PdeSrc.b.u1Global
            )
       )
#  else
    if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL) )
#  endif
    {
        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
        return VINF_SUCCESS;
    }
# endif /* IN_RING3 */

# if PGM_GST_TYPE == PGM_TYPE_AMD64
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPdpt = pgmPoolGetPageByHCPhys(pVM, pPml4eDst->u & X86_PML4E_PG_MASK);
    Assert(pShwPdpt);

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpt].u & SHW_PDPE_PG_MASK);
    Assert(pShwPde);

    Assert(pPml4eDst->n.u1Present && (pPml4eDst->u & SHW_PDPT_MASK));
    RTGCPHYS GCPhysPdpt = pPml4eSrc->u & X86_PML4E_PG_MASK;

    if (    !pPml4eSrc->n.u1Present
        ||  pShwPdpt->GCPhys != GCPhysPdpt)
    {
        LogFlow(("InvalidatePage: Out-of-sync PML4E (P/GCPhys) at %RGv GCPhys=%RGp vs %RGp Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
                 GCPtrPage, pShwPdpt->GCPhys, GCPhysPdpt, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
        pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
        pPml4eDst->u = 0;
        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
        PGM_INVL_GUEST_TLBS();
        return VINF_SUCCESS;
    }
    if (    pPml4eSrc->n.u1User != pPml4eDst->n.u1User
        ||  (!pPml4eSrc->n.u1Write && pPml4eDst->n.u1Write))
    {
        /*
         * Mark not present so we can resync the PML4E when it's used.
         */
        LogFlow(("InvalidatePage: Out-of-sync PML4E at %RGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
                 GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
        pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
        pPml4eDst->u = 0;
        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
        PGM_INVL_GUEST_TLBS();
    }
    else if (!pPml4eSrc->n.u1Accessed)
    {
        /*
         * Mark not present so we can set the accessed bit.
         */
        LogFlow(("InvalidatePage: Out-of-sync PML4E (A) at %RGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
                 GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
        pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
        pPml4eDst->u = 0;
        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
        PGM_INVL_GUEST_TLBS();
    }

    /* Check if the PDPT entry has changed. */
    Assert(pPdpeDst->n.u1Present && pPdpeDst->u & SHW_PDPT_MASK);
    RTGCPHYS GCPhysPd = PdpeSrc.u & GST_PDPE_PG_MASK;
    if (    !PdpeSrc.n.u1Present
        ||  pShwPde->GCPhys != GCPhysPd)
    {
        LogFlow(("InvalidatePage: Out-of-sync PDPE (P/GCPhys) at %RGv GCPhys=%RGp vs %RGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
                 GCPtrPage, pShwPde->GCPhys, GCPhysPd, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
        pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
        pPdpeDst->u = 0;
        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
        PGM_INVL_GUEST_TLBS();
        return VINF_SUCCESS;
    }
    if (    PdpeSrc.lm.u1User != pPdpeDst->lm.u1User
        ||  (!PdpeSrc.lm.u1Write && pPdpeDst->lm.u1Write))
    {
        /*
         * Mark not present so we can resync the PDPTE when it's used.
         */
        LogFlow(("InvalidatePage: Out-of-sync PDPE at %RGv PdpeSrc=%RX64 PdpeDst=%RX64\n",
                 GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
        pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
        pPdpeDst->u = 0;
        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
        PGM_INVL_GUEST_TLBS();
    }
    else if (!PdpeSrc.lm.u1Accessed)
    {
        /*
         * Mark not present so we can set the accessed bit.
         */
        LogFlow(("InvalidatePage: Out-of-sync PDPE (A) at %RGv PdpeSrc=%RX64 PdpeDst=%RX64\n",
                 GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
        pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
        pPdpeDst->u = 0;
        STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
        PGM_INVL_GUEST_TLBS();
    }
# endif /* PGM_GST_TYPE == PGM_TYPE_AMD64 */

# if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    /*
     * Update the shadow PDPE and free all the shadow PD entries if the PDPE is marked not present.
     * Note: This shouldn't actually be necessary as we monitor the PDPT page for changes.
     */
    if (!pPDSrc)
    {
        /* Guest PDPE not present */
        PX86PDPAE pPDDst = pgmShwGetPaePDPtr(&pVM->pgm.s, GCPtrPage);
        PPGMPOOL  pPool  = pVM->pgm.s.CTX_SUFF(pPool);

        Assert(!PdpeSrc.n.u1Present);
        LogFlow(("InvalidatePage: guest PDPE %d not present; clear shw pdpe\n", iPdpt));

        /* for each page directory entry */
        for (unsigned iPD = 0; iPD < X86_PG_PAE_ENTRIES; iPD++)
        {
            if (    pPDDst->a[iPD].n.u1Present
                &&  !(pPDDst->a[iPD].u & PGM_PDFLAGS_MAPPING))
            {
                pgmPoolFree(pVM, pPDDst->a[iPD].u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPdpt * X86_PG_PAE_ENTRIES + iPD);
                pPDDst->a[iPD].u = 0;
            }
        }
        if (!(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
            pPdptDst->a[iPdpt].n.u1Present = 0;
        PGM_INVL_GUEST_TLBS();
    }
    AssertMsg(pVM->pgm.s.fMappingsFixed || (PdpeSrc.u & X86_PDPE_PG_MASK) == pVM->pgm.s.aGCPhysGstPaePDsMonitored[iPdpt], ("%RGp vs %RGp (mon)\n", (PdpeSrc.u & X86_PDPE_PG_MASK), pVM->pgm.s.aGCPhysGstPaePDsMonitored[iPdpt]));
# endif


    /*
     * Deal with the Guest PDE.
     */
    rc = VINF_SUCCESS;
    if (PdeSrc.n.u1Present)
    {
        if (PdeDst.u & PGM_PDFLAGS_MAPPING)
        {
            /*
             * Conflict - Let SyncPT deal with it to avoid duplicate code.
             */
            Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
            Assert(PGMGetGuestMode(pVM) <= PGMMODE_PAE);
            rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, GCPtrPage);
        }
        else if (    PdeSrc.n.u1User != PdeDst.n.u1User
                 ||  (!PdeSrc.n.u1Write && PdeDst.n.u1Write))
        {
            /*
             * Mark not present so we can resync the PDE when it's used.
             */
            LogFlow(("InvalidatePage: Out-of-sync at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
                     GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
            pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
# else
            pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
# endif
            pPdeDst->u = 0;
            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
            PGM_INVL_GUEST_TLBS();
        }
        else if (!PdeSrc.n.u1Accessed)
        {
            /*
             * Mark not present so we can set the accessed bit.
             */
            LogFlow(("InvalidatePage: Out-of-sync (A) at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
                     GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
            pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
# else
            pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
# endif
            pPdeDst->u = 0;
            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
            PGM_INVL_GUEST_TLBS();
        }
        else if (!fIsBigPage)
        {
            /*
             * 4KB - page.
             */
            PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
            RTGCPHYS     GCPhys   = PdeSrc.u & GST_PDE_PG_MASK;
# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
            /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
            GCPhys |= (iPDDst & 1) * (PAGE_SIZE/2);
# endif
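            /* Added note: a 32-bit guest page table has 1024 entries covering
               4MB, while a PAE shadow page table has 512 entries covering 2MB,
               so one guest PT is shadowed by two shadow PTs. The odd/even PD
               index selects the half; the PAGE_SIZE/2 byte offset (0x800,
               i.e. entry 512 of the guest PT) lets GCPhys distinguish them. */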
            if (pShwPage->GCPhys == GCPhys)
            {
# if 0 /* likely cause of a major performance regression; must be SyncPageWorkerTrackDeref then */
                const unsigned iPTEDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
                PSHWPT pPT = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
                if (pPT->a[iPTEDst].n.u1Present)
                {
#  ifdef PGMPOOL_WITH_USER_TRACKING
                    /* This is very unlikely with caching/monitoring enabled. */
                    PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPT->a[iPTEDst].u & SHW_PTE_PG_MASK);
#  endif
                    pPT->a[iPTEDst].u = 0;
                }
# else /* Syncing it here isn't 100% safe and it's probably not worth spending time syncing it. */
                rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, GCPtrPage, 1, 0);
                if (RT_SUCCESS(rc))
                    rc = VINF_SUCCESS;
# endif
                STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage4KBPages));
                PGM_INVL_PG(GCPtrPage);
            }
            else
            {
                /*
                 * The page table address changed.
                 */
                LogFlow(("InvalidatePage: Out-of-sync at %RGp PdeSrc=%RX64 PdeDst=%RX64 ShwGCPhys=%RGp iPDDst=%#x\n",
                         GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, iPDDst));
# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
                pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
# else
                pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
# endif
                pPdeDst->u = 0;
                STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
                PGM_INVL_GUEST_TLBS();
            }
        }
        else
        {
            /*
             * 2/4MB - page.
             */
            /* Before freeing the page, check if anything really changed. */
            PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
            RTGCPHYS     GCPhys   = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
            /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs. */
            GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
# endif
            if (    pShwPage->GCPhys == GCPhys
                &&  pShwPage->enmKind == BTH_PGMPOOLKIND_PT_FOR_BIG)
            {
                /* ASSUMES that the given bits are identical for 4M and normal PDEs */
                /** @todo PAT */
                if (    (PdeSrc.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD))
                    ==  (PdeDst.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD))
                    &&  (   PdeSrc.b.u1Dirty /** @todo rainy day: What about read-only 4M pages? not very common, but still... */
                         || (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)))
                {
                    LogFlow(("Skipping flush for big page containing %RGv (PD=%X .u=%RX64)-> nothing has changed!\n", GCPtrPage, iPDSrc, PdeSrc.u));
                    STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage4MBPagesSkip));
                    return VINF_SUCCESS;
                }
            }

            /*
             * Ok, the page table is present and it's been changed in the guest.
             * If we're in host context, we'll just mark it as not present, taking the lazy approach.
             * We could do this for some flushes in GC too, but we need an algorithm for
             * deciding which 4MB pages contain code likely to be executed very soon.
             */
            LogFlow(("InvalidatePage: Out-of-sync PD at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
                     GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
            pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
# else
            pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
# endif
            pPdeDst->u = 0;
            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage4MBPages));
            PGM_INVL_BIG_PG(GCPtrPage);
        }
    }
    else
    {
        /*
         * Page directory is not present, mark shadow PDE not present.
         */
        if (!(PdeDst.u & PGM_PDFLAGS_MAPPING))
        {
# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
            pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
# else
            pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
# endif
            pPdeDst->u = 0;
            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
            PGM_INVL_PG(GCPtrPage);
        }
        else
        {
            Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDMappings));
        }
    }

    return rc;

#else /* guest real and protected mode */
    /* There's no such thing as InvalidatePage when paging is disabled, so just ignore. */
    return VINF_SUCCESS;
#endif
}


#ifdef PGMPOOL_WITH_USER_TRACKING
/**
 * Update the tracking of shadowed pages.
 *
 * @param   pVM         The VM handle.
 * @param   pShwPage    The shadow page.
 * @param   HCPhys      The physical page that is being dereferenced.
 */
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVM pVM, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys)
{
# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    STAM_PROFILE_START(&pVM->pgm.s.StatTrackDeref, a);
    LogFlow(("SyncPageWorkerTrackDeref: Damn HCPhys=%RHp pShwPage->idx=%#x!!!\n", HCPhys, pShwPage->idx));

    /** @todo If this turns out to be a bottleneck (*very* likely) two things can be done:
     *        1. have a medium sized HCPhys -> GCPhys TLB (hash?)
     *        2. write protect all shadowed pages. I.e. implement caching.
     */
    /*
     * Find the guest address.
     */
    for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
         pRam;
         pRam = pRam->CTX_SUFF(pNext))
    {
        unsigned iPage = pRam->cb >> PAGE_SHIFT;
        while (iPage-- > 0)
        {
            if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
            {
                PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
                pgmTrackDerefGCPhys(pPool, pShwPage, &pRam->aPages[iPage]);
                pShwPage->cPresent--;
                pPool->cPresent--;
                STAM_PROFILE_STOP(&pVM->pgm.s.StatTrackDeref, a);
                return;
            }
        }
    }

    for (;;)
        AssertReleaseMsgFailed(("HCPhys=%RHp wasn't found!\n", HCPhys));
# else /* !PGMPOOL_WITH_GCPHYS_TRACKING */
    pShwPage->cPresent--;
    pVM->pgm.s.CTX_SUFF(pPool)->cPresent--;
# endif /* !PGMPOOL_WITH_GCPHYS_TRACKING */
}


/**
 * Update the tracking of shadowed pages.
 *
 * @param   pVM         The VM handle.
 * @param   pShwPage    The shadow page.
 * @param   u16         The top 16 bits of pPage->HCPhys.
 * @param   pPage       Pointer to the guest page. This will be modified.
 * @param   iPTDst      The index into the shadow table.
 */
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackAddref)(PVM pVM, PPGMPOOLPAGE pShwPage, uint16_t u16, PPGMPAGE pPage, const unsigned iPTDst)
{
# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    /*
     * We're making certain assumptions about the placement of cRef and idx.
     */
    Assert(MM_RAM_FLAGS_IDX_SHIFT == 48);
    Assert(MM_RAM_FLAGS_CREFS_SHIFT > MM_RAM_FLAGS_IDX_SHIFT);

    /*
     * Just deal with the simple first time here.
     */
    if (!u16)
    {
        STAM_COUNTER_INC(&pVM->pgm.s.StatTrackVirgin);
        u16 = (1 << (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT)) | pShwPage->idx;
    }
    else
        u16 = pgmPoolTrackPhysExtAddref(pVM, u16, pShwPage->idx);

    /* write back, trying to be clever... */
    Log2(("SyncPageWorkerTrackAddRef: u16=%#x pPage->HCPhys=%RHp->%RHp iPTDst=%#x\n",
          u16, pPage->HCPhys, (pPage->HCPhys & MM_RAM_FLAGS_NO_REFS_MASK) | ((uint64_t)u16 << MM_RAM_FLAGS_CREFS_SHIFT), iPTDst));
    *((uint16_t *)&pPage->HCPhys + 3) = u16; /** @todo PAGE FLAGS */
# endif /* PGMPOOL_WITH_GCPHYS_TRACKING */

    /* update statistics. */
    pVM->pgm.s.CTX_SUFF(pPool)->cPresent++;
    pShwPage->cPresent++;
    if (pShwPage->iFirstPresent > iPTDst)
        pShwPage->iFirstPresent = iPTDst;
}
#endif /* PGMPOOL_WITH_USER_TRACKING */
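/*
 * Added note on the u16 packing used by SyncPageWorkerTrackAddref above
 * (following the asserts: MM_RAM_FLAGS_IDX_SHIFT == 48 with the cRefs field
 * above the index field): the top 16 bits of pPage->HCPhys hold a small
 * reference count in the upper bits and a shadow pool page index in the lower
 * bits. The first reference stores cRefs=1 together with the owning shadow
 * page's idx; later references are handed to pgmPoolTrackPhysExtAddref, which
 * overflows into an external tracking list.
 */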


/**
 * Creates a 4K shadow page for a guest page.
 *
 * For 4M pages the caller must convert the PDE4M to a PTE; this includes adjusting
 * the physical address. Only the flags of the PdeSrc argument are used. No page
 * structures will be mapped in this function.
 *
 * @param   pVM         VM handle.
 * @param   pPteDst     Destination page table entry.
 * @param   PdeSrc      Source page directory entry (i.e. Guest OS page directory entry).
 *                      Can safely assume that only the flags are being used.
 * @param   PteSrc      Source page table entry (i.e. Guest OS page table entry).
 * @param   pShwPage    Pointer to the shadow page.
 * @param   iPTDst      The index into the shadow table.
 *
 * @remark  Not used for 2/4MB pages!
 */
1464DECLINLINE(void) PGM_BTH_NAME(SyncPageWorker)(PVM pVM, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc, PPGMPOOLPAGE pShwPage, unsigned iPTDst)
1465{
1466 if (PteSrc.n.u1Present)
1467 {
1468 /*
1469 * Find the ram range.
1470 */
1471 PPGMPAGE pPage;
1472 int rc = pgmPhysGetPageEx(&pVM->pgm.s, PteSrc.u & GST_PTE_PG_MASK, &pPage);
1473 if (RT_SUCCESS(rc))
1474 {
1475 /** @todo investigate PWT, PCD and PAT. */
1476 /*
1477 * Make page table entry.
1478 */
1479 const RTHCPHYS HCPhys = pPage->HCPhys; /** @todo FLAGS */
1480 SHWPTE PteDst;
1481 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1482 {
1483 /** @todo r=bird: Are we actually handling dirty and access bits for pages with access handlers correctly? No. */
1484 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
1485 {
1486#if PGM_SHW_TYPE == PGM_TYPE_EPT
1487 PteDst.u = (HCPhys & EPT_PTE_PG_MASK);
1488 PteDst.n.u1Present = 1;
1489 PteDst.n.u1Execute = 1;
1490 PteDst.n.u1IgnorePAT = 1;
1491 PteDst.n.u3EMT = VMX_EPT_MEMTYPE_WB;
1492 /* PteDst.n.u1Write = 0 && PteDst.n.u1Size = 0 */
1493#else
1494 PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
1495 | (HCPhys & X86_PTE_PAE_PG_MASK);
1496#endif
1497 }
1498 else
1499 {
1500 LogFlow(("SyncPageWorker: monitored page (%RHp) -> mark not present\n", HCPhys));
1501 PteDst.u = 0;
1502 }
1503 /** @todo count these two kinds. */
1504 }
1505 else
1506 {
1507#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
1508 /*
1509 * If the page or page directory entry is not marked accessed,
1510 * we mark the page not present.
1511 */
1512 if (!PteSrc.n.u1Accessed || !PdeSrc.n.u1Accessed)
1513 {
1514 LogFlow(("SyncPageWorker: page and or page directory not accessed -> mark not present\n"));
1515 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,AccessedPage));
1516 PteDst.u = 0;
1517 }
1518 else
1519 /*
1520 * If the page is not flagged as dirty and is writable, then make it read-only, so we can set the dirty bit
1521 * when the page is modified.
1522 */
1523 if (!PteSrc.n.u1Dirty && (PdeSrc.n.u1Write & PteSrc.n.u1Write))
1524 {
1525 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPage));
1526 PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
1527 | (HCPhys & X86_PTE_PAE_PG_MASK)
1528 | PGM_PTFLAGS_TRACK_DIRTY;
1529 }
1530 else
1531#endif
1532 {
1533 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageSkipped));
1534#if PGM_SHW_TYPE == PGM_TYPE_EPT
1535 PteDst.u = (HCPhys & EPT_PTE_PG_MASK);
1536 PteDst.n.u1Present = 1;
1537 PteDst.n.u1Write = 1;
1538 PteDst.n.u1Execute = 1;
1539 PteDst.n.u1IgnorePAT = 1;
1540 PteDst.n.u3EMT = VMX_EPT_MEMTYPE_WB;
1541 /* PteDst.n.u1Size = 0 */
1542#else
1543 PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
1544 | (HCPhys & X86_PTE_PAE_PG_MASK);
1545#endif
1546 }
1547 }
1548
1549#ifdef PGMPOOL_WITH_USER_TRACKING
1550 /*
1551 * Keep user track up to date.
1552 */
1553 if (PteDst.n.u1Present)
1554 {
1555 if (!pPteDst->n.u1Present)
1556 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst);
1557 else if ((pPteDst->u & SHW_PTE_PG_MASK) != (PteDst.u & SHW_PTE_PG_MASK))
1558 {
1559 Log2(("SyncPageWorker: deref! *pPteDst=%RX64 PteDst=%RX64\n", (uint64_t)pPteDst->u, (uint64_t)PteDst.u));
1560 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
1561 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst);
1562 }
1563 }
1564 else if (pPteDst->n.u1Present)
1565 {
1566 Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u));
1567 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
1568 }
1569#endif /* PGMPOOL_WITH_USER_TRACKING */
1570
1571 /*
1572 * Update statistics and commit the entry.
1573 */
1574 if (!PteSrc.n.u1Global)
1575 pShwPage->fSeenNonGlobal = true;
1576 *pPteDst = PteDst;
1577 }
1578 /* else MMIO or invalid page, we must handle them manually in the #PF handler. */
1579 /** @todo count these. */
1580 }
1581 else
1582 {
1583 /*
1584 * Page not present.
1585 */
1586 LogFlow(("SyncPageWorker: page not present in Pte\n"));
1587#ifdef PGMPOOL_WITH_USER_TRACKING
1588 /* Keep user track up to date. */
1589 if (pPteDst->n.u1Present)
1590 {
1591 Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u));
1592 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
1593 }
1594#endif /* PGMPOOL_WITH_USER_TRACKING */
1595 pPteDst->u = 0;
1596 /** @todo count these. */
1597 }
1598}
1599
1600
1601/**
1602 * Syncs a guest OS page.
1603 *
1604 * There are no conflicts at this point, neither is there any need for
1605 * page table allocations.
1606 *
1607 * @returns VBox status code.
1608 * @returns VINF_PGM_SYNCPAGE_MODIFIED_PDE if it modifies the PDE in any way.
1609 * @param pVM VM handle.
1610 * @param PdeSrc Page directory entry of the guest.
1611 * @param GCPtrPage Guest context page address.
1612 * @param cPages Number of pages to sync (PGM_SYNC_N_PAGES) (default=1).
1613 * @param uErr Fault error (X86_TRAP_PF_*).
1614 */
1615PGM_BTH_DECL(int, SyncPage)(PVM pVM, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr)
1616{
1617 LogFlow(("SyncPage: GCPtrPage=%RGv cPages=%u uErr=%#x\n", GCPtrPage, cPages, uErr));
1618
1619#if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
1620 || PGM_GST_TYPE == PGM_TYPE_PAE \
1621 || PGM_GST_TYPE == PGM_TYPE_AMD64) \
1622 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
1623 && PGM_SHW_TYPE != PGM_TYPE_EPT
1624
1625# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
1626 bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
1627# endif
1628
1629 /*
1630 * Assert preconditions.
1631 */
1632 Assert(PdeSrc.n.u1Present);
1633 Assert(cPages);
1634 STAM_COUNTER_INC(&pVM->pgm.s.StatSyncPagePD[(GCPtrPage >> GST_PD_SHIFT) & GST_PD_MASK]);
1635
1636 /*
1637 * Get the shadow PDE, find the shadow page table in the pool.
1638 */
1639# if PGM_SHW_TYPE == PGM_TYPE_32BIT
1640 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
1641 PX86PDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
1642
1643# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1644 /* Fetch the pgm pool shadow descriptor. */
1645 PPGMPOOLPAGE pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
1646 Assert(pShwPde);
1647# endif
1648
1649# elif PGM_SHW_TYPE == PGM_TYPE_PAE
1650
1651# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1652 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
1653 PPGMPOOLPAGE pShwPde;
1654 PX86PDPAE pPDDst;
1655
1656 /* Fetch the pgm pool shadow descriptor. */
1657 int rc = pgmShwGetPaePoolPagePD(&pVM->pgm.s, GCPtrPage, &pShwPde);
1658 AssertRCSuccessReturn(rc, rc);
1659 Assert(pShwPde);
1660
1661 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
1662 PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
1663# else
1664 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - only pool index atm! */;
1665 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT);
1666 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s); NOREF(pPdptDst);
1667 PX86PDEPAE pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
1668 AssertReturn(pPdeDst, VERR_INTERNAL_ERROR);
1669# endif
1670# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
1671 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
1672 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1673 PX86PDPAE pPDDst;
1674 PX86PDPT pPdptDst;
1675
1676 int rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, NULL, &pPdptDst, &pPDDst);
1677 AssertRCSuccessReturn(rc, rc);
1678 Assert(pPDDst && pPdptDst);
1679 PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
1680# endif
1681
1682 SHWPDE PdeDst = *pPdeDst;
1683 AssertMsg(PdeDst.n.u1Present, ("%p=%llx\n", pPdeDst, (uint64_t)PdeDst.u));
1684 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
1685
1686# if PGM_GST_TYPE == PGM_TYPE_AMD64
1687 /* Fetch the pgm pool shadow descriptor. */
1688 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
1689 Assert(pShwPde);
1690# endif
1691
1692 /*
1693 * Check that the page is present and that the shadow PDE isn't out of sync.
1694 */
1695# if PGM_GST_TYPE == PGM_TYPE_AMD64
1696 const bool fBigPage = PdeSrc.b.u1Size;
1697# else
1698 const bool fBigPage = PdeSrc.b.u1Size && (CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
1699# endif
1700 RTGCPHYS GCPhys;
1701 if (!fBigPage)
1702 {
1703 GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
1704# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1705 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
1706 GCPhys |= (iPDDst & 1) * (PAGE_SIZE/2);
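 /* (A 32-bit guest PT holds 1024 4-byte entries, i.e. 4KB, while a PAE
  * shadow PT holds only 512, so one guest PT is shadowed by two shadow
  * PTs; the odd one maps the second 2KB half of the guest PT page.) */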
1707# endif
1708 }
1709 else
1710 {
1711 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
1712# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1713 /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
1714 GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
1715# endif
1716 }
1717 if ( pShwPage->GCPhys == GCPhys
1718 && PdeSrc.n.u1Present
1719 && (PdeSrc.n.u1User == PdeDst.n.u1User)
1720 && (PdeSrc.n.u1Write == PdeDst.n.u1Write || !PdeDst.n.u1Write)
1721# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
1722 && (!fNoExecuteBitValid || PdeSrc.n.u1NoExecute == PdeDst.n.u1NoExecute)
1723# endif
1724 )
1725 {
1726 /*
1727 * Check that the PDE is marked accessed already.
1728 * Since we set the accessed bit *before* getting here on a #PF, this
1729 * check is only meant for dealing with non-#PF'ing paths.
1730 */
1731 if (PdeSrc.n.u1Accessed)
1732 {
1733 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1734 if (!fBigPage)
1735 {
1736 /*
1737 * 4KB Page - Map the guest page table.
1738 */
1739 PGSTPT pPTSrc;
1740 int rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
1741 if (RT_SUCCESS(rc))
1742 {
1743# ifdef PGM_SYNC_N_PAGES
1744 Assert(cPages == 1 || !(uErr & X86_TRAP_PF_P));
1745 if (cPages > 1 && !(uErr & X86_TRAP_PF_P))
1746 {
1747 /*
1748 * This code path is currently only taken when the caller is PGMTrap0eHandler
1749 * for non-present pages!
1750 *
1751 * We're setting PGM_SYNC_NR_PAGES pages around the faulting page to sync it and
1752 * deal with locality.
1753 */
1754 unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1755# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1756 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
1757 const unsigned offPTSrc = ((GCPtrPage >> SHW_PD_SHIFT) & 1) * 512;
1758# else
1759 const unsigned offPTSrc = 0;
1760# endif
1761 const unsigned iPTDstEnd = RT_MIN(iPTDst + PGM_SYNC_NR_PAGES / 2, RT_ELEMENTS(pPTDst->a));
1762 if (iPTDst < PGM_SYNC_NR_PAGES / 2)
1763 iPTDst = 0;
1764 else
1765 iPTDst -= PGM_SYNC_NR_PAGES / 2;
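 /* (Window sketch, assuming PGM_SYNC_NR_PAGES is 8: a fault at iPTDst=5
  * syncs shadow PTEs 1..8, while a fault at iPTDst=2 clamps the window
  * to entries 0..5.) */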
1766 for (; iPTDst < iPTDstEnd; iPTDst++)
1767 {
1768 if (!pPTDst->a[iPTDst].n.u1Present)
1769 {
1770 GSTPTE PteSrc = pPTSrc->a[offPTSrc + iPTDst];
1771 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(GST_PT_MASK << GST_PT_SHIFT)) | ((offPTSrc + iPTDst) << PAGE_SHIFT);
1772 NOREF(GCPtrCurPage);
1773#ifndef IN_RING0
1774 /*
1775 * Assuming kernel code will be marked as supervisor - and not as user level
1776 * and executed using a conforming code selector - and marked as read-only.
1777 * Also assume that if we're monitoring a page, it's of no interest to CSAM.
1778 */
1779 PPGMPAGE pPage;
1780 if ( ((PdeSrc.u & PteSrc.u) & (X86_PTE_RW | X86_PTE_US))
1781 || iPTDst == ((GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK) /* always sync GCPtrPage */
1782 || !CSAMDoesPageNeedScanning(pVM, (RTRCPTR)GCPtrCurPage)
1783 || ( (pPage = pgmPhysGetPage(&pVM->pgm.s, PteSrc.u & GST_PTE_PG_MASK))
1784 && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1785 )
1786#endif /* else: CSAM not active */
1787 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
1788 Log2(("SyncPage: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx%s\n",
1789 GCPtrCurPage, PteSrc.n.u1Present,
1790 PteSrc.n.u1Write & PdeSrc.n.u1Write,
1791 PteSrc.n.u1User & PdeSrc.n.u1User,
1792 (uint64_t)PteSrc.u,
1793 (uint64_t)pPTDst->a[iPTDst].u,
1794 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1795 }
1796 }
1797 }
1798 else
1799# endif /* PGM_SYNC_N_PAGES */
1800 {
1801 const unsigned iPTSrc = (GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK;
1802 GSTPTE PteSrc = pPTSrc->a[iPTSrc];
1803 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1804 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
1805 Log2(("SyncPage: 4K %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s\n",
1806 GCPtrPage, PteSrc.n.u1Present,
1807 PteSrc.n.u1Write & PdeSrc.n.u1Write,
1808 PteSrc.n.u1User & PdeSrc.n.u1User,
1809 (uint64_t)PteSrc.u,
1810 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1811 }
1812 }
1813 else /* MMIO or invalid page: emulated in #PF handler. */
1814 {
1815 LogFlow(("PGM_GCPHYS_2_PTR %RGp failed with %Rrc\n", GCPhys, rc));
1816 Assert(!pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK].n.u1Present);
1817 }
1818 }
1819 else
1820 {
1821 /*
1822 * 4/2MB page - lazy syncing shadow 4K pages.
1823 * (There are many causes of getting here, it's no longer only CSAM.)
1824 */
1825 /* Calculate the GC physical address of this 4KB shadow page. */
1826 RTGCPHYS GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc) | (GCPtrPage & GST_BIG_PAGE_OFFSET_MASK);
1827 /* Find ram range. */
1828 PPGMPAGE pPage;
1829 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1830 if (RT_SUCCESS(rc))
1831 {
1832 /*
1833 * Make shadow PTE entry.
1834 */
1835 const RTHCPHYS HCPhys = pPage->HCPhys; /** @todo PAGE FLAGS */
1836 SHWPTE PteDst;
1837 PteDst.u = (PdeSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
1838 | (HCPhys & X86_PTE_PAE_PG_MASK);
1839 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1840 {
1841 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
1842 PteDst.n.u1Write = 0;
1843 else
1844 PteDst.u = 0;
1845 }
1846 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1847# ifdef PGMPOOL_WITH_USER_TRACKING
1848 if (PteDst.n.u1Present && !pPTDst->a[iPTDst].n.u1Present)
1849 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst);
1850# endif
1851 pPTDst->a[iPTDst] = PteDst;
1852
1853
1854 /*
1855 * If the page is not flagged as dirty and is writable, then make it read-only
1856 * at PD level, so we can set the dirty bit when the page is modified.
1857 *
1858 * ASSUMES that page access handlers are implemented on page table entry level.
1859 * Thus we will first catch the dirty access and set PDE.D and restart. If
1860 * there is an access handler, we'll trap again and let it work on the problem.
1861 */
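 /* (Sketch of the scheme: the read-only PDE + PGM_PDFLAGS_TRACK_DIRTY
  * combination makes the first guest write fault; CheckPageFault then
  * sets the guest PDE.D bit, restores PDE.W and clears the flag - see
  * the DirtyPageTrap path in CheckPageFault below.) */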
1862 /** @todo r=bird: figure out why we need this here, SyncPT should've taken care of this already.
1863 * As for invlpg, it simply frees the whole shadow PT.
1864 * ...It's possibly because the guest clears it and the guest doesn't really tell us... */
1865 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
1866 {
1867 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageBig));
1868 PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
1869 PdeDst.n.u1Write = 0;
1870 }
1871 else
1872 {
1873 PdeDst.au32[0] &= ~PGM_PDFLAGS_TRACK_DIRTY;
1874 PdeDst.n.u1Write = PdeSrc.n.u1Write;
1875 }
1876 *pPdeDst = PdeDst;
1877 Log2(("SyncPage: BIG %RGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx} GCPhys=%RGp%s\n",
1878 GCPtrPage, PdeSrc.n.u1Present, PdeSrc.n.u1Write, PdeSrc.n.u1User, (uint64_t)PdeSrc.u, GCPhys,
1879 PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1880 }
1881 else
1882 LogFlow(("PGM_GCPHYS_2_PTR %RGp (big) failed with %Rrc\n", GCPhys, rc));
1883 }
1884 return VINF_SUCCESS;
1885 }
1886 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPagePDNAs));
1887 }
1888 else
1889 {
1890 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPagePDOutOfSync));
1891 Log2(("SyncPage: Out-Of-Sync PDE at %RGp PdeSrc=%RX64 PdeDst=%RX64 (GCPhys %RGp vs %RGp)\n",
1892 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, GCPhys));
1893 }
1894
1895 /*
1896 * Mark the PDE not present. Restart the instruction and let #PF call SyncPT.
1897 * Yea, I'm lazy.
1898 */
1899 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1900# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
1901 pgmPoolFreeByPage(pPool, pShwPage, pShwPde->idx, iPDDst);
1902# else
1903 pgmPoolFreeByPage(pPool, pShwPage, SHW_POOL_ROOT_IDX, iPDDst);
1904# endif
1905
1906 pPdeDst->u = 0;
1907 PGM_INVL_GUEST_TLBS();
1908 return VINF_PGM_SYNCPAGE_MODIFIED_PDE;
1909
1910#elif (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
1911 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
1912 && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
1913
1914# ifdef PGM_SYNC_N_PAGES
1915 /*
1916 * Get the shadow PDE, find the shadow page table in the pool.
1917 */
1918# if PGM_SHW_TYPE == PGM_TYPE_32BIT
1919 X86PDE PdeDst = pgmShwGet32BitPDE(&pVM->pgm.s, GCPtrPage);
1920
1921# elif PGM_SHW_TYPE == PGM_TYPE_PAE
1922 X86PDEPAE PdeDst = pgmShwGetPaePDE(&pVM->pgm.s, GCPtrPage);
1923
1924# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
1925 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
1926 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; NOREF(iPdpt);
1927 PX86PDPAE pPDDst;
1928 X86PDEPAE PdeDst;
1929 PX86PDPT pPdptDst;
1930
1931 int rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, NULL, &pPdptDst, &pPDDst);
1932 AssertRCSuccessReturn(rc, rc);
1933 Assert(pPDDst && pPdptDst);
1934 PdeDst = pPDDst->a[iPDDst];
1935# elif PGM_SHW_TYPE == PGM_TYPE_EPT
1936 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
1937 PEPTPD pPDDst;
1938 EPTPDE PdeDst;
1939
1940 int rc = pgmShwGetEPTPDPtr(pVM, GCPtrPage, NULL, &pPDDst);
1941 if (rc != VINF_SUCCESS)
1942 {
1943 AssertRC(rc);
1944 return rc;
1945 }
1946 Assert(pPDDst);
1947 PdeDst = pPDDst->a[iPDDst];
1948# endif
1949 AssertMsg(PdeDst.n.u1Present, ("%#llx\n", (uint64_t)PdeDst.u));
1950 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
1951 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1952
1953 Assert(cPages == 1 || !(uErr & X86_TRAP_PF_P));
1954 if (cPages > 1 && !(uErr & X86_TRAP_PF_P))
1955 {
1956 /*
1957 * This code path is currently only taken when the caller is PGMTrap0eHandler
1958 * for non-present pages!
1959 *
1960 * We're setting PGM_SYNC_NR_PAGES pages around the faulting page to sync it and
1961 * deal with locality.
1962 */
1963 unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1964 const unsigned iPTDstEnd = RT_MIN(iPTDst + PGM_SYNC_NR_PAGES / 2, RT_ELEMENTS(pPTDst->a));
1965 if (iPTDst < PGM_SYNC_NR_PAGES / 2)
1966 iPTDst = 0;
1967 else
1968 iPTDst -= PGM_SYNC_NR_PAGES / 2;
1969 for (; iPTDst < iPTDstEnd; iPTDst++)
1970 {
1971 if (!pPTDst->a[iPTDst].n.u1Present)
1972 {
1973 GSTPTE PteSrc;
1974
1975 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
1976
1977 /* Fake the page table entry */
1978 PteSrc.u = GCPtrCurPage;
1979 PteSrc.n.u1Present = 1;
1980 PteSrc.n.u1Dirty = 1;
1981 PteSrc.n.u1Accessed = 1;
1982 PteSrc.n.u1Write = 1;
1983 PteSrc.n.u1User = 1;
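 /* (This amounts to an identity mapping: for unpaged real/protected
  * mode guests the virtual address doubles as the physical one, so the
  * faked PTE simply grants full access at GCPtrCurPage.) */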
1984
1985 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
1986
1987 Log2(("SyncPage: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx%s\n",
1988 GCPtrCurPage, PteSrc.n.u1Present,
1989 PteSrc.n.u1Write & PdeSrc.n.u1Write,
1990 PteSrc.n.u1User & PdeSrc.n.u1User,
1991 (uint64_t)PteSrc.u,
1992 (uint64_t)pPTDst->a[iPTDst].u,
1993 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1994 }
1995 else
1996 Log4(("%RGv iPTDst=%x pPTDst->a[iPTDst] %RX64\n", (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT), iPTDst, pPTDst->a[iPTDst].u));
1997 }
1998 }
1999 else
2000# endif /* PGM_SYNC_N_PAGES */
2001 {
2002 GSTPTE PteSrc;
2003 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
2004 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
2005
2006 /* Fake the page table entry */
2007 PteSrc.u = GCPtrCurPage;
2008 PteSrc.n.u1Present = 1;
2009 PteSrc.n.u1Dirty = 1;
2010 PteSrc.n.u1Accessed = 1;
2011 PteSrc.n.u1Write = 1;
2012 PteSrc.n.u1User = 1;
2013 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
2014
2015 Log2(("SyncPage: 4K %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}PteDst=%08llx%s\n",
2016 GCPtrPage, PteSrc.n.u1Present,
2017 PteSrc.n.u1Write & PdeSrc.n.u1Write,
2018 PteSrc.n.u1User & PdeSrc.n.u1User,
2019 (uint64_t)PteSrc.u,
2020 (uint64_t)pPTDst->a[iPTDst].u,
2021 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
2022 }
2023 return VINF_SUCCESS;
2024
2025#else
2026 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE));
2027 return VERR_INTERNAL_ERROR;
2028#endif
2029}
2030
2031
2032#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
2033/**
2034 * Investigate page fault and handle write protection page faults caused by
2035 * dirty bit tracking.
2036 *
2037 * @returns VBox status code.
2038 * @param pVM VM handle.
2039 * @param uErr Page fault error code.
2040 * @param pPdeDst Shadow page directory entry.
2041 * @param pPdeSrc Guest page directory entry.
2042 * @param GCPtrPage Guest context page address.
2043 */
2044PGM_BTH_DECL(int, CheckPageFault)(PVM pVM, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage)
2045{
2046 bool fWriteProtect = !!(CPUMGetGuestCR0(pVM) & X86_CR0_WP);
2047 bool fUserLevelFault = !!(uErr & X86_TRAP_PF_US);
2048 bool fWriteFault = !!(uErr & X86_TRAP_PF_RW);
2049# if PGM_GST_TYPE == PGM_TYPE_AMD64
2050 bool fBigPagesSupported = true;
2051# else
2052 bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
2053# endif
2054# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
2055 bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
2056# endif
2057 unsigned uPageFaultLevel;
2058 int rc;
2059
2060 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2061 LogFlow(("CheckPageFault: GCPtrPage=%RGv uErr=%#x PdeSrc=%08x\n", GCPtrPage, uErr, pPdeSrc->u));
2062
2063# if PGM_GST_TYPE == PGM_TYPE_PAE \
2064 || PGM_GST_TYPE == PGM_TYPE_AMD64
2065
2066# if PGM_GST_TYPE == PGM_TYPE_AMD64
2067 PX86PML4E pPml4eSrc;
2068 PX86PDPE pPdpeSrc;
2069
2070 pPdpeSrc = pgmGstGetLongModePDPTPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc);
2071 Assert(pPml4eSrc);
2072
2073 /*
2074 * Real page fault? (PML4E level)
2075 */
2076 if ( (uErr & X86_TRAP_PF_RSVD)
2077 || !pPml4eSrc->n.u1Present
2078 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && pPml4eSrc->n.u1NoExecute)
2079 || (fWriteFault && !pPml4eSrc->n.u1Write && (fUserLevelFault || fWriteProtect))
2080 || (fUserLevelFault && !pPml4eSrc->n.u1User)
2081 )
2082 {
2083 uPageFaultLevel = 0;
2084 goto l_UpperLevelPageFault;
2085 }
2086 Assert(pPdpeSrc);
2087
2088# else /* PAE */
2089 PX86PDPE pPdpeSrc = pgmGstGetPaePDPEPtr(&pVM->pgm.s, GCPtrPage);
2090# endif /* PAE */
2091
2092 /*
2093 * Real page fault? (PDPE level)
2094 */
2095 if ( (uErr & X86_TRAP_PF_RSVD)
2096 || !pPdpeSrc->n.u1Present
2097# if PGM_GST_TYPE == PGM_TYPE_AMD64 /* NX, r/w, u/s bits in the PDPE are long mode only */
2098 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && pPdpeSrc->lm.u1NoExecute)
2099 || (fWriteFault && !pPdpeSrc->lm.u1Write && (fUserLevelFault || fWriteProtect))
2100 || (fUserLevelFault && !pPdpeSrc->lm.u1User)
2101# endif
2102 )
2103 {
2104 uPageFaultLevel = 1;
2105 goto l_UpperLevelPageFault;
2106 }
2107# endif
2108
2109 /*
2110 * Real page fault? (PDE level)
2111 */
2112 if ( (uErr & X86_TRAP_PF_RSVD)
2113 || !pPdeSrc->n.u1Present
2114# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
2115 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && pPdeSrc->n.u1NoExecute)
2116# endif
2117 || (fWriteFault && !pPdeSrc->n.u1Write && (fUserLevelFault || fWriteProtect))
2118 || (fUserLevelFault && !pPdeSrc->n.u1User) )
2119 {
2120 uPageFaultLevel = 2;
2121 goto l_UpperLevelPageFault;
2122 }
2123
2124 /*
2125 * First check the easy case where the page directory has been marked read-only to track
2126 * the dirty bit of an emulated BIG page
2127 */
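 /* (This is the counterpart of the PGM_PDFLAGS_TRACK_DIRTY setup done when
  * the big page was synced: the shadow PDE was made read-only on purpose, so
  * the write fault lands here and the guest D bit can be set lazily.) */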
2128 if (pPdeSrc->b.u1Size && fBigPagesSupported)
2129 {
2130 /* Mark guest page directory as accessed */
2131# if PGM_GST_TYPE == PGM_TYPE_AMD64
2132 pPml4eSrc->n.u1Accessed = 1;
2133 pPdpeSrc->lm.u1Accessed = 1;
2134# endif
2135 pPdeSrc->b.u1Accessed = 1;
2136
2137 /*
2138 * Only write protection page faults are relevant here.
2139 */
2140 if (fWriteFault)
2141 {
2142 /* Mark guest page directory as dirty (BIG page only). */
2143 pPdeSrc->b.u1Dirty = 1;
2144
2145 if (pPdeDst->n.u1Present && (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY))
2146 {
2147 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
2148
2149 Assert(pPdeSrc->b.u1Write);
2150
2151 pPdeDst->n.u1Write = 1;
2152 pPdeDst->n.u1Accessed = 1;
2153 pPdeDst->au32[0] &= ~PGM_PDFLAGS_TRACK_DIRTY;
2154 PGM_INVL_BIG_PG(GCPtrPage);
2155 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2156 return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;
2157 }
2158 }
2159 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2160 return VINF_PGM_NO_DIRTY_BIT_TRACKING;
2161 }
2162 /* else: 4KB page table */
2163
2164 /*
2165 * Map the guest page table.
2166 */
2167 PGSTPT pPTSrc;
2168 rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
2169 if (RT_SUCCESS(rc))
2170 {
2171 /*
2172 * Real page fault?
2173 */
2174 PGSTPTE pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
2175 const GSTPTE PteSrc = *pPteSrc;
2176 if ( !PteSrc.n.u1Present
2177# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
2178 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && PteSrc.n.u1NoExecute)
2179# endif
2180 || (fWriteFault && !PteSrc.n.u1Write && (fUserLevelFault || fWriteProtect))
2181 || (fUserLevelFault && !PteSrc.n.u1User)
2182 )
2183 {
2184 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF));
2185 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2186 LogFlow(("CheckPageFault: real page fault at %RGv PteSrc.u=%08x (2)\n", GCPtrPage, PteSrc.u));
2187
2188 /* Check the present bit as the shadow tables can cause different error codes by being out of sync.
2189 * See the 2nd case above as well.
2190 */
2191 if (pPdeSrc->n.u1Present && pPteSrc->n.u1Present)
2192 TRPMSetErrorCode(pVM, uErr | X86_TRAP_PF_P); /* page-level protection violation */
2193
2194 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2195 return VINF_EM_RAW_GUEST_TRAP;
2196 }
2197 LogFlow(("CheckPageFault: page fault at %RGv PteSrc.u=%08x\n", GCPtrPage, PteSrc.u));
2198
2199 /*
2200 * Set the accessed bits in the page directory and the page table.
2201 */
2202# if PGM_GST_TYPE == PGM_TYPE_AMD64
2203 pPml4eSrc->n.u1Accessed = 1;
2204 pPdpeSrc->lm.u1Accessed = 1;
2205# endif
2206 pPdeSrc->n.u1Accessed = 1;
2207 pPteSrc->n.u1Accessed = 1;
2208
2209 /*
2210 * Only write protection page faults are relevant here.
2211 */
2212 if (fWriteFault)
2213 {
2214 /* Write access, so mark guest entry as dirty. */
2215# ifdef VBOX_WITH_STATISTICS
2216 if (!pPteSrc->n.u1Dirty)
2217 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtiedPage));
2218 else
2219 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageAlreadyDirty));
2220# endif
2221
2222 pPteSrc->n.u1Dirty = 1;
2223
2224 if (pPdeDst->n.u1Present)
2225 {
2226#ifndef IN_RING0
2227 /* Bail out here as pgmPoolGetPageByHCPhys will return NULL and we'll crash below.
2228 * Our individual shadow handlers will provide more information and force a fatal exit.
2229 */
2230 if (MMHyperIsInsideArea(pVM, (RTGCPTR)GCPtrPage))
2231 {
2232 LogRel(("CheckPageFault: write to hypervisor region %RGv\n", GCPtrPage));
2233 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2234 return VINF_SUCCESS;
2235 }
2236#endif
2237 /*
2238 * Map shadow page table.
2239 */
2240 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, pPdeDst->u & SHW_PDE_PG_MASK);
2241 if (pShwPage)
2242 {
2243 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2244 PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
2245 if ( pPteDst->n.u1Present /** @todo Optimize accessed bit emulation? */
2246 && (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY))
2247 {
2248 LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
2249# ifdef VBOX_STRICT
2250 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
2251 if (pPage)
2252 AssertMsg(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage),
2253 ("Unexpected dirty bit tracking on monitored page %RGv (phys %RGp)!!!!!!\n", GCPtrPage, pPteSrc->u & X86_PTE_PAE_PG_MASK));
2254# endif
2255 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
2256
2257 Assert(pPteSrc->n.u1Write);
2258
2259 pPteDst->n.u1Write = 1;
2260 pPteDst->n.u1Dirty = 1;
2261 pPteDst->n.u1Accessed = 1;
2262 pPteDst->au32[0] &= ~PGM_PTFLAGS_TRACK_DIRTY;
2263 PGM_INVL_PG(GCPtrPage);
2264
2265 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2266 return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;
2267 }
2268 }
2269 else
2270 AssertMsgFailed(("pgmPoolGetPageByHCPhys %RGp failed!\n", pPdeDst->u & SHW_PDE_PG_MASK));
2271 }
2272 }
2273/** @todo Optimize accessed bit emulation? */
2274# ifdef VBOX_STRICT
2275 /*
2276 * Sanity check.
2277 */
2278 else if ( !pPteSrc->n.u1Dirty
2279 && (pPdeSrc->n.u1Write & pPteSrc->n.u1Write)
2280 && pPdeDst->n.u1Present)
2281 {
2282 PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, pPdeDst->u & SHW_PDE_PG_MASK);
2283 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2284 PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
2285 if ( pPteDst->n.u1Present
2286 && pPteDst->n.u1Write)
2287 LogFlow(("Writable present page %RGv not marked for dirty bit tracking!!!\n", GCPtrPage));
2288 }
2289# endif /* VBOX_STRICT */
2290 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2291 return VINF_PGM_NO_DIRTY_BIT_TRACKING;
2292 }
2293 AssertRC(rc);
2294 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2295 return rc;
2296
2297
2298l_UpperLevelPageFault:
2299 /*
2300 * Pagefault detected while checking the PML4E, PDPE or PDE.
2301 * Single exit handler to get rid of duplicate code paths.
2302 */
2303 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF));
2304 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2305 Log(("CheckPageFault: real page fault at %RGv (%d)\n", GCPtrPage, uPageFaultLevel));
2306
2307 if (
2308# if PGM_GST_TYPE == PGM_TYPE_AMD64
2309 pPml4eSrc->n.u1Present &&
2310# endif
2311# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
2312 pPdpeSrc->n.u1Present &&
2313# endif
2314 pPdeSrc->n.u1Present)
2315 {
2316 /* Check the present bit as the shadow tables can cause different error codes by being out of sync. */
2317 if (pPdeSrc->b.u1Size && fBigPagesSupported)
2318 {
2319 TRPMSetErrorCode(pVM, uErr | X86_TRAP_PF_P); /* page-level protection violation */
2320 }
2321 else
2322 {
2323 /*
2324 * Map the guest page table.
2325 */
2326 PGSTPT pPTSrc;
2327 rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
2328 if (RT_SUCCESS(rc))
2329 {
2330 PGSTPTE pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
2331 const GSTPTE PteSrc = *pPteSrc;
2332 if (pPteSrc->n.u1Present)
2333 TRPMSetErrorCode(pVM, uErr | X86_TRAP_PF_P); /* page-level protection violation */
2334 }
2335 AssertRC(rc);
2336 }
2337 }
2338 return VINF_EM_RAW_GUEST_TRAP;
2339}
2340#endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
2341
2342
2343/**
2344 * Sync a shadow page table.
2345 *
2346 * The shadow page table is not present. This includes the case where
2347 * there is a conflict with a mapping.
2348 *
2349 * @returns VBox status code.
2350 * @param pVM VM handle.
2351 * @param iPDSrc Page directory index.
2352 * @param pPDSrc Source page directory (i.e. Guest OS page directory).
2353 * Assume this is a temporary mapping.
2354 * @param GCPtrPage GC pointer of the page that caused the fault.
2355 */
2356PGM_BTH_DECL(int, SyncPT)(PVM pVM, unsigned iPDSrc, PGSTPD pPDSrc, RTGCPTR GCPtrPage)
2357{
2358 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2359 STAM_COUNTER_INC(&pVM->pgm.s.StatSyncPtPD[iPDSrc]);
2360 LogFlow(("SyncPT: GCPtrPage=%RGv\n", GCPtrPage));
2361
2362#if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
2363 || PGM_GST_TYPE == PGM_TYPE_PAE \
2364 || PGM_GST_TYPE == PGM_TYPE_AMD64) \
2365 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
2366 && PGM_SHW_TYPE != PGM_TYPE_EPT
2367
2368 int rc = VINF_SUCCESS;
2369
2370 /*
2371 * Validate input a little bit.
2372 */
2373 AssertMsg(iPDSrc == ((GCPtrPage >> GST_PD_SHIFT) & GST_PD_MASK), ("iPDSrc=%x GCPtrPage=%RGv\n", iPDSrc, GCPtrPage));
2374# if PGM_SHW_TYPE == PGM_TYPE_32BIT
2375 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT;
2376 PSHWPDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
2377
2378# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2379 /* Fetch the pgm pool shadow descriptor. */
2380 PPGMPOOLPAGE pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
2381 Assert(pShwPde);
2382# endif
2383
2384# elif PGM_SHW_TYPE == PGM_TYPE_PAE
2385# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2386 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2387 PPGMPOOLPAGE pShwPde;
2388 PX86PDPAE pPDDst;
2389 PSHWPDE pPdeDst;
2390
2391 /* Fetch the pgm pool shadow descriptor. */
2392 rc = pgmShwGetPaePoolPagePD(&pVM->pgm.s, GCPtrPage, &pShwPde);
2393 AssertRCSuccessReturn(rc, rc);
2394 Assert(pShwPde);
2395
2396 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
2397 pPdeDst = &pPDDst->a[iPDDst];
2398# else
2399 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - only pool index atm! */;
2400 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT); NOREF(iPdpt);
2401 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s); NOREF(pPdptDst);
2402 PSHWPDE pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
2403# endif
2404# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
2405 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
2406 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2407 PX86PDPAE pPDDst;
2408 PX86PDPT pPdptDst;
2409 rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, NULL, &pPdptDst, &pPDDst);
2410 AssertRCSuccessReturn(rc, rc);
2411 Assert(pPDDst);
2412 PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
2413# endif
2414 SHWPDE PdeDst = *pPdeDst;
2415
2416# if PGM_GST_TYPE == PGM_TYPE_AMD64
2417 /* Fetch the pgm pool shadow descriptor. */
2418 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
2419 Assert(pShwPde);
2420# endif
2421
2422# ifndef PGM_WITHOUT_MAPPINGS
2423 /*
2424 * Check for conflicts.
2425 * GC: In case of a conflict we'll go to Ring-3 and do a full SyncCR3.
2426 * HC: Simply resolve the conflict.
2427 */
2428 if (PdeDst.u & PGM_PDFLAGS_MAPPING)
2429 {
2430 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
2431# ifndef IN_RING3
2432 Log(("SyncPT: Conflict at %RGv\n", GCPtrPage));
2433 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2434 return VERR_ADDRESS_CONFLICT;
2435# else
2436 PPGMMAPPING pMapping = pgmGetMapping(pVM, (RTGCPTR)GCPtrPage);
2437 Assert(pMapping);
2438# if PGM_GST_TYPE == PGM_TYPE_32BIT
2439 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
2440# elif PGM_GST_TYPE == PGM_TYPE_PAE
2441 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
2442# else
2443 AssertFailed(); /* can't happen for amd64 */
2444# endif
2445 if (RT_FAILURE(rc))
2446 {
2447 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2448 return rc;
2449 }
2450 PdeDst = *pPdeDst;
2451# endif
2452 }
2453# else /* PGM_WITHOUT_MAPPINGS */
2454 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
2455# endif /* PGM_WITHOUT_MAPPINGS */
2456 Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
2457
2458# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2459 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
2460 PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
2461# endif
2462
2463 /*
2464 * Sync page directory entry.
2465 */
2466 GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
2467 if (PdeSrc.n.u1Present)
2468 {
2469 /*
2470 * Allocate & map the page table.
2471 */
2472 PSHWPT pPTDst;
2473# if PGM_GST_TYPE == PGM_TYPE_AMD64
2474 const bool fPageTable = !PdeSrc.b.u1Size;
2475# else
2476 const bool fPageTable = !PdeSrc.b.u1Size || !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
2477# endif
2478 PPGMPOOLPAGE pShwPage;
2479 RTGCPHYS GCPhys;
2480 if (fPageTable)
2481 {
2482 GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
2483# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
2484 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
2485 GCPhys |= (iPDDst & 1) * (PAGE_SIZE / 2);
2486# endif
2487# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2488 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage);
2489# else
2490 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
2491# endif
2492 }
2493 else
2494 {
2495 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
2496# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
2497 /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
2498 GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
2499# endif
2500# if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2501 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, pShwPde->idx, iPDDst, &pShwPage);
2502# else
2503 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
2504# endif
2505 }
2506 if (rc == VINF_SUCCESS)
2507 pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2508 else if (rc == VINF_PGM_CACHED_PAGE)
2509 {
2510 /*
2511 * The PT was cached, just hook it up.
2512 */
2513 if (fPageTable)
2514 PdeDst.u = pShwPage->Core.Key
2515 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2516 else
2517 {
2518 PdeDst.u = pShwPage->Core.Key
2519 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2520 /* (see explanation and assumptions further down.) */
2521 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
2522 {
2523 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageBig));
2524 PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
2525 PdeDst.b.u1Write = 0;
2526 }
2527 }
2528 *pPdeDst = PdeDst;
2529# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2530 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
2531# endif
2532 return VINF_SUCCESS;
2533 }
2534 else if (rc == VERR_PGM_POOL_FLUSHED)
2535 {
2536 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
2537# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2538 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
2539# endif
2540 return VINF_PGM_SYNC_CR3;
2541 }
2542 else
2543 AssertMsgFailedReturn(("rc=%Rrc\n", rc), VERR_INTERNAL_ERROR);
2544 PdeDst.u &= X86_PDE_AVL_MASK;
2545 PdeDst.u |= pShwPage->Core.Key;
2546
2547 /*
2548 * Page directory has been accessed (this is a fault situation, remember).
2549 */
2550 pPDSrc->a[iPDSrc].n.u1Accessed = 1;
2551 if (fPageTable)
2552 {
2553 /*
2554 * Page table - 4KB.
2555 *
2556 * Sync all or just a few entries depending on PGM_SYNC_N_PAGES.
2557 */
2558 Log2(("SyncPT: 4K %RGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx}\n",
2559 GCPtrPage, PdeSrc.b.u1Present, PdeSrc.b.u1Write, PdeSrc.b.u1User, (uint64_t)PdeSrc.u));
2560 PGSTPT pPTSrc;
2561 rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
2562 if (RT_SUCCESS(rc))
2563 {
2564 /*
2565 * Start by syncing the page directory entry so CSAM's TLB trick works.
2566 */
2567 PdeDst.u = (PdeDst.u & (SHW_PDE_PG_MASK | X86_PDE_AVL_MASK))
2568 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2569 *pPdeDst = PdeDst;
2570# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2571 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
2572# endif
2573
2574 /*
2575 * Directory/page user or supervisor privilege: (same goes for read/write)
2576 *
2577 * Directory Page Combined
2578 * U/S U/S U/S
2579 * 0 0 0
2580 * 0 1 0
2581 * 1 0 0
2582 * 1 1 1
2583 *
2584 * Simple AND operation. Table listed for completeness.
2585 *
2586 */
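 /* (E.g. the effective user bit is PdeSrc.n.u1User & PteSrc.n.u1User,
  * exactly as computed in the log statements below.) */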
2587 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT4K));
2588# ifdef PGM_SYNC_N_PAGES
2589 unsigned iPTBase = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
2590 unsigned iPTDst = iPTBase;
2591 const unsigned iPTDstEnd = RT_MIN(iPTDst + PGM_SYNC_NR_PAGES / 2, RT_ELEMENTS(pPTDst->a));
2592 if (iPTDst <= PGM_SYNC_NR_PAGES / 2)
2593 iPTDst = 0;
2594 else
2595 iPTDst -= PGM_SYNC_NR_PAGES / 2;
2596# else /* !PGM_SYNC_N_PAGES */
2597 unsigned iPTDst = 0;
2598 const unsigned iPTDstEnd = RT_ELEMENTS(pPTDst->a);
2599# endif /* !PGM_SYNC_N_PAGES */
2600# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
2601 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
2602 const unsigned offPTSrc = ((GCPtrPage >> SHW_PD_SHIFT) & 1) * 512;
2603# else
2604 const unsigned offPTSrc = 0;
2605# endif
2606 for (; iPTDst < iPTDstEnd; iPTDst++)
2607 {
2608 const unsigned iPTSrc = iPTDst + offPTSrc;
2609 const GSTPTE PteSrc = pPTSrc->a[iPTSrc];
2610
2611 if (PteSrc.n.u1Present) /* we've already cleared it above */
2612 {
2613# ifndef IN_RING0
2614 /*
2615 * Assuming kernel code will be marked as supervisor - and not as user level
2616 * and executed using a conforming code selector - and marked as read-only.
2617 * Also assume that if we're monitoring a page, it's of no interest to CSAM.
2618 */
2619 PPGMPAGE pPage;
2620 if ( ((PdeSrc.u & pPTSrc->a[iPTSrc].u) & (X86_PTE_RW | X86_PTE_US))
2621 || !CSAMDoesPageNeedScanning(pVM, (RTRCPTR)((iPDSrc << GST_PD_SHIFT) | (iPTSrc << PAGE_SHIFT)))
2622 || ( (pPage = pgmPhysGetPage(&pVM->pgm.s, PteSrc.u & GST_PTE_PG_MASK))
2623 && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2624 )
2625# endif
2626 PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
2627 Log2(("SyncPT: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s dst.raw=%08llx iPTSrc=%x PdeSrc.u=%x physpte=%RGp\n",
2628 (RTGCPTR)((iPDSrc << GST_PD_SHIFT) | (iPTSrc << PAGE_SHIFT)),
2629 PteSrc.n.u1Present,
2630 PteSrc.n.u1Write & PdeSrc.n.u1Write,
2631 PteSrc.n.u1User & PdeSrc.n.u1User,
2632 (uint64_t)PteSrc.u,
2633 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : "", pPTDst->a[iPTDst].u, iPTSrc, PdeSrc.au32[0],
2634 (RTGCPHYS)((PdeSrc.u & GST_PDE_PG_MASK) + iPTSrc*sizeof(PteSrc)) ));
2635 }
2636 } /* for PTEs */
2637 }
2638 }
2639 else
2640 {
2641 /*
2642 * Big page - 2/4MB.
2643 *
2644 * We'll walk the ram range list in parallel and optimize lookups.
2645 * We will only sync one shadow page table at a time.
2646 */
2647 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT4M));
2648
2649 /**
2650 * @todo It might be more efficient to sync only a part of the 4MB page (similar to what we do for 4kb PDs).
2651 */
2652
2653 /*
2654 * Start by syncing the page directory entry.
2655 */
2656 PdeDst.u = (PdeDst.u & (SHW_PDE_PG_MASK | (X86_PDE_AVL_MASK & ~PGM_PDFLAGS_TRACK_DIRTY)))
2657 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2658
2659 /*
2660 * If the page is not flagged as dirty and is writable, then make it read-only
2661 * at PD level, so we can set the dirty bit when the page is modified.
2662 *
2663 * ASSUMES that page access handlers are implemented on page table entry level.
2664 * Thus we will first catch the dirty access and set PDE.D and restart. If
2665 * there is an access handler, we'll trap again and let it work on the problem.
2666 */
2667 /** @todo move the above stuff to a section in the PGM documentation. */
2668 Assert(!(PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY));
2669 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
2670 {
2671 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,DirtyPageBig));
2672 PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
2673 PdeDst.b.u1Write = 0;
2674 }
2675 *pPdeDst = PdeDst;
2676# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2677 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
2678# endif
2679
2680 /*
2681 * Fill the shadow page table.
2682 */
2683 /* Get address and flags from the source PDE. */
2684 SHWPTE PteDstBase;
2685 PteDstBase.u = PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT);
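 /* (PteDstBase now carries the access bits taken from the big PDE; the loop
  * below stamps each shadow PTE with the per-page HCPhys OR'ed with this
  * template while walking the RAM ranges in step.) */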
2686
2687 /* Loop thru the entries in the shadow PT. */
2688 const RTGCPTR GCPtr = (GCPtrPage >> SHW_PD_SHIFT) << SHW_PD_SHIFT; NOREF(GCPtr);
2689 Log2(("SyncPT: BIG %RGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx} Shw=%RGv GCPhys=%RGp %s\n",
2690 GCPtrPage, PdeSrc.b.u1Present, PdeSrc.b.u1Write, PdeSrc.b.u1User, (uint64_t)PdeSrc.u, GCPtr,
2691 GCPhys, PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
2692 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2693 unsigned iPTDst = 0;
2694 while (iPTDst < RT_ELEMENTS(pPTDst->a))
2695 {
2696 /* Advance ram range list. */
2697 while (pRam && GCPhys > pRam->GCPhysLast)
2698 pRam = pRam->CTX_SUFF(pNext);
2699 if (pRam && GCPhys >= pRam->GCPhys)
2700 {
2701 unsigned iHCPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2702 do
2703 {
2704 /* Make shadow PTE. */
2705 PPGMPAGE pPage = &pRam->aPages[iHCPage];
2706 SHWPTE PteDst;
2707
2708 /* Make sure the RAM has already been allocated. */
2709 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) /** @todo PAGE FLAGS */
2710 {
2711 if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(pPage)))
2712 {
2713# ifdef IN_RING3
2714 int rc = pgmr3PhysGrowRange(pVM, GCPhys);
2715# else
2716 int rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2717# endif
2718 if (rc != VINF_SUCCESS)
2719 return rc;
2720 }
2721 }
2722
2723 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2724 {
2725 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
2726 {
2727 PteDst.u = PGM_PAGE_GET_HCPHYS(pPage) | PteDstBase.u;
2728 PteDst.n.u1Write = 0;
2729 }
2730 else
2731 PteDst.u = 0;
2732 }
2733# ifndef IN_RING0
2734 /*
2735 * Assuming kernel code will be marked as supervisor and not as user level and executed
2736 * using a conforming code selector. Don't check for readonly, as that implies the whole
2737 * 4MB can be code or readonly data. Linux enables write access for its large pages.
2738 */
2739 else if ( !PdeSrc.n.u1User
2740 && CSAMDoesPageNeedScanning(pVM, (RTRCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT))))
2741 PteDst.u = 0;
2742# endif
2743 else
2744 PteDst.u = PGM_PAGE_GET_HCPHYS(pPage) | PteDstBase.u;
2745# ifdef PGMPOOL_WITH_USER_TRACKING
2746 if (PteDst.n.u1Present)
2747 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, pPage->HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst); /** @todo PAGE FLAGS */
2748# endif
2749 /* commit it */
2750 pPTDst->a[iPTDst] = PteDst;
2751 Log4(("SyncPT: BIG %RGv PteDst:{P=%d RW=%d U=%d raw=%08llx}%s\n",
2752 (RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT)), PteDst.n.u1Present, PteDst.n.u1Write, PteDst.n.u1User, (uint64_t)PteDst.u,
2753 PteDst.u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
2754
2755 /* advance */
2756 GCPhys += PAGE_SIZE;
2757 iHCPage++;
2758 iPTDst++;
2759 } while ( iPTDst < RT_ELEMENTS(pPTDst->a)
2760 && GCPhys <= pRam->GCPhysLast);
2761 }
2762 else if (pRam)
2763 {
2764 Log(("Invalid pages at %RGp\n", GCPhys));
2765 do
2766 {
2767 pPTDst->a[iPTDst].u = 0; /* MMIO or invalid page, we must handle them manually. */
2768 GCPhys += PAGE_SIZE;
2769 iPTDst++;
2770 } while ( iPTDst < RT_ELEMENTS(pPTDst->a)
2771 && GCPhys < pRam->GCPhys);
2772 }
2773 else
2774 {
2775 Log(("Invalid pages at %RGp (2)\n", GCPhys));
2776 for ( ; iPTDst < RT_ELEMENTS(pPTDst->a); iPTDst++)
2777 pPTDst->a[iPTDst].u = 0; /* MMIO or invalid page, we must handle them manually. */
2778 }
2779 } /* while more PTEs */
2780 } /* 4KB / 4MB */
2781 }
2782 else
2783 AssertRelease(!PdeDst.n.u1Present);
2784
2785 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2786 if (RT_FAILURE(rc))
2787 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPTFailed));
2788 return rc;
2789
2790#elif (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
2791 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
2792 && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
2793
2794
2795 /*
2796 * Validate input a little bit.
2797 */
2798 int rc = VINF_SUCCESS;
2799# if PGM_SHW_TYPE == PGM_TYPE_32BIT
2800 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2801 PSHWPDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
2802
2803# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2804 /* Fetch the pgm pool shadow descriptor. */
2805 PPGMPOOLPAGE pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
2806 Assert(pShwPde);
2807# endif
2808
2809# elif PGM_SHW_TYPE == PGM_TYPE_PAE
2810# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2811 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2812 PPGMPOOLPAGE pShwPde;
2813 PX86PDPAE pPDDst;
2814 PSHWPDE pPdeDst;
2815
2816 /* Fetch the pgm pool shadow descriptor. */
2817 rc = pgmShwGetPaePoolPagePD(&pVM->pgm.s, GCPtrPage, &pShwPde);
2818 AssertRCSuccessReturn(rc, rc);
2819 Assert(pShwPde);
2820
2821 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
2822 pPdeDst = &pPDDst->a[iPDDst];
2823# else
2824 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - only pool index atm!*/;
2825 PX86PDEPAE pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
2826# endif
2827
2828# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
2829 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
2830 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2831 PX86PDPAE pPDDst;
2832 PX86PDPT pPdptDst;
2833 rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, NULL, &pPdptDst, &pPDDst);
2834 AssertRCSuccessReturn(rc, rc);
2835 Assert(pPDDst);
2836 PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
2837
2838 /* Fetch the pgm pool shadow descriptor. */
2839 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
2840 Assert(pShwPde);
2841
2842# elif PGM_SHW_TYPE == PGM_TYPE_EPT
2843 const unsigned iPdpt = (GCPtrPage >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
2844 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
2845 PEPTPD pPDDst;
2846 PEPTPDPT pPdptDst;
2847
2848 rc = pgmShwGetEPTPDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
2849 if (rc != VINF_SUCCESS)
2850 {
2851 AssertRC(rc);
2852 return rc;
2853 }
2854 Assert(pPDDst);
2855 PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
2856
2857 /* Fetch the pgm pool shadow descriptor. */
2858 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpt].u & EPT_PDPTE_PG_MASK);
2859 Assert(pShwPde);
2860# endif
2861 SHWPDE PdeDst = *pPdeDst;
2862
2863 Assert(!(PdeDst.u & PGM_PDFLAGS_MAPPING));
2864 Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
2865
2866 GSTPDE PdeSrc;
2867 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
2868 PdeSrc.n.u1Present = 1;
2869 PdeSrc.n.u1Write = 1;
2870 PdeSrc.n.u1Accessed = 1;
2871 PdeSrc.n.u1User = 1;
2872
2873 /*
2874 * Allocate & map the page table.
2875 */
2876 PSHWPT pPTDst;
2877 PPGMPOOLPAGE pShwPage;
2878 RTGCPHYS GCPhys;
2879
2880 /* Virtual address = physical address */
2881 GCPhys = GCPtrPage & X86_PAGE_4K_BASE_MASK;
2882# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_EPT || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2883 rc = pgmPoolAlloc(pVM, GCPhys & ~(RT_BIT_64(SHW_PD_SHIFT) - 1), BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage);
2884# else
2885 rc = pgmPoolAlloc(pVM, GCPhys & ~(RT_BIT_64(SHW_PD_SHIFT) - 1), BTH_PGMPOOLKIND_PT_FOR_PT, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
2886# endif
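 /* (The pool key is the first guest-physical byte covered by this shadow PT:
  * GCPhys rounded down to a SHW_PD_SHIFT boundary, i.e. 2MB for PAE/AMD64/EPT
  * shadows and 4MB for 32-bit ones.) */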
2887
2888 if ( rc == VINF_SUCCESS
2889 || rc == VINF_PGM_CACHED_PAGE)
2890 pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2891 else
2892 AssertMsgFailedReturn(("rc=%Rrc\n", rc), VERR_INTERNAL_ERROR);
2893
2894 PdeDst.u &= X86_PDE_AVL_MASK;
2895 PdeDst.u |= pShwPage->Core.Key;
2896 PdeDst.n.u1Present = 1;
2897 PdeDst.n.u1Write = 1;
2898# if PGM_SHW_TYPE == PGM_TYPE_EPT
2899 PdeDst.n.u1Execute = 1;
2900# else
2901 PdeDst.n.u1User = 1;
2902 PdeDst.n.u1Accessed = 1;
2903# endif
2904 *pPdeDst = PdeDst;
2905
2906 rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, GCPtrPage, PGM_SYNC_NR_PAGES, 0 /* page not present */);
2907 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2908 return rc;
2909
2910#else
2911 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE));
2912 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2913 return VERR_INTERNAL_ERROR;
2914#endif
2915}
2916
2917
2918
2919/**
2920 * Prefetch a page/set of pages.
2921 *
2922 * Typically used to sync commonly used pages before entering raw mode
2923 * after a CR3 reload.
2924 *
2925 * @returns VBox status code.
2926 * @param pVM VM handle.
2927 * @param GCPtrPage Page to prefetch.
2928 */
2929PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, RTGCPTR GCPtrPage)
2930{
2931#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
2932 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
2933 /*
2934 * Check that all Guest levels thru the PDE are present, getting the
2935 * PD and PDE in the process.
2936 */
2937 int rc = VINF_SUCCESS;
2938# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
2939# if PGM_GST_TYPE == PGM_TYPE_32BIT
2940 const unsigned iPDSrc = GCPtrPage >> GST_PD_SHIFT;
2941 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
2942# elif PGM_GST_TYPE == PGM_TYPE_PAE
2943 unsigned iPDSrc;
2944# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2945 X86PDPE PdpeSrc;
2946 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
2947# else
2948 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, NULL);
2949# endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
2950 if (!pPDSrc)
2951 return VINF_SUCCESS; /* not present */
2952# elif PGM_GST_TYPE == PGM_TYPE_AMD64
2953 unsigned iPDSrc;
2954 PX86PML4E pPml4eSrc;
2955 X86PDPE PdpeSrc;
2956 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
2957 if (!pPDSrc)
2958 return VINF_SUCCESS; /* not present */
2959# endif
2960 const GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
2961# else
2962 PGSTPD pPDSrc = NULL;
2963 const unsigned iPDSrc = 0;
2964 GSTPDE PdeSrc;
2965
2966 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
2967 PdeSrc.n.u1Present = 1;
2968 PdeSrc.n.u1Write = 1;
2969 PdeSrc.n.u1Accessed = 1;
2970 PdeSrc.n.u1User = 1;
2971# endif
2972
    if (PdeSrc.n.u1Present && PdeSrc.n.u1Accessed)
    {
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
        const X86PDE PdeDst = pgmShwGet32BitPDE(&pVM->pgm.s, GCPtrPage);
# elif PGM_SHW_TYPE == PGM_TYPE_PAE
# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
        const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
        PX86PDPAE      pPDDst;
        X86PDEPAE      PdeDst;
# if PGM_GST_TYPE != PGM_TYPE_PAE
        X86PDPE        PdpeSrc;

        /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
        PdpeSrc.u = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
# endif
        int rc = pgmShwSyncPaePDPtr(pVM, GCPtrPage, &PdpeSrc, &pPDDst);
        if (rc != VINF_SUCCESS)
        {
            AssertRC(rc);
            return rc;
        }
        Assert(pPDDst);
        PdeDst = pPDDst->a[iPDDst];
# else
        const X86PDEPAE PdeDst = pgmShwGetPaePDE(&pVM->pgm.s, GCPtrPage);
# endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */

# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
        const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
        PX86PDPAE      pPDDst;
        X86PDEPAE      PdeDst;

# if PGM_GST_TYPE == PGM_TYPE_PROT
        /* AMD-V nested paging */
        X86PML4E       Pml4eSrc;
        X86PDPE        PdpeSrc;
        PX86PML4E      pPml4eSrc = &Pml4eSrc;

        /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
        Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
        PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
# endif

        int rc = pgmShwSyncLongModePDPtr(pVM, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
        if (rc != VINF_SUCCESS)
        {
            AssertRC(rc);
            return rc;
        }
        Assert(pPDDst);
        PdeDst = pPDDst->a[iPDDst];
# endif
        if (!(PdeDst.u & PGM_PDFLAGS_MAPPING))
        {
            if (!PdeDst.n.u1Present)
                /** r=bird: This guy will set the A bit on the PDE, probably harmless. */
                rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, GCPtrPage);
            else
            {
                /** @note We used to sync PGM_SYNC_NR_PAGES pages, which triggered assertions in CSAM, because
                 *        R/W attributes of nearby pages were reset. Not sure how that could happen. Anyway, it
                 *        makes no sense to prefetch more than one page.
                 */
                rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, GCPtrPage, 1, 0);
                if (RT_SUCCESS(rc))
                    rc = VINF_SUCCESS;
            }
        }
    }
    return rc;

#elif PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
    return VINF_SUCCESS; /* ignore */
#endif
}




/**
 * Syncs a page during a PGMVerifyAccess() call.
 *
 * @returns VBox status code (informational included).
 * @param   pVM         VM handle.
 * @param   GCPtrPage   The address of the page to sync.
 * @param   fPage       The effective guest page flags.
 * @param   uErr        The trap error code.
 */
PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVM pVM, RTGCPTR GCPtrPage, unsigned fPage, unsigned uErr)
{
    LogFlow(("VerifyAccessSyncPage: GCPtrPage=%RGv fPage=%#x uErr=%#x\n", GCPtrPage, fPage, uErr));

    Assert(!HWACCMIsNestedPagingActive(pVM));
#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
    && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT

# ifndef IN_RING0
    if (!(fPage & X86_PTE_US))
    {
        /*
         * Mark this page as safe.
         */
        /** @todo not correct for pages that contain both code and data!! */
        Log(("CSAMMarkPage %RGv; scanned=%d\n", GCPtrPage, true));
        CSAMMarkPage(pVM, (RTRCPTR)GCPtrPage, true);
    }
# endif

    /*
     * Get guest PD and index.
     */
# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
# if PGM_GST_TYPE == PGM_TYPE_32BIT
    const unsigned iPDSrc = GCPtrPage >> GST_PD_SHIFT;
    PGSTPD         pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
# elif PGM_GST_TYPE == PGM_TYPE_PAE
    unsigned       iPDSrc;
# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    X86PDPE        PdpeSrc;
    PGSTPD         pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
# else
    PGSTPD         pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, NULL);
# endif

    if (!pPDSrc)
    {
        Log(("PGMVerifyAccess: access violation for %RGv due to non-present PDPTR\n", GCPtrPage));
        return VINF_EM_RAW_GUEST_TRAP;
    }
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
    unsigned       iPDSrc;
    PX86PML4E      pPml4eSrc;
    X86PDPE        PdpeSrc;
    PGSTPD         pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
    if (!pPDSrc)
    {
        Log(("PGMVerifyAccess: access violation for %RGv due to non-present PDPTR\n", GCPtrPage));
        return VINF_EM_RAW_GUEST_TRAP;
    }
# endif
# else
    PGSTPD         pPDSrc = NULL;
    const unsigned iPDSrc = 0;
# endif
    int rc = VINF_SUCCESS;

    /*
     * First check if the shadow pd is present.
     */
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
    PX86PDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
# elif PGM_SHW_TYPE == PGM_TYPE_PAE
    PX86PDEPAE pPdeDst;
# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
    PX86PDPAE      pPDDst;
# if PGM_GST_TYPE != PGM_TYPE_PAE
    X86PDPE        PdpeSrc;

    /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
    PdpeSrc.u = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
# endif
    rc = pgmShwSyncPaePDPtr(pVM, GCPtrPage, &PdpeSrc, &pPDDst);
    if (rc != VINF_SUCCESS)
    {
        AssertRC(rc);
        return rc;
    }
    Assert(pPDDst);
    pPdeDst = &pPDDst->a[iPDDst];
# else
    pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
# endif
# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
    const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
    PX86PDPAE      pPDDst;
    PX86PDEPAE     pPdeDst;

# if PGM_GST_TYPE == PGM_TYPE_PROT
    /* AMD-V nested paging */
    X86PML4E       Pml4eSrc;
    X86PDPE        PdpeSrc;
    PX86PML4E      pPml4eSrc = &Pml4eSrc;

    /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
    Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
    PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
# endif

    rc = pgmShwSyncLongModePDPtr(pVM, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
    if (rc != VINF_SUCCESS)
    {
        AssertRC(rc);
        return rc;
    }
    Assert(pPDDst);
    pPdeDst = &pPDDst->a[iPDDst];
# endif
    if (!pPdeDst->n.u1Present)
    {
        rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, GCPtrPage);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }

# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    /* Check for dirty bit fault */
    rc = PGM_BTH_NAME(CheckPageFault)(pVM, uErr, pPdeDst, &pPDSrc->a[iPDSrc], GCPtrPage);
    if (rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT)
        Log(("PGMVerifyAccess: success (dirty)\n"));
    else
    {
        GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
#else
    {
        GSTPDE PdeSrc;
        PdeSrc.au32[0]      = 0; /* faked so we don't have to #ifdef everything */
        PdeSrc.n.u1Present  = 1;
        PdeSrc.n.u1Write    = 1;
        PdeSrc.n.u1Accessed = 1;
        PdeSrc.n.u1User     = 1;

#endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
        Assert(rc != VINF_EM_RAW_GUEST_TRAP);
        if (uErr & X86_TRAP_PF_US)
            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncUser));
        else /* supervisor */
            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));

        rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, GCPtrPage, 1, 0);
        if (RT_SUCCESS(rc))
        {
            /* Page was successfully synced */
            Log2(("PGMVerifyAccess: success (sync)\n"));
            rc = VINF_SUCCESS;
        }
        else
        {
            Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", GCPtrPage, rc));
            return VINF_EM_RAW_GUEST_TRAP;
        }
    }
    return rc;

#else /* PGM_GST_TYPE != PGM_TYPE_32BIT */

    AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE));
    return VERR_INTERNAL_ERROR;
#endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
}


#if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
# if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
/**
 * Figures out which kind of shadow page this guest PDE warrants.
 *
 * @returns Shadow page kind.
 * @param   pPdeSrc     The guest PDE in question.
 * @param   cr4         The current guest cr4 value.
 */
DECLINLINE(PGMPOOLKIND) PGM_BTH_NAME(CalcPageKind)(const GSTPDE *pPdeSrc, uint32_t cr4)
{
# if PGM_GST_TYPE == PGM_TYPE_AMD64
    if (!pPdeSrc->n.u1Size)
# else
    if (!pPdeSrc->n.u1Size || !(cr4 & X86_CR4_PSE))
# endif
        return BTH_PGMPOOLKIND_PT_FOR_PT;
    //switch (pPdeSrc->u & (X86_PDE4M_RW | X86_PDE4M_US /*| X86_PDE4M_PAE_NX*/))
    //{
    //    case 0:
    //        return BTH_PGMPOOLKIND_PT_FOR_BIG_RO;
    //    case X86_PDE4M_RW:
    //        return BTH_PGMPOOLKIND_PT_FOR_BIG_RW;
    //    case X86_PDE4M_US:
    //        return BTH_PGMPOOLKIND_PT_FOR_BIG_US;
    //    case X86_PDE4M_RW | X86_PDE4M_US:
    //        return BTH_PGMPOOLKIND_PT_FOR_BIG_RW_US;
# if 0
    //    case X86_PDE4M_PAE_NX:
    //        return BTH_PGMPOOLKIND_PT_FOR_BIG_NX;
    //    case X86_PDE4M_RW | X86_PDE4M_PAE_NX:
    //        return BTH_PGMPOOLKIND_PT_FOR_BIG_RW_NX;
    //    case X86_PDE4M_US | X86_PDE4M_PAE_NX:
    //        return BTH_PGMPOOLKIND_PT_FOR_BIG_US_NX;
    //    case X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PAE_NX:
    //        return BTH_PGMPOOLKIND_PT_FOR_BIG_RW_US_NX;
# endif
    return BTH_PGMPOOLKIND_PT_FOR_BIG;
    //}
}
# endif
#endif

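/* Note: MY_STAM_COUNTER_INC is stubbed out below, so the SyncCR3 statistics
   counters referenced through it are currently compiled into no-ops. */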
#undef MY_STAM_COUNTER_INC
#define MY_STAM_COUNTER_INC(a) do { } while (0)


/**
 * Syncs the paging hierarchy starting at CR3.
 *
 * @returns VBox status code, no specials.
 * @param   pVM         The virtual machine.
 * @param   cr0         Guest context CR0 register
 * @param   cr3         Guest context CR3 register
 * @param   cr4         Guest context CR4 register
 * @param   fGlobal     Including global page directories or not
 */
PGM_BTH_DECL(int, SyncCR3)(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
{
    if (VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
        fGlobal = true; /* Change this CR3 reload to be a global one. */

    LogFlow(("SyncCR3 %d\n", fGlobal));

#if PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
    /*
     * Update page access handlers.
     * The virtual handlers are always flushed, while the physical ones are only updated on demand.
     * WARNING: We are incorrectly not doing global flushing on virtual handler updates. We'll
     * have to look into that later because it will have a bad influence on performance.
     * @note SvL: There's no need for that. Just invalidate the virtual range(s).
     *   bird: Yes, but that won't work for aliases.
     */
    /** @todo this MUST go away. See #1557. */
    STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3Handlers), h);
    PGM_GST_NAME(HandlerVirtualUpdate)(pVM, cr4);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3Handlers), h);
#endif

#if PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
    /*
     * Nested / EPT - almost no work.
     */
    /** @todo check if this is really necessary; the call does it as well... */
    HWACCMFlushTLB(pVM);
    return VINF_SUCCESS;

#elif PGM_SHW_TYPE == PGM_TYPE_AMD64
    /*
     * AMD64 (Shw & Gst) - No need to check all paging levels; we zero
     * out the shadow parts when the guest modifies its tables.
     */
    return VINF_SUCCESS;

#else /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */

# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
# ifdef PGM_WITHOUT_MAPPINGS
    Assert(pVM->pgm.s.fMappingsFixed);
    return VINF_SUCCESS;
# else
    /* Nothing to do when mappings are fixed. */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;

    int rc = PGMMapResolveConflicts(pVM);
    Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);
    if (rc == VINF_PGM_SYNC_CR3)
    {
        LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
        return VINF_PGM_SYNC_CR3;
    }
# endif
    return VINF_SUCCESS;
# else
    /*
     * PAE and 32-bit legacy mode (shadow).
     * (Guest PAE, 32-bit legacy, protected and real modes.)
     */
    Assert(fGlobal || (cr4 & X86_CR4_PGE));
    MY_STAM_COUNTER_INC(fGlobal ? &pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3Global) : &pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3NotGlobal));

# if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
    bool const fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);

    /*
     * Get page directory addresses.
     */
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
    PX86PDE pPDEDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, 0);
# else /* PGM_SHW_TYPE == PGM_TYPE_PAE */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
    PX86PDEPAE pPDEDst = NULL;
# endif
# endif

# if PGM_GST_TYPE == PGM_TYPE_32BIT
    PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
    Assert(pPDSrc);
# if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    Assert(PGMPhysGCPhys2R3PtrAssert(pVM, (RTGCPHYS)(cr3 & GST_CR3_PAGE_MASK), sizeof(*pPDSrc)) == (RTR3PTR)pPDSrc);
# endif
# endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */

    /*
     * Iterate the CR3 page.
     */
    PPGMMAPPING pMapping;
    unsigned    iPdNoMapping;
    const bool  fRawR0Enabled = EMIsRawRing0Enabled(pVM);
    PPGMPOOL    pPool = pVM->pgm.s.CTX_SUFF(pPool);

    /* Only check mappings if they are supposed to be put into the shadow page table. */
    if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
    {
        pMapping     = pVM->pgm.s.CTX_SUFF(pMappings);
        iPdNoMapping = (pMapping) ? (pMapping->GCPtr >> GST_PD_SHIFT) : ~0U;
    }
    else
    {
        pMapping     = 0;
        iPdNoMapping = ~0U;
    }

# if PGM_GST_TYPE == PGM_TYPE_PAE
    for (uint64_t iPdpt = 0; iPdpt < GST_PDPE_ENTRIES; iPdpt++)
    {
        unsigned   iPDSrc;
        X86PDPE    PdpeSrc;
        PGSTPD     pPDSrc   = pgmGstGetPaePDPtr(&pVM->pgm.s, iPdpt << X86_PDPT_SHIFT, &iPDSrc, &PdpeSrc);
        PX86PDEPAE pPDEDst  = pgmShwGetPaePDEPtr(&pVM->pgm.s, iPdpt << X86_PDPT_SHIFT);
        PX86PDPT   pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s);

        if (pPDSrc == NULL)
        {
            /* PDPE not present */
            if (pPdptDst->a[iPdpt].n.u1Present)
            {
                LogFlow(("SyncCR3: guest PDPE %lld not present; clear shw pdpe\n", iPdpt));
                /* for each page directory entry */
                for (unsigned iPD = 0; iPD < RT_ELEMENTS(pPDSrc->a); iPD++)
                {
                    if (    pPDEDst[iPD].n.u1Present
                        &&  !(pPDEDst[iPD].u & PGM_PDFLAGS_MAPPING))
                    {
                        pgmPoolFree(pVM, pPDEDst[iPD].u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPdpt * X86_PG_PAE_ENTRIES + iPD);
                        pPDEDst[iPD].u = 0;
                    }
                }
            }
            if (!(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
                pPdptDst->a[iPdpt].n.u1Present = 0;
            continue;
        }
# else /* PGM_GST_TYPE != PGM_TYPE_PAE */
    {
# endif /* PGM_GST_TYPE != PGM_TYPE_PAE */
        for (unsigned iPD = 0; iPD < RT_ELEMENTS(pPDSrc->a); iPD++)
        {
# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
            if ((iPD & 255) == 0) /* Start of new PD. */
                pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, (uint32_t)iPD << GST_PD_SHIFT);
# endif
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
            Assert(pgmShwGet32BitPDEPtr(&pVM->pgm.s, (uint32_t)iPD << SHW_PD_SHIFT) == pPDEDst);
# elif PGM_SHW_TYPE == PGM_TYPE_PAE
# if defined(VBOX_STRICT) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) /* Unfortunately not reliable with PGMR0DynMap and multiple VMs. */
            RTGCPTR GCPtrStrict = (uint32_t)iPD << GST_PD_SHIFT;
# if PGM_GST_TYPE == PGM_TYPE_PAE
            GCPtrStrict |= iPdpt << X86_PDPT_SHIFT;
# endif
            AssertMsg(pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrStrict) == pPDEDst, ("%p vs %p (%RGv)\n", pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrStrict), pPDEDst, GCPtrStrict));
# endif /* VBOX_STRICT */
# endif
            GSTPDE PdeSrc = pPDSrc->a[iPD];
            if (    PdeSrc.n.u1Present
                &&  (PdeSrc.n.u1User || fRawR0Enabled))
            {
# if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
      || PGM_GST_TYPE == PGM_TYPE_PAE) \
  && !defined(PGM_WITHOUT_MAPPINGS)

                /*
                 * Check for conflicts with GC mappings.
                 */
# if PGM_GST_TYPE == PGM_TYPE_PAE
                if (iPD + iPdpt * X86_PG_PAE_ENTRIES == iPdNoMapping)
# else
                if (iPD == iPdNoMapping)
# endif
                {
                    if (pVM->pgm.s.fMappingsFixed)
                    {
                        /* It's fixed, just skip the mapping. */
                        const unsigned cPTs = pMapping->cb >> GST_PD_SHIFT;
                        Assert(PGM_GST_TYPE == PGM_TYPE_32BIT || (iPD + cPTs - 1) / X86_PG_PAE_ENTRIES == iPD / X86_PG_PAE_ENTRIES);
                        iPD += cPTs - 1;
# if PGM_SHW_TYPE != PGM_GST_TYPE /* SHW==PAE && GST==32BIT */
                        pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, (uint32_t)(iPD + 1) << GST_PD_SHIFT);
# else
                        pPDEDst += cPTs;
# endif
                        pMapping = pMapping->CTX_SUFF(pNext);
                        iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
                        continue;
                    }
# ifdef IN_RING3
# if PGM_GST_TYPE == PGM_TYPE_32BIT
                    int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT);
# elif PGM_GST_TYPE == PGM_TYPE_PAE
                    int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPdpt << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));
# endif
                    if (RT_FAILURE(rc))
                        return rc;

                    /*
                     * Update iPdNoMapping and pMapping.
                     */
                    pMapping = pVM->pgm.s.pMappingsR3;
                    while (pMapping && pMapping->GCPtr < (iPD << GST_PD_SHIFT))
                        pMapping = pMapping->pNextR3;
                    iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
# else /* !IN_RING3 */
                    LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
                    return VINF_PGM_SYNC_CR3;
# endif /* !IN_RING3 */
                }
# else /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
                Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
# endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */

                /*
                 * Sync page directory entry.
                 *
                 * The current approach is to allocate the page table but to set
                 * the entry to not-present and postpone the page table syncing till
                 * it's actually used.
                 */
# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
                for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
# elif PGM_GST_TYPE == PGM_TYPE_PAE
                const unsigned iPdShw = iPD + iPdpt * X86_PG_PAE_ENTRIES; NOREF(iPdShw);
# else
                const unsigned iPdShw = iPD; NOREF(iPdShw);
# endif
                {
                    SHWPDE PdeDst = *pPDEDst;
                    if (PdeDst.n.u1Present)
                    {
                        PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK);
                        RTGCPHYS     GCPhys;
                        if (    !PdeSrc.b.u1Size
                            ||  !fBigPagesSupported)
                        {
                            GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
                            /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
                            GCPhys |= i * (PAGE_SIZE / 2);
# endif
                        }
                        else
                        {
                            GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
                            /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
                            GCPhys |= i * X86_PAGE_2M_SIZE;
# endif
                        }

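                        /* The existing shadow page table can be reused when it still maps the
                           same guest physical address and is of the kind CalcPageKind expects;
                           in addition it must either be cached or (on a non-global sync) be a
                           global page, and the US/RW attributes must match modulo the
                           PGM_PDFLAGS_TRACK_DIRTY write-tracking encoding. */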
                        if (    pShwPage->GCPhys == GCPhys
                            &&  pShwPage->enmKind == PGM_BTH_NAME(CalcPageKind)(&PdeSrc, cr4)
                            &&  (   pShwPage->fCached
                                 || (   !fGlobal
                                     && (   false
# ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
                                         || (   (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
                                             && (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE)) /* global 2/4MB page. */
                                         || (   !pShwPage->fSeenNonGlobal
                                             && (cr4 & X86_CR4_PGE))
# endif
                                        )
                                    )
                                )
                            &&  (   (PdeSrc.u & (X86_PDE_US | X86_PDE_RW)) == (PdeDst.u & (X86_PDE_US | X86_PDE_RW))
                                 || (   fBigPagesSupported
                                     &&     ((PdeSrc.u & (X86_PDE_US | X86_PDE4M_PS | X86_PDE4M_D)) | PGM_PDFLAGS_TRACK_DIRTY)
                                        ==  ((PdeDst.u & (X86_PDE_US | X86_PDE_RW | PGM_PDFLAGS_TRACK_DIRTY)) | X86_PDE4M_PS))
                                )
                           )
                        {
# ifdef VBOX_WITH_STATISTICS
                            if (    !fGlobal
                                &&  (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
                                &&  (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE))
                                MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstSkippedGlobalPD));
                            else if (!fGlobal && !pShwPage->fSeenNonGlobal && (cr4 & X86_CR4_PGE))
                                MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstSkippedGlobalPT));
                            else
                                MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstCacheHit));
# endif /* VBOX_WITH_STATISTICS */
                            /** @todo a replacement strategy isn't really needed unless we're using a very small pool < 512 pages.
                             * The whole ageing stuff should be put in yet another set of #ifdefs. For now, let's just skip it. */
                            //# ifdef PGMPOOL_WITH_CACHE
                            //    pgmPoolCacheUsed(pPool, pShwPage);
                            //# endif
                        }
                        else
                        {
                            pgmPoolFreeByPage(pPool, pShwPage, SHW_POOL_ROOT_IDX, iPdShw);
                            pPDEDst->u = 0;
                            MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstFreed));
                        }
                    }
                    else
                        MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstNotPresent));

                    /* advance */
                    pPDEDst++;
                } /* foreach 2MB PAE PDE in 4MB guest PDE */
            }
# if PGM_GST_TYPE == PGM_TYPE_PAE
            else if (iPD + iPdpt * X86_PG_PAE_ENTRIES != iPdNoMapping)
# else
            else if (iPD != iPdNoMapping)
# endif
            {
                /*
                 * Check if there is any page directory to mark not present here.
                 */
# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
                for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
# elif PGM_GST_TYPE == PGM_TYPE_PAE
                const unsigned iPdShw = iPD + iPdpt * X86_PG_PAE_ENTRIES;
# else
                const unsigned iPdShw = iPD;
# endif
                {
                    if (pPDEDst->n.u1Present)
                    {
                        pgmPoolFree(pVM, pPDEDst->u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPdShw);
                        pPDEDst->u = 0;
                        MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstFreedSrcNP));
                    }
                    pPDEDst++;
                }
            }
            else
            {
# if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
      || PGM_GST_TYPE == PGM_TYPE_PAE) \
  && !defined(PGM_WITHOUT_MAPPINGS)

                const unsigned cPTs = pMapping->cb >> GST_PD_SHIFT;

                Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
                if (pVM->pgm.s.fMappingsFixed)
                {
                    /* It's fixed, just skip the mapping. */
                    pMapping = pMapping->CTX_SUFF(pNext);
                    iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
                }
                else
                {
                    /*
                     * Check for conflicts for subsequent pagetables
                     * and advance to the next mapping.
                     */
                    iPdNoMapping = ~0U;
                    unsigned iPT = cPTs;
                    while (iPT-- > 1)
                    {
                        if (    pPDSrc->a[iPD + iPT].n.u1Present
                            &&  (pPDSrc->a[iPD + iPT].n.u1User || fRawR0Enabled))
                        {
# ifdef IN_RING3
# if PGM_GST_TYPE == PGM_TYPE_32BIT
                            int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT);
# elif PGM_GST_TYPE == PGM_TYPE_PAE
                            int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPdpt << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));
# endif
                            if (RT_FAILURE(rc))
                                return rc;

                            /*
                             * Update iPdNoMapping and pMapping.
                             */
                            pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
                            while (pMapping && pMapping->GCPtr < (iPD << GST_PD_SHIFT))
                                pMapping = pMapping->CTX_SUFF(pNext);
                            iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
                            break;
# else
                            LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
                            return VINF_PGM_SYNC_CR3;
# endif
                        }
                    }
                    if (iPdNoMapping == ~0U && pMapping)
                    {
                        pMapping = pMapping->CTX_SUFF(pNext);
                        if (pMapping)
                            iPdNoMapping = pMapping->GCPtr >> GST_PD_SHIFT;
                    }
                }

                /* advance. */
                Assert(PGM_GST_TYPE == PGM_TYPE_32BIT || (iPD + cPTs - 1) / X86_PG_PAE_ENTRIES == iPD / X86_PG_PAE_ENTRIES);
                iPD += cPTs - 1;
# if PGM_SHW_TYPE != PGM_GST_TYPE /* SHW==PAE && GST==32BIT */
                pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, (uint32_t)(iPD + 1) << GST_PD_SHIFT);
# else
                pPDEDst += cPTs;
# endif
# if PGM_GST_TYPE != PGM_SHW_TYPE
                AssertCompile(PGM_GST_TYPE == PGM_TYPE_32BIT && PGM_SHW_TYPE == PGM_TYPE_PAE);
# endif
# else /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
                Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
# endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
            }

        } /* for iPD */
    } /* for each PDPTE (PAE) */
    return VINF_SUCCESS;

# else /* guest real and protected mode */
    return VINF_SUCCESS;
# endif
#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */
}




#ifdef VBOX_STRICT
#ifdef IN_RC
# undef AssertMsgFailed
# define AssertMsgFailed Log
#endif
#ifdef IN_RING3
# include <VBox/dbgf.h>

/**
 * Dumps a page table hierarchy using only physical addresses and cr4/lm flags.
 *
 * @returns VBox status code (VINF_SUCCESS).
 * @param   pVM         The VM handle.
 * @param   cr3         The root of the hierarchy.
 * @param   cr4         The cr4, only PAE and PSE are currently used.
 * @param   fLongMode   Set if long mode, false if not long mode.
 * @param   cMaxDepth   Number of levels to dump.
 * @param   pHlp        Pointer to the output functions.
 */
__BEGIN_DECLS
VMMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint32_t cr3, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp);
__END_DECLS

#endif

/**
 * Checks that the shadow page table is in sync with the guest one.
 *
 * @returns The number of errors.
 * @param   pVM     The virtual machine.
 * @param   cr3     Guest context CR3 register
 * @param   cr4     Guest context CR4 register
 * @param   GCPtr   Where to start. Defaults to 0.
 * @param   cb      How much to check. Defaults to everything.
 */
PGM_BTH_DECL(unsigned, AssertCR3)(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb)
{
#if PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
    return 0;
#else
    unsigned cErrors = 0;

#if PGM_GST_TYPE == PGM_TYPE_PAE
    /** @todo currently broken; crashes below somewhere */
    AssertFailed();
#endif

#if PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64

# if PGM_GST_TYPE == PGM_TYPE_AMD64
    bool fBigPagesSupported = true;
# else
    bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
# endif
    PPGM     pPGM = &pVM->pgm.s;
    RTGCPHYS GCPhysGst;    /* page address derived from the guest page tables. */
    RTHCPHYS HCPhysShw;    /* page address derived from the shadow page tables. */
# ifndef IN_RING0
    RTHCPHYS HCPhys;       /* general usage. */
# endif
    int      rc;

    /*
     * Check that the Guest CR3 and all its mappings are correct.
     */
    AssertMsgReturn(pPGM->GCPhysCR3 == (cr3 & GST_CR3_PAGE_MASK),
                    ("Invalid GCPhysCR3=%RGp cr3=%RGp\n", pPGM->GCPhysCR3, (RTGCPHYS)cr3),
                    false);
# if !defined(IN_RING0) && PGM_GST_TYPE != PGM_TYPE_AMD64
# if PGM_GST_TYPE == PGM_TYPE_32BIT
    rc = PGMShwGetPage(pVM, (RTGCPTR)pPGM->pGst32BitPdRC, NULL, &HCPhysShw);
# else
    rc = PGMShwGetPage(pVM, (RTGCPTR)pPGM->pGstPaePdptRC, NULL, &HCPhysShw);
# endif
    AssertRCReturn(rc, 1);
    HCPhys = NIL_RTHCPHYS;
    rc = pgmRamGCPhys2HCPhys(pPGM, cr3 & GST_CR3_PAGE_MASK, &HCPhys);
    AssertMsgReturn(HCPhys == HCPhysShw, ("HCPhys=%RHp HCPhysShw=%RHp (cr3)\n", HCPhys, HCPhysShw), false);
# if PGM_GST_TYPE == PGM_TYPE_32BIT && defined(IN_RING3)
    RTGCPHYS GCPhys;
    rc = PGMR3DbgR3Ptr2GCPhys(pVM, pPGM->pGst32BitPdR3, &GCPhys);
    AssertRCReturn(rc, 1);
    AssertMsgReturn((cr3 & GST_CR3_PAGE_MASK) == GCPhys, ("GCPhys=%RGp cr3=%RGp\n", GCPhys, (RTGCPHYS)cr3), false);
# endif
# endif /* !IN_RING0 */

    /*
     * Get and check the Shadow CR3.
     */
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
    unsigned cPDEs      = X86_PG_ENTRIES;
    unsigned cIncrement = X86_PG_ENTRIES * PAGE_SIZE;
# elif PGM_SHW_TYPE == PGM_TYPE_PAE
# if PGM_GST_TYPE == PGM_TYPE_32BIT
    unsigned cPDEs      = X86_PG_PAE_ENTRIES * 4;   /* treat it as a 2048 entry table. */
# else
    unsigned cPDEs      = X86_PG_PAE_ENTRIES;
# endif
    unsigned cIncrement = X86_PG_PAE_ENTRIES * PAGE_SIZE;
# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
    unsigned cPDEs      = X86_PG_PAE_ENTRIES;
    unsigned cIncrement = X86_PG_PAE_ENTRIES * PAGE_SIZE;
# endif
    if (cb != ~(RTGCPTR)0)
        cPDEs = RT_MIN(cb >> SHW_PD_SHIFT, 1);

/** @todo call the other two PGMAssert*() functions. */

# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
# endif

# if PGM_GST_TYPE == PGM_TYPE_AMD64
    unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;

    for (; iPml4 < X86_PG_PAE_ENTRIES; iPml4++)
    {
        PPGMPOOLPAGE pShwPdpt = NULL;
        PX86PML4E    pPml4eSrc;
        PX86PML4E    pPml4eDst;
        RTGCPHYS     GCPhysPdptSrc;

        pPml4eSrc = pgmGstGetLongModePML4EPtr(&pVM->pgm.s, iPml4);
        pPml4eDst = pgmShwGetLongModePML4EPtr(&pVM->pgm.s, iPml4);

        /* Fetch the pgm pool shadow descriptor if the shadow pml4e is present. */
        if (!pPml4eDst->n.u1Present)
        {
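            /* Not present: skip the whole 512 GB range this PML4E covers (512 * 512 * 2 MB). */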
            GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
            continue;
        }

        pShwPdpt = pgmPoolGetPage(pPool, pPml4eDst->u & X86_PML4E_PG_MASK);
        GCPhysPdptSrc = pPml4eSrc->u & X86_PML4E_PG_MASK_FULL;

        if (pPml4eSrc->n.u1Present != pPml4eDst->n.u1Present)
        {
            AssertMsgFailed(("Present bit doesn't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
            GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
            cErrors++;
            continue;
        }

        if (GCPhysPdptSrc != pShwPdpt->GCPhys)
        {
            AssertMsgFailed(("Physical address doesn't match! iPml4 %d pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4, pPml4eDst->u, pPml4eSrc->u, pShwPdpt->GCPhys, GCPhysPdptSrc));
            GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
            cErrors++;
            continue;
        }

        if (    pPml4eDst->n.u1User      != pPml4eSrc->n.u1User
            ||  pPml4eDst->n.u1Write     != pPml4eSrc->n.u1Write
            ||  pPml4eDst->n.u1NoExecute != pPml4eSrc->n.u1NoExecute)
        {
            AssertMsgFailed(("User/Write/NoExec bits don't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
            GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
            cErrors++;
            continue;
        }
# else /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
    {
# endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 */

# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
        /*
         * Check the PDPTEs too.
         */
        unsigned iPdpt = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;

        for (; iPdpt <= SHW_PDPT_MASK; iPdpt++)
        {
            unsigned     iPDSrc;
            PPGMPOOLPAGE pShwPde = NULL;
            PX86PDPE     pPdpeDst;
            RTGCPHYS     GCPhysPdeSrc;
# if PGM_GST_TYPE == PGM_TYPE_PAE
            X86PDPE      PdpeSrc;
            PGSTPD       pPDSrc   = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtr, &iPDSrc, &PdpeSrc);
            PX86PDPT     pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
# else
            PX86PML4E    pPml4eSrc;
            X86PDPE      PdpeSrc;
            PX86PDPT     pPdptDst;
            PX86PDPAE    pPDDst;
            PGSTPD       pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtr, &pPml4eSrc, &PdpeSrc, &iPDSrc);

            rc = pgmShwGetLongModePDPtr(pVM, GCPtr, NULL, &pPdptDst, &pPDDst);
            if (rc != VINF_SUCCESS)
            {
                AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT, ("Unexpected rc=%Rrc\n", rc));
                GCPtr += 512 * _2M;
                continue;   /* next PDPTE */
            }
            Assert(pPDDst);
# endif
            Assert(iPDSrc == 0);

            pPdpeDst = &pPdptDst->a[iPdpt];

            if (!pPdpeDst->n.u1Present)
            {
                GCPtr += 512 * _2M;
                continue;   /* next PDPTE */
            }

            pShwPde      = pgmPoolGetPage(pPool, pPdpeDst->u & X86_PDPE_PG_MASK);
            GCPhysPdeSrc = PdpeSrc.u & X86_PDPE_PG_MASK;

            if (pPdpeDst->n.u1Present != PdpeSrc.n.u1Present)
            {
                AssertMsgFailed(("Present bit doesn't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));
                GCPtr += 512 * _2M;
                cErrors++;
                continue;
            }

            if (GCPhysPdeSrc != pShwPde->GCPhys)
            {
# if PGM_GST_TYPE == PGM_TYPE_AMD64
                AssertMsgFailed(("Physical address doesn't match! iPml4 %d iPdpt %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4, iPdpt, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
# else
                AssertMsgFailed(("Physical address doesn't match! iPdpt %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPdpt, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
# endif
                GCPtr += 512 * _2M;
                cErrors++;
                continue;
            }

# if PGM_GST_TYPE == PGM_TYPE_AMD64
            if (    pPdpeDst->lm.u1User      != PdpeSrc.lm.u1User
                ||  pPdpeDst->lm.u1Write     != PdpeSrc.lm.u1Write
                ||  pPdpeDst->lm.u1NoExecute != PdpeSrc.lm.u1NoExecute)
            {
                AssertMsgFailed(("User/Write/NoExec bits don't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));
                GCPtr += 512 * _2M;
                cErrors++;
                continue;
            }
# endif

# else /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
        {
# endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            GSTPD const *pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
            PCX86PD      pPDDst = pgmShwGet32BitPDPtr(&pVM->pgm.s);
# endif
# endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */
            /*
             * Iterate the shadow page directory.
             */
            GCPtr = (GCPtr >> SHW_PD_SHIFT) << SHW_PD_SHIFT;
            unsigned iPDDst = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;

            for (;
                 iPDDst < cPDEs;
                 iPDDst++, GCPtr += cIncrement)
            {
# if PGM_SHW_TYPE == PGM_TYPE_PAE
                const SHWPDE PdeDst = *pgmShwGetPaePDEPtr(pPGM, GCPtr);
# else
                const SHWPDE PdeDst = pPDDst->a[iPDDst];
# endif
                if (PdeDst.u & PGM_PDFLAGS_MAPPING)
                {
                    Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
                    if ((PdeDst.u & X86_PDE_AVL_MASK) != PGM_PDFLAGS_MAPPING)
                    {
                        AssertMsgFailed(("Mapping shall only have PGM_PDFLAGS_MAPPING set! PdeDst.u=%#RX64\n", (uint64_t)PdeDst.u));
                        cErrors++;
                        continue;
                    }
                }
                else if (   (PdeDst.u & X86_PDE_P)
                         || ((PdeDst.u & (X86_PDE_P | PGM_PDFLAGS_TRACK_DIRTY)) == (X86_PDE_P | PGM_PDFLAGS_TRACK_DIRTY))
                        )
                {
                    HCPhysShw = PdeDst.u & SHW_PDE_PG_MASK;
                    PPGMPOOLPAGE pPoolPage = pgmPoolGetPageByHCPhys(pVM, HCPhysShw);
                    if (!pPoolPage)
                    {
                        AssertMsgFailed(("Invalid page table address %RHp at %RGv! PdeDst=%#RX64\n",
                                         HCPhysShw, GCPtr, (uint64_t)PdeDst.u));
                        cErrors++;
                        continue;
                    }
                    const SHWPT *pPTDst = (const SHWPT *)PGMPOOL_PAGE_2_PTR(pVM, pPoolPage);

                    if (PdeDst.u & (X86_PDE4M_PWT | X86_PDE4M_PCD))
                    {
                        AssertMsgFailed(("PDE flags PWT and/or PCD is set at %RGv! These flags are not virtualized! PdeDst=%#RX64\n",
                                         GCPtr, (uint64_t)PdeDst.u));
                        cErrors++;
                    }

                    if (PdeDst.u & (X86_PDE4M_G | X86_PDE4M_D))
                    {
                        AssertMsgFailed(("4K PDE reserved flags at %RGv! PdeDst=%#RX64\n",
                                         GCPtr, (uint64_t)PdeDst.u));
                        cErrors++;
                    }

                    const GSTPDE PdeSrc = pPDSrc->a[(iPDDst >> (GST_PD_SHIFT - SHW_PD_SHIFT)) & GST_PD_MASK];
                    if (!PdeSrc.n.u1Present)
                    {
                        AssertMsgFailed(("Guest PDE at %RGv is not present! PdeDst=%#RX64 PdeSrc=%#RX64\n",
                                         GCPtr, (uint64_t)PdeDst.u, (uint64_t)PdeSrc.u));
                        cErrors++;
                        continue;
                    }

                    if (    !PdeSrc.b.u1Size
                        ||  !fBigPagesSupported)
                    {
                        GCPhysGst = PdeSrc.u & GST_PDE_PG_MASK;
# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
                        GCPhysGst |= (iPDDst & 1) * (PAGE_SIZE / 2);
# endif
                    }
                    else
                    {
# if PGM_GST_TYPE == PGM_TYPE_32BIT
                        if (PdeSrc.u & X86_PDE4M_PG_HIGH_MASK)
                        {
                            AssertMsgFailed(("Guest PDE at %RGv is using PSE36 or similar! PdeSrc=%#RX64\n",
                                             GCPtr, (uint64_t)PdeSrc.u));
                            cErrors++;
                            continue;
                        }
# endif
                        GCPhysGst = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
                        GCPhysGst |= GCPtr & RT_BIT(X86_PAGE_2M_SHIFT);
# endif
                    }

                    if (    pPoolPage->enmKind
                        !=  (!PdeSrc.b.u1Size || !fBigPagesSupported ? BTH_PGMPOOLKIND_PT_FOR_PT : BTH_PGMPOOLKIND_PT_FOR_BIG))
                    {
                        AssertMsgFailed(("Invalid shadow page table kind %d at %RGv! PdeSrc=%#RX64\n",
                                         pPoolPage->enmKind, GCPtr, (uint64_t)PdeSrc.u));
                        cErrors++;
                    }

                    PPGMPAGE pPhysPage = pgmPhysGetPage(pPGM, GCPhysGst);
                    if (!pPhysPage)
                    {
                        AssertMsgFailed(("Cannot find guest physical address %RGp in the PDE at %RGv! PdeSrc=%#RX64\n",
                                         GCPhysGst, GCPtr, (uint64_t)PdeSrc.u));
                        cErrors++;
                        continue;
                    }

                    if (GCPhysGst != pPoolPage->GCPhys)
                    {
                        AssertMsgFailed(("GCPhysGst=%RGp != pPage->GCPhys=%RGp at %RGv\n",
                                         GCPhysGst, pPoolPage->GCPhys, GCPtr));
                        cErrors++;
                        continue;
                    }

                    if (    !PdeSrc.b.u1Size
                        ||  !fBigPagesSupported)
                    {
                        /*
                         * Page Table.
                         */
                        const GSTPT *pPTSrc;
                        rc = PGM_GCPHYS_2_PTR(pVM, GCPhysGst & ~(RTGCPHYS)(PAGE_SIZE - 1), &pPTSrc);
                        if (RT_FAILURE(rc))
                        {
                            AssertMsgFailed(("Cannot map/convert guest physical address %RGp in the PDE at %RGv! PdeSrc=%#RX64\n",
                                             GCPhysGst, GCPtr, (uint64_t)PdeSrc.u));
                            cErrors++;
                            continue;
                        }
                        if (    (PdeSrc.u & (X86_PDE_P | X86_PDE_US | X86_PDE_RW/* | X86_PDE_A*/))
                            !=  (PdeDst.u & (X86_PDE_P | X86_PDE_US | X86_PDE_RW/* | X86_PDE_A*/)))
                        {
                            /// @todo We get here a lot on out-of-sync CR3 entries. The access handler should zap them to avoid false alarms here!
                            // (This problem will go away when/if we shadow multiple CR3s.)
                            AssertMsgFailed(("4K PDE flags mismatch at %RGv! PdeSrc=%#RX64 PdeDst=%#RX64\n",
                                             GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
                            cErrors++;
                            continue;
                        }
                        if (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)
                        {
                            AssertMsgFailed(("4K PDEs cannot have PGM_PDFLAGS_TRACK_DIRTY set! GCPtr=%RGv PdeDst=%#RX64\n",
                                             GCPtr, (uint64_t)PdeDst.u));
                            cErrors++;
                            continue;
                        }

                        /* iterate the page table. */
# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
                        /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
                        const unsigned offPTSrc = ((GCPtr >> SHW_PD_SHIFT) & 1) * 512;
# else
                        const unsigned offPTSrc = 0;
# endif
                        for (unsigned iPT = 0, off = 0;
                             iPT < RT_ELEMENTS(pPTDst->a);
                             iPT++, off += PAGE_SIZE)
                        {
                            const SHWPTE PteDst = pPTDst->a[iPT];

                            /* skip not-present entries. */
                            if (!(PteDst.u & (X86_PTE_P | PGM_PTFLAGS_TRACK_DIRTY))) /** @todo deal with ALL handlers and CSAM !P pages! */
                                continue;
                            Assert(PteDst.n.u1Present);

                            const GSTPTE PteSrc = pPTSrc->a[iPT + offPTSrc];
                            if (!PteSrc.n.u1Present)
                            {
# ifdef IN_RING3
                                PGMAssertHandlerAndFlagsInSync(pVM);
                                PGMR3DumpHierarchyGC(pVM, cr3, cr4, (PdeSrc.u & GST_PDE_PG_MASK));
# endif
                                AssertMsgFailed(("Out of sync (!P) PTE at %RGv! PteSrc=%#RX64 PteDst=%#RX64 pPTSrc=%RGv iPTSrc=%x PdeSrc=%x physpte=%RGp\n",
                                                 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u, pPTSrc, iPT + offPTSrc, PdeSrc.au32[0],
                                                 (PdeSrc.u & GST_PDE_PG_MASK) + (iPT + offPTSrc)*sizeof(PteSrc)));
                                cErrors++;
                                continue;
                            }

                            uint64_t fIgnoreFlags = GST_PTE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_G | X86_PTE_D | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT;
# if 1 /** @todo sync accessed bit properly... */
                            fIgnoreFlags |= X86_PTE_A;
# endif
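                            /* fIgnoreFlags collects the PTE bits deliberately left out of the
                               guest/shadow comparison below: the physical address bits and the
                               attributes the shadow tables do not propagate 1:1. */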

                            /* match the physical addresses */
                            HCPhysShw = PteDst.u & SHW_PTE_PG_MASK;
                            GCPhysGst = PteSrc.u & GST_PTE_PG_MASK;

# ifdef IN_RING3
                            rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysGst, &HCPhys);
                            if (RT_FAILURE(rc))
                            {
                                if (HCPhysShw != MMR3PageDummyHCPhys(pVM))
                                {
                                    AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PteSrc=%#RX64 PteDst=%#RX64\n",
                                                     GCPhysGst, GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
                                    cErrors++;
                                    continue;
                                }
                            }
                            else if (HCPhysShw != (HCPhys & SHW_PTE_PG_MASK))
                            {
                                AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
                                                 GCPtr + off, HCPhysShw, HCPhys, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
                                cErrors++;
                                continue;
                            }
# endif

                            pPhysPage = pgmPhysGetPage(pPGM, GCPhysGst);
                            if (!pPhysPage)
                            {
# ifdef IN_RING3 /** @todo make MMR3PageDummyHCPhys an 'All' function! */
                                if (HCPhysShw != MMR3PageDummyHCPhys(pVM))
                                {
                                    AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PteSrc=%#RX64 PteDst=%#RX64\n",
                                                     GCPhysGst, GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
                                    cErrors++;
                                    continue;
                                }
# endif
                                if (PteDst.n.u1Write)
                                {
                                    AssertMsgFailed(("Invalid guest page at %RGv is writable! GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
                                                     GCPtr + off, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
                                    cErrors++;
                                }
                                fIgnoreFlags |= X86_PTE_RW;
                            }
                            else if (HCPhysShw != (PGM_PAGE_GET_HCPHYS(pPhysPage) & SHW_PTE_PG_MASK))
                            {
                                AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
                                                 GCPtr + off, HCPhysShw, pPhysPage->HCPhys, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
                                cErrors++;
                                continue;
                            }

                            /* flags */
                            if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPhysPage))
                            {
                                if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPhysPage))
                                {
                                    if (PteDst.n.u1Write)
                                    {
                                        AssertMsgFailed(("WRITE access flagged at %RGv but the page is writable! HCPhys=%RHp PteSrc=%#RX64 PteDst=%#RX64\n",
                                                         GCPtr + off, pPhysPage->HCPhys, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
                                        cErrors++;
                                        continue;
                                    }
                                    fIgnoreFlags |= X86_PTE_RW;
                                }
                                else
                                {
                                    if (PteDst.n.u1Present)
                                    {
                                        AssertMsgFailed(("ALL access flagged at %RGv but the page is present! HCPhys=%RHp PteSrc=%#RX64 PteDst=%#RX64\n",
                                                         GCPtr + off, pPhysPage->HCPhys, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
                                        cErrors++;
                                        continue;
                                    }
                                    fIgnoreFlags |= X86_PTE_P;
                                }
                            }
                            else
                            {
                                if (!PteSrc.n.u1Dirty && PteSrc.n.u1Write)
                                {
                                    if (PteDst.n.u1Write)
                                    {
                                        AssertMsgFailed(("!DIRTY page at %RGv is writable! PteSrc=%#RX64 PteDst=%#RX64\n",
                                                         GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
                                        cErrors++;
                                        continue;
                                    }
                                    if (!(PteDst.u & PGM_PTFLAGS_TRACK_DIRTY))
                                    {
                                        AssertMsgFailed(("!DIRTY page at %RGv is not marked TRACK_DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
                                                         GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
                                        cErrors++;
                                        continue;
                                    }
                                    if (PteDst.n.u1Dirty)
                                    {
                                        AssertMsgFailed(("!DIRTY page at %RGv is marked DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
                                                         GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
                                        cErrors++;
                                    }
# if 0 /** @todo sync access bit properly... */
                                    if (PteDst.n.u1Accessed != PteSrc.n.u1Accessed)
                                    {
                                        AssertMsgFailed(("!DIRTY page at %RGv has a mismatching accessed bit! PteSrc=%#RX64 PteDst=%#RX64\n",
                                                         GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
                                        cErrors++;
                                    }
                                    fIgnoreFlags |= X86_PTE_RW;
# else
                                    fIgnoreFlags |= X86_PTE_RW | X86_PTE_A;
# endif
                                }
                                else if (PteDst.u & PGM_PTFLAGS_TRACK_DIRTY)
                                {
                                    /* access bit emulation (not implemented). */
                                    if (PteSrc.n.u1Accessed || PteDst.n.u1Present)
                                    {
                                        AssertMsgFailed(("PGM_PTFLAGS_TRACK_DIRTY set at %RGv but no accessed bit emulation! PteSrc=%#RX64 PteDst=%#RX64\n",
                                                         GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
                                        cErrors++;
                                        continue;
                                    }
                                    if (!PteDst.n.u1Accessed)
                                    {
                                        AssertMsgFailed(("!ACCESSED page at %RGv does not have the accessed bit set! PteSrc=%#RX64 PteDst=%#RX64\n",
                                                         GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
                                        cErrors++;
                                    }
                                    fIgnoreFlags |= X86_PTE_P;
                                }
# ifdef DEBUG_sandervl
                                fIgnoreFlags |= X86_PTE_D | X86_PTE_A;
# endif
                            }

                            if (    (PteSrc.u & ~fIgnoreFlags)                != (PteDst.u & ~fIgnoreFlags)
                                &&  (PteSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (PteDst.u & ~fIgnoreFlags)
                               )
                            {
                                AssertMsgFailed(("Flags mismatch at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PteSrc=%#RX64 PteDst=%#RX64\n",
                                                 GCPtr + off, (uint64_t)PteSrc.u & ~fIgnoreFlags, (uint64_t)PteDst.u & ~fIgnoreFlags,
                                                 fIgnoreFlags, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
                                cErrors++;
                                continue;
                            }
                        } /* foreach PTE */
                    }
                    else
                    {
                        /*
                         * Big Page.
                         */
                        uint64_t fIgnoreFlags = X86_PDE_AVL_MASK | GST_PDE_PG_MASK | X86_PDE4M_G | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_PWT | X86_PDE4M_PCD;
                        if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
                        {
                            if (PdeDst.n.u1Write)
                            {
                                AssertMsgFailed(("!DIRTY page at %RGv is writable! PdeSrc=%#RX64 PdeDst=%#RX64\n",
                                                 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
                                cErrors++;
                                continue;
                            }
                            if (!(PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY))
                            {
                                AssertMsgFailed(("!DIRTY page at %RGv is not marked TRACK_DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
                                                 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
                                cErrors++;
                                continue;
                            }
# if 0 /** @todo sync access bit properly... */
                            if (PdeDst.n.u1Accessed != PdeSrc.b.u1Accessed)
                            {
                                AssertMsgFailed(("!DIRTY page at %RGv has a mismatching accessed bit! PteSrc=%#RX64 PteDst=%#RX64\n",
                                                 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
                                cErrors++;
                            }
                            fIgnoreFlags |= X86_PTE_RW;
# else
                            fIgnoreFlags |= X86_PTE_RW | X86_PTE_A;
# endif
                        }
                        else if (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)
                        {
                            /* access bit emulation (not implemented). */
                            if (PdeSrc.b.u1Accessed || PdeDst.n.u1Present)
                            {
                                AssertMsgFailed(("PGM_PDFLAGS_TRACK_DIRTY set at %RGv but no accessed bit emulation! PdeSrc=%#RX64 PdeDst=%#RX64\n",
                                                 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
                                cErrors++;
                                continue;
                            }
                            if (!PdeDst.n.u1Accessed)
                            {
                                AssertMsgFailed(("!ACCESSED page at %RGv does not have the accessed bit set! PdeSrc=%#RX64 PdeDst=%#RX64\n",
                                                 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
                                cErrors++;
                            }
                            fIgnoreFlags |= X86_PTE_P;
                        }

                        if ((PdeSrc.u & ~fIgnoreFlags) != (PdeDst.u & ~fIgnoreFlags))
                        {
                            AssertMsgFailed(("Flags mismatch (B) at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PdeDst=%#RX64\n",
                                             GCPtr, (uint64_t)PdeSrc.u & ~fIgnoreFlags, (uint64_t)PdeDst.u & ~fIgnoreFlags,
                                             fIgnoreFlags, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
                            cErrors++;
                        }

                        /* iterate the page table. */
                        for (unsigned iPT = 0, off = 0;
                             iPT < RT_ELEMENTS(pPTDst->a);
                             iPT++, off += PAGE_SIZE, GCPhysGst += PAGE_SIZE)
                        {
                            const SHWPTE PteDst = pPTDst->a[iPT];

                            if (PteDst.u & PGM_PTFLAGS_TRACK_DIRTY)
                            {
                                AssertMsgFailed(("The PTE at %RGv emulating a 2/4M page is marked TRACK_DIRTY! PdeSrc=%#RX64 PteDst=%#RX64\n",
                                                 GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
                                cErrors++;
                            }

                            /* skip not-present entries. */
                            if (!PteDst.n.u1Present) /** @todo deal with ALL handlers and CSAM !P pages! */
                                continue;

                            fIgnoreFlags = X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT | X86_PTE_D | X86_PTE_A | X86_PTE_G | X86_PTE_PAE_NX;

                            /* match the physical addresses */
                            HCPhysShw = PteDst.u & X86_PTE_PAE_PG_MASK;

# ifdef IN_RING3
                            rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysGst, &HCPhys);
                            if (RT_FAILURE(rc))
                            {
                                if (HCPhysShw != MMR3PageDummyHCPhys(pVM))
                                {
                                    AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PdeSrc=%#RX64 PteDst=%#RX64\n",
                                                     GCPhysGst, GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
                                    cErrors++;
                                }
                            }
                            else if (HCPhysShw != (HCPhys & X86_PTE_PAE_PG_MASK))
                            {
                                AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
                                                 GCPtr + off, HCPhysShw, HCPhys, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
                                cErrors++;
                                continue;
                            }
# endif
                            pPhysPage = pgmPhysGetPage(pPGM, GCPhysGst);
                            if (!pPhysPage)
                            {
# ifdef IN_RING3 /** @todo make MMR3PageDummyHCPhys an 'All' function! */
                                if (HCPhysShw != MMR3PageDummyHCPhys(pVM))
                                {
                                    AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PdeSrc=%#RX64 PteDst=%#RX64\n",
                                                     GCPhysGst, GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
                                    cErrors++;
                                    continue;
                                }
# endif
                                if (PteDst.n.u1Write)
                                {
                                    AssertMsgFailed(("Invalid guest page at %RGv is writable! GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
                                                     GCPtr + off, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
                                    cErrors++;
                                }
                                fIgnoreFlags |= X86_PTE_RW;
                            }
                            else if (HCPhysShw != (pPhysPage->HCPhys & X86_PTE_PAE_PG_MASK))
                            {
                                AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
                                                 GCPtr + off, HCPhysShw, pPhysPage->HCPhys, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
                                cErrors++;
                                continue;
                            }

                            /* flags */
                            if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPhysPage))
                            {
                                if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPhysPage))
                                {
                                    if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPhysPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
                                    {
                                        if (PteDst.n.u1Write)
                                        {
                                            AssertMsgFailed(("WRITE access flagged at %RGv but the page is writable! HCPhys=%RHp PdeSrc=%#RX64 PteDst=%#RX64\n",
                                                             GCPtr + off, pPhysPage->HCPhys, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
                                            cErrors++;
                                            continue;
                                        }
                                        fIgnoreFlags |= X86_PTE_RW;
                                    }
                                }
                                else
                                {
                                    if (PteDst.n.u1Present)
                                    {
                                        AssertMsgFailed(("ALL access flagged at %RGv but the page is present! HCPhys=%RHp PdeSrc=%#RX64 PteDst=%#RX64\n",
                                                         GCPtr + off, pPhysPage->HCPhys, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
                                        cErrors++;
                                        continue;
                                    }
                                    fIgnoreFlags |= X86_PTE_P;
                                }
                            }

                            if (    (PdeSrc.u & ~fIgnoreFlags)                != (PteDst.u & ~fIgnoreFlags)
                                &&  (PdeSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (PteDst.u & ~fIgnoreFlags) /* lazy phys handler dereg. */
                               )
                            {
                                AssertMsgFailed(("Flags mismatch (BT) at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PteDst=%#RX64\n",
                                                 GCPtr + off, (uint64_t)PdeSrc.u & ~fIgnoreFlags, (uint64_t)PteDst.u & ~fIgnoreFlags,
                                                 fIgnoreFlags, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
                                cErrors++;
                                continue;
                            }
                        } /* for each PTE */
                    }
                }
                /* not present */

            } /* for each PDE */

        } /* for each PDPTE */

    } /* for each PML4E */

# ifdef DEBUG
    if (cErrors)
        LogFlow(("AssertCR3: cErrors=%d\n", cErrors));
# endif

#endif /* GST == 32BIT, PAE or AMD64 */
    return cErrors;

#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT */
}
#endif /* VBOX_STRICT */


/**
 * Sets up the CR3 for shadow paging.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SUCCESS.
 *
 * @param   pVM         VM handle.
 * @param   GCPhysCR3   The physical address in the CR3 register.
 */
PGM_BTH_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
{
    /* Update guest paging info. */
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64

    LogFlow(("MapCR3: %RGp\n", GCPhysCR3));

    /*
     * Map the page CR3 points at.
     */
    RTHCPHYS HCPhysGuestCR3;
    RTHCPTR  HCPtrGuestCR3;
    int rc = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhysCR3 & GST_CR3_PAGE_MASK, &HCPtrGuestCR3, &HCPhysGuestCR3);
    if (RT_SUCCESS(rc))
    {
        rc = PGMMap(pVM, (RTGCPTR)pVM->pgm.s.GCPtrCR3Mapping, HCPhysGuestCR3, PAGE_SIZE, 0);
        if (RT_SUCCESS(rc))
        {
            PGM_INVL_PG(pVM->pgm.s.GCPtrCR3Mapping);
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            pVM->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
            pVM->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
# endif
            pVM->pgm.s.pGst32BitPdRC = (RCPTRTYPE(PX86PD))pVM->pgm.s.GCPtrCR3Mapping;

# elif PGM_GST_TYPE == PGM_TYPE_PAE
            unsigned off = GCPhysCR3 & GST_CR3_PAGE_MASK & PAGE_OFFSET_MASK;
            pVM->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
            pVM->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
# endif
            pVM->pgm.s.pGstPaePdptRC = (RCPTRTYPE(PX86PDPT))((RCPTRTYPE(uint8_t *))pVM->pgm.s.GCPtrCR3Mapping + off);
            Log(("Cached mapping %RGv\n", pVM->pgm.s.pGstPaePdptRC));

            /*
             * Map the 4 PDs too.
             */
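            /* (Presumably the 4 PDs are mapped right after the PDPT in the hypervisor
               area so that raw-mode code can reach them without further translation.) */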
4529 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
4530 RTGCPTR GCPtr = pVM->pgm.s.GCPtrCR3Mapping + PAGE_SIZE;
4531 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++, GCPtr += PAGE_SIZE)
4532 {
4533 if (pGuestPDPT->a[i].n.u1Present)
4534 {
4535 RTHCPTR HCPtr;
4536 RTHCPHYS HCPhys;
4537 RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;
4538 int rc2 = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhys, &HCPtr, &HCPhys);
4539 if (RT_SUCCESS(rc2))
4540 {
4541 rc = PGMMap(pVM, GCPtr, HCPhys & X86_PTE_PAE_PG_MASK, PAGE_SIZE, 0);
4542 AssertRCReturn(rc, rc);
4543
4544 pVM->pgm.s.apGstPaePDsR3[i] = (R3PTRTYPE(PX86PDPAE))HCPtr;
4545# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4546 pVM->pgm.s.apGstPaePDsR0[i] = (R0PTRTYPE(PX86PDPAE))HCPtr;
4547# endif
4548 pVM->pgm.s.apGstPaePDsRC[i] = (RCPTRTYPE(PX86PDPAE))GCPtr;
4549 pVM->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
4550 PGM_INVL_PG(GCPtr); /** @todo This ends up calling HWACCMInvalidatePage, is that correct? */
4551 continue;
4552 }
4553 AssertMsgFailed(("pgmR3Gst32BitMapCR3: rc2=%d GCPhys=%RGp i=%d\n", rc2, GCPhys, i));
4554 }
4555
4556 pVM->pgm.s.apGstPaePDsR3[i] = 0;
4557# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4558 pVM->pgm.s.apGstPaePDsR0[i] = 0;
4559# endif
4560 pVM->pgm.s.apGstPaePDsRC[i] = 0;
4561 pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
4562 PGM_INVL_PG(GCPtr); /** @todo this shouldn't be necessary? */
4563 }
4564
4565# elif PGM_GST_TYPE == PGM_TYPE_AMD64
4566 pVM->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
4567# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4568 pVM->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
4569# endif
4570# ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
4571 if (!HWACCMIsNestedPagingActive(pVM))
4572 {
4573 /*
4574 * Update the shadow root page as well since that's not fixed.
4575 */
4576 /** @todo Move this into PGMAllBth.h. */
4577 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
4578 if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
4579 {
4580 /* It might have been freed already by a pool flush (see e.g. PGMR3MappingsUnfix). */
4581 /** @todo Coordinate this better with the pool. */
4582 if (pVM->pgm.s.CTX_SUFF(pShwPageCR3)->enmKind != PGMPOOLKIND_FREE)
4583 pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->GCPhys >> PAGE_SHIFT);
4584 pVM->pgm.s.pShwPageCR3R3 = 0;
4585 pVM->pgm.s.pShwPageCR3R0 = 0;
4586 pVM->pgm.s.pShwRootR3 = 0;
4587# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4588 pVM->pgm.s.pShwRootR0 = 0;
4589# endif
4590 pVM->pgm.s.HCPhysShwCR3 = 0;
4591 }
4592
                Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
                rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_64BIT_PML4, PGMPOOL_IDX_AMD64_CR3, GCPhysCR3 >> PAGE_SHIFT, &pVM->pgm.s.CTX_SUFF(pShwPageCR3));
                if (rc == VERR_PGM_POOL_FLUSHED)
                {
                    Log(("MapCR3: PGM pool flushed -> signal sync cr3\n"));
                    Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
                    return VINF_PGM_SYNC_CR3;
                }
                AssertRCReturn(rc, rc);
# ifdef IN_RING0
                pVM->pgm.s.pShwPageCR3R3 = MMHyperCCToR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# else
                pVM->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif
                pVM->pgm.s.pShwRootR3 = (R3PTRTYPE(void *))pVM->pgm.s.CTX_SUFF(pShwPageCR3)->pvPageR3;
                Assert(pVM->pgm.s.pShwRootR3);
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
                pVM->pgm.s.pShwRootR0 = (R0PTRTYPE(void *))PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif
                pVM->pgm.s.HCPhysShwCR3 = pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
                rc = VINF_SUCCESS; /* clear it - pgmPoolAlloc returns hints. */
            }
# endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
# endif
        }
        else
            AssertMsgFailed(("rc=%Rrc GCPhysGuestPD=%RGp\n", rc, GCPhysCR3));
    }
    else
        AssertMsgFailed(("rc=%Rrc GCPhysGuestPD=%RGp\n", rc, GCPhysCR3));

#else /* prot/real stub */
    int rc = VINF_SUCCESS;
#endif

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    /* Update shadow paging info for guest modes with paging (32, pae, 64). */
# if   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
        || PGM_SHW_TYPE == PGM_TYPE_PAE \
        || PGM_SHW_TYPE == PGM_TYPE_AMD64) \
    && (   PGM_GST_TYPE != PGM_TYPE_REAL \
        && PGM_GST_TYPE != PGM_TYPE_PROT)

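    /* This path is only reached when we shadow the guest's own paging; with
       nested paging active the shadow type is PGM_TYPE_NESTED or PGM_TYPE_EPT,
       which the # if above excludes.  Hence the assertion. */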
    Assert(!HWACCMIsNestedPagingActive(pVM));

    /*
     * Update the shadow root page as well since that's not fixed.
     */
    PPGMPOOL     pPool            = pVM->pgm.s.CTX_SUFF(pPool);
    PPGMPOOLPAGE pOldShwPageCR3   = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
    uint32_t     iOldShwUserTable = pVM->pgm.s.iShwUserTable;
    uint32_t     iOldShwUser      = pVM->pgm.s.iShwUser;
    PPGMPOOLPAGE pNewShwPageCR3;

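    /* Allocate the new root page before releasing the old one (freed at the
       bottom of this function), so a usable shadow root is installed at all
       times. */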
    Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
    rc = pgmPoolAlloc(pVM, GCPhysCR3 & GST_CR3_PAGE_MASK, BTH_PGMPOOLKIND_ROOT, SHW_POOL_ROOT_IDX, GCPhysCR3 >> PAGE_SHIFT, &pNewShwPageCR3);
    if (rc == VERR_PGM_POOL_FLUSHED)
    {
        Log(("MapCR3: PGM pool flushed -> signal sync cr3\n"));
        Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
        return VINF_PGM_SYNC_CR3;
    }
    AssertRCReturn(rc, rc);
    rc = VINF_SUCCESS;

# ifdef IN_RC
    /* Note: We can't deal with jumps to ring 3 here as we're now in an inconsistent state! */
# endif
    /* Mark the page as locked; disallow flushing. */
    pgmPoolLockPage(pPool, pNewShwPageCR3);

    pVM->pgm.s.iShwUser              = SHW_POOL_ROOT_IDX;
    pVM->pgm.s.iShwUserTable         = GCPhysCR3 >> PAGE_SHIFT;
    pVM->pgm.s.CTX_SUFF(pShwPageCR3) = pNewShwPageCR3;
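    /* The CTX_SUFF assignment above covered the current context; publish the
       pointer for the other two contexts as well (the MMHyperCCToR3/R0/RC
       helpers convert the hyper-heap address between contexts). */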
# ifdef IN_RING0
    pVM->pgm.s.pShwPageCR3R3 = MMHyperCCToR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    pVM->pgm.s.pShwPageCR3RC = MMHyperCCToRC(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# elif defined(IN_RC)
    pVM->pgm.s.pShwPageCR3R3 = MMHyperCCToR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    pVM->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# else
    pVM->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    pVM->pgm.s.pShwPageCR3RC = MMHyperCCToRC(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif
    pVM->pgm.s.pShwRootR3 = (R3PTRTYPE(void *))pVM->pgm.s.CTX_SUFF(pShwPageCR3)->pvPageR3;
    Assert(pVM->pgm.s.pShwRootR3);
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->pgm.s.pShwRootR0 = (R0PTRTYPE(void *))PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif
    pVM->pgm.s.HCPhysShwCR3 = pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;

# ifndef PGM_WITHOUT_MAPPINGS
    /* Apply all hypervisor mappings to the new CR3.
     * Note that SyncCR3 will be executed in case CR3 is changed in a guest paging mode; this will
     * make sure we check for conflicts in the new CR3 root.
     */
# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL));
# endif
    rc = pgmMapActivateCR3(pVM, pNewShwPageCR3);
    AssertRCReturn(rc, rc);
# endif

    /* Set the current hypervisor CR3. */
    CPUMSetHyperCR3(pVM, PGMGetHyperCR3(pVM));

# ifdef IN_RC
    /* Note: Everything is safe again. */
# endif

    /* Clean up the old CR3 root. */
    if (pOldShwPageCR3)
    {
        Assert(pOldShwPageCR3->enmKind != PGMPOOLKIND_FREE);
# ifndef PGM_WITHOUT_MAPPINGS
        /* Remove the hypervisor mappings from the shadow page table. */
        pgmMapDeactivateCR3(pVM, pOldShwPageCR3);
# endif
        /* Mark the page as unlocked; allow flushing again. */
        pgmPoolUnlockPage(pPool, pOldShwPageCR3);

        pgmPoolFreeByPage(pPool, pOldShwPageCR3, iOldShwUser, iOldShwUserTable);
    }

# endif
#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */

    return rc;
}
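
/*
 * Illustrative sketch only (hypothetical caller, not part of this file):
 * MapCR3 returns VINF_PGM_SYNC_CR3 after a pool flush, with the
 * VM_FF_PGM_SYNC_CR3 force-action flag already set, so a caller can simply
 * defer to the next SyncCR3 pass, roughly like this:
 *
 *     rc = PGM_BTH_NAME(MapCR3)(pVM, GCPhysCR3);
 *     if (rc == VINF_PGM_SYNC_CR3)
 *         return VINF_SUCCESS;    // the pending SyncCR3 will redo the mapping
 *     AssertRCReturn(rc, rc);
 */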

/**
 * Unmaps the shadow CR3.
 *
 * @returns VBox status, no specials.
 * @param   pVM     VM handle.
 */
PGM_BTH_DECL(int, UnmapCR3)(PVM pVM)
{
    LogFlow(("UnmapCR3\n"));

    int rc = VINF_SUCCESS;

    /* Update guest paging info. */
#if PGM_GST_TYPE == PGM_TYPE_32BIT
    pVM->pgm.s.pGst32BitPdR3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->pgm.s.pGst32BitPdR0 = 0;
# endif
    pVM->pgm.s.pGst32BitPdRC = 0;

#elif PGM_GST_TYPE == PGM_TYPE_PAE
    pVM->pgm.s.pGstPaePdptR3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->pgm.s.pGstPaePdptR0 = 0;
# endif
    pVM->pgm.s.pGstPaePdptRC = 0;
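    /* Also drop the cached mappings of the four PAE page directories. */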
    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    {
        pVM->pgm.s.apGstPaePDsR3[i]    = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
        pVM->pgm.s.apGstPaePDsR0[i]    = 0;
# endif
        pVM->pgm.s.apGstPaePDsRC[i]    = 0;
        pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
    }

#elif PGM_GST_TYPE == PGM_TYPE_AMD64
    pVM->pgm.s.pGstAmd64Pml4R3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->pgm.s.pGstAmd64Pml4R0 = 0;
# endif
# ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    if (!HWACCMIsNestedPagingActive(pVM))
    {
        pVM->pgm.s.pShwRootR3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
        pVM->pgm.s.pShwRootR0 = 0;
# endif
        pVM->pgm.s.HCPhysShwCR3 = 0;
        if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        {
            PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
            pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->GCPhys >> PAGE_SHIFT);
            pVM->pgm.s.pShwPageCR3R3 = 0;
            pVM->pgm.s.pShwPageCR3R0 = 0;
        }
    }
# endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */

#else /* prot/real mode stub */
    /* nothing to do */
#endif

#if defined(VBOX_WITH_PGMPOOL_PAGING_ONLY) && !defined(IN_RC) /* In RC we rely on MapCR3 to do the shadow part for us at a safe time */
    /* Update shadow paging info. */
# if    PGM_SHW_TYPE == PGM_TYPE_32BIT \
     || PGM_SHW_TYPE == PGM_TYPE_PAE \
     || PGM_SHW_TYPE == PGM_TYPE_AMD64

    Assert(!HWACCMIsNestedPagingActive(pVM));

# ifndef PGM_WITHOUT_MAPPINGS
    if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        /* Remove the hypervisor mappings from the shadow page table. */
        pgmMapDeactivateCR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

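    /* Unlock the root page and hand it back to the pool; iShwUser and
       iShwUserTable identify the user record that MapCR3 registered for it. */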
    pVM->pgm.s.pShwRootR3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->pgm.s.pShwRootR0 = 0;
# endif
    pVM->pgm.s.HCPhysShwCR3 = 0;
    if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

        /* Mark the page as unlocked; allow flushing again. */
        pgmPoolUnlockPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3));

        pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pVM->pgm.s.iShwUser, pVM->pgm.s.iShwUserTable);
        pVM->pgm.s.pShwPageCR3R3 = 0;
        pVM->pgm.s.pShwPageCR3R0 = 0;
        pVM->pgm.s.iShwUser      = 0;
        pVM->pgm.s.iShwUserTable = 0;
    }
# endif
#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY && !IN_RC */

    return rc;
}