/* $Id: PGMAllBth.h 19776 2009-05-18 11:29:24Z vboxsync $ */
/** @file
 * VBox - Page Manager, Shadow+Guest Paging Template - All context code.
 *
 * This file is a big challenge!
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
__BEGIN_DECLS
PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage);
PGM_BTH_DECL(int, SyncPT)(PVMCPU pVCpu, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uErr);
PGM_BTH_DECL(int, PrefetchPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
PGM_BTH_DECL(int, SyncCR3)(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
#ifdef VBOX_STRICT
PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
#endif
#ifdef PGMPOOL_WITH_USER_TRACKING
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys);
#endif
PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
PGM_BTH_DECL(int, UnmapCR3)(PVMCPU pVCpu);
__END_DECLS

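/* This header is a code template: it is #included multiple times by the PGM
 * sources, each time with different PGM_GST_TYPE (guest paging mode) and
 * PGM_SHW_TYPE (shadow paging mode) settings, generating one set of Bth
 * ("both") functions per supported mode pair. A rough instantiation sketch
 * (macro values and the generated name are illustrative only):
 *
 *     #define PGM_GST_TYPE PGM_TYPE_32BIT
 *     #define PGM_SHW_TYPE PGM_TYPE_32BIT
 *     #include "PGMAllBth.h"   // emits e.g. a 32-bit-on-32-bit Trap0eHandler
 */
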
/* Filter out some illegal combinations of guest and shadow paging, so we can remove redundant checks inside functions. */
#if PGM_GST_TYPE == PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
# error "Invalid combination; PAE guest implies PAE shadow"
#endif

#if    (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
    && !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT)
# error "Invalid combination; real or protected mode without paging implies 32 bits or PAE shadow paging."
#endif

#if    (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE) \
    && !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT)
# error "Invalid combination; 32 bits guest paging or PAE implies 32 bits or PAE shadow paging."
#endif

#if    (PGM_GST_TYPE == PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT) \
    || (PGM_SHW_TYPE == PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PROT)
# error "Invalid combination; AMD64 guest implies AMD64 shadow and vice versa"
#endif

#ifdef IN_RING0 /* no mappings in VT-x and AMD-V mode */
# define PGM_WITHOUT_MAPPINGS
#endif
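
/* Note: "mappings" are the fixed hypervisor areas that raw-mode execution maps
   into the guest's address space; with VT-x/AMD-V the hypervisor never runs in
   the guest context, so all mapping related code can be compiled out. */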


#ifndef IN_RING3
/**
 * #PF Handler for raw-mode guest execution.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 *
 * @param   pVCpu       VMCPU Handle.
 * @param   uErr        The trap error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address.
 */
PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

# if defined(IN_RC) && defined(VBOX_STRICT)
    PGMDynCheckLocks(pVM);
# endif

# if  (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
    && PGM_SHW_TYPE != PGM_TYPE_NESTED \
    && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)

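    /* Page fault error code (uErr) bits, as defined by the x86 architecture:
       X86_TRAP_PF_P  (bit 0) - clear: page not present; set: protection violation.
       X86_TRAP_PF_RW (bit 1) - the faulting access was a write.
       X86_TRAP_PF_US (bit 2) - the access originated in user mode.
       X86_TRAP_PF_ID (bit 4) - the fault was an instruction fetch (needs NX/EPT). */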
# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE != PGM_TYPE_PAE
    /*
     * Hide the instruction fetch trap indicator for now.
     */
    /** @todo NXE will change this and we must fix NXE in the switcher too! */
    if (uErr & X86_TRAP_PF_ID)
    {
        uErr &= ~X86_TRAP_PF_ID;
        TRPMSetErrorCode(pVCpu, uErr);
    }
# endif

    /*
     * Get PDs.
     */
    int rc;
# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
# if PGM_GST_TYPE == PGM_TYPE_32BIT
    const unsigned iPDSrc = pvFault >> GST_PD_SHIFT;
    PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);

# elif PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64

# if PGM_GST_TYPE == PGM_TYPE_PAE
    unsigned iPDSrc;
    X86PDPE PdpeSrc;
    PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVCpu->pgm.s, pvFault, &iPDSrc, &PdpeSrc);

# elif PGM_GST_TYPE == PGM_TYPE_AMD64
    unsigned iPDSrc;
    PX86PML4E pPml4eSrc;
    X86PDPE PdpeSrc;
    PGSTPD pPDSrc;

    pPDSrc = pgmGstGetLongModePDPtr(&pVCpu->pgm.s, pvFault, &pPml4eSrc, &PdpeSrc, &iPDSrc);
    Assert(pPml4eSrc);
# endif

    /* Quick check for a valid guest trap. (PAE & AMD64) */
    if (!pPDSrc)
    {
# if PGM_GST_TYPE == PGM_TYPE_AMD64 && GC_ARCH_BITS == 64
        LogFlow(("Trap0eHandler: guest PML4 %d not present CR3=%RGp\n", (int)((pvFault >> X86_PML4_SHIFT) & X86_PML4_MASK), CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK));
# else
        LogFlow(("Trap0eHandler: guest iPDSrc=%u not present CR3=%RGp\n", iPDSrc, CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK));
# endif
        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
        TRPMSetErrorCode(pVCpu, uErr);
        return VINF_EM_RAW_GUEST_TRAP;
    }
# endif

# else /* !PGM_WITH_PAGING */
    PGSTPD pPDSrc = NULL;
    const unsigned iPDSrc = 0;
# endif /* !PGM_WITH_PAGING */

    /* Fetch the guest PDE */
# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
# else
    GSTPDE PdeSrc;
    PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
    PdeSrc.n.u1Present  = 1;
    PdeSrc.n.u1Write    = 1;
    PdeSrc.n.u1Accessed = 1;
    PdeSrc.n.u1User     = 1;
# endif
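    /* Without guest paging every access is architecturally legal at the guest
       level, so the faked PDE grants present/write/user/accessed and leaves all
       real access control to the shadow and physical page levels. */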

    pgmLock(pVM);
    { /* Force the shadow pointers to go out of scope after releasing the lock. */
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
        const unsigned iPDDst = pvFault >> SHW_PD_SHIFT;
        PX86PD pPDDst = pgmShwGet32BitPDPtr(&pVCpu->pgm.s);

# elif PGM_SHW_TYPE == PGM_TYPE_PAE
        const unsigned iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK;   /* pPDDst index, not used with the pool. */

        PX86PDPAE pPDDst;
# if PGM_GST_TYPE != PGM_TYPE_PAE
        X86PDPE PdpeSrc;

        /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
        PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
# endif
        rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, &PdpeSrc, &pPDDst);
        if (rc != VINF_SUCCESS)
        {
            pgmUnlock(pVM);
            AssertRC(rc);
            return rc;
        }
        Assert(pPDDst);

# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
        const unsigned iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
        PX86PDPAE pPDDst;
# if PGM_GST_TYPE == PGM_TYPE_PROT
        /* AMD-V nested paging */
        X86PML4E Pml4eSrc;
        X86PDPE PdpeSrc;
        PX86PML4E pPml4eSrc = &Pml4eSrc;

        /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
        Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
        PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
# endif

        rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
        if (rc != VINF_SUCCESS)
        {
            pgmUnlock(pVM);
            AssertRC(rc);
            return rc;
        }
        Assert(pPDDst);

# elif PGM_SHW_TYPE == PGM_TYPE_EPT
        const unsigned iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
        PEPTPD pPDDst;

        rc = pgmShwGetEPTPDPtr(pVCpu, pvFault, NULL, &pPDDst);
        if (rc != VINF_SUCCESS)
        {
            pgmUnlock(pVM);
            AssertRC(rc);
            return rc;
        }
        Assert(pPDDst);
# endif

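        /* At this point pPDDst points at the shadow page directory covering
           pvFault; the pgmShwSync / pgmShwGet calls above located it, creating
           any missing intermediate shadow paging structures on demand. */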
# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
        /*
         * If we successfully correct the write protection fault due to dirty bit
         * tracking, or this page fault is a genuine one, then return immediately.
         */
        STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
        rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
        if (    rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
            ||  rc == VINF_EM_RAW_GUEST_TRAP)
        {
            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
                         = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVCpu->pgm.s.StatRZTrap0eTime2DirtyAndAccessed : &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
            LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
            pgmUnlock(pVM);
            return rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? VINF_SUCCESS : rc;
        }

        STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0ePD[iPDSrc]);
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */

        /*
         * A common case is the not-present error caused by lazy page table syncing.
         *
         * It is IMPORTANT that we weed out any access to non-present shadow PDEs here
         * so we can safely assume that the shadow PT is present when calling SyncPage later.
         *
         * On failure, we ASSUME that SyncPT is out of memory or detected some kind
         * of mapping conflict and defer to SyncCR3 in R3.
         * (Again, we do NOT support access handlers for non-present guest pages.)
         */
        if (    !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
            &&  !pPDDst->a[iPDDst].n.u1Present
            &&  PdeSrc.n.u1Present
           )
        {
            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; });
            STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
            LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
            rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, pvFault);
            pgmUnlock(pVM);
            if (RT_SUCCESS(rc))
            {
                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
                return rc;
            }
            Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
            return VINF_PGM_SYNC_CR3;
        }
        pgmUnlock(pVM);
    }

# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    /*
     * Check if this address is within any of our mappings.
     *
     * This is *very* fast and it's gonna save us a bit of effort below and prevent
     * us from screwing ourself with MMIO2 pages which have a GC Mapping (VRam).
     * (BTW, it's impossible to have physical access handlers in a mapping.)
     */
    if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
    {
        STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeMapping, a);
        PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
        for ( ; pMapping; pMapping = pMapping->CTX_SUFF(pNext))
        {
            if (pvFault < pMapping->GCPtr)
                break;
            if (pvFault - pMapping->GCPtr < pMapping->cb)
            {
                /*
                 * The first thing we check is if we've got an undetected conflict.
                 */
                if (!pVM->pgm.s.fMappingsFixed)
                {
                    unsigned iPT = pMapping->cb >> GST_PD_SHIFT;
                    while (iPT-- > 0)
                        if (pPDSrc->a[iPDSrc + iPT].n.u1Present)
                        {
                            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eConflicts);
                            Log(("Trap0e: Detected Conflict %RGv-%RGv\n", pMapping->GCPtr, pMapping->GCPtrLast));
                            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
                            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeMapping, a);
                            return VINF_PGM_SYNC_CR3;
                        }
                }

                /*
                 * Check if the fault address is in a virtual page access handler range.
                 */
                PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->HyperVirtHandlers, pvFault);
                if (    pCur
                    &&  pvFault - pCur->Core.Key < pCur->cb
                    &&  uErr & X86_TRAP_PF_RW)
                {
# ifdef IN_RC
                    STAM_PROFILE_START(&pCur->Stat, h);
                    rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
                    STAM_PROFILE_STOP(&pCur->Stat, h);
# else
                    AssertFailed();
                    rc = VINF_EM_RAW_EMULATE_INSTR; /* can't happen with VMX */
# endif
                    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersMapping);
                    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeMapping, a);
                    return rc;
                }

                /*
                 * Pretend we're not here and let the guest handle the trap.
                 */
                TRPMSetErrorCode(pVCpu, uErr & ~X86_TRAP_PF_P);
                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eGuestPFMapping);
                LogFlow(("PGM: Mapping access -> route trap to recompiler!\n"));
                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeMapping, a);
                return VINF_EM_RAW_GUEST_TRAP;
            }
        }
        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeMapping, a);
    } /* pgmMapAreMappingsEnabled(&pVM->pgm.s) */
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */

    /*
     * Check if this fault address is flagged for special treatment,
     * which means we'll have to figure out the physical address and
     * check flags associated with it.
     *
     * ASSUME that we can limit any special access handling to pages
     * in page tables which the guest believes to be present.
     */
    if (PdeSrc.n.u1Present)
    {
        RTGCPHYS GCPhys = NIL_RTGCPHYS;

# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
# if PGM_GST_TYPE == PGM_TYPE_AMD64
        bool fBigPagesSupported = true;
# else
        bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PSE);
# endif
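        /* For a big page, GCPhys is the 2/4 MB frame base plus the offset of the
           faulting 4 KB page within it: (GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK)
           selects exactly the 4 KB page number bits inside the big page. */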
        if (    PdeSrc.b.u1Size
            &&  fBigPagesSupported)
            GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc)
                   | ((RTGCPHYS)pvFault & (GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK));
        else
        {
            PGSTPT pPTSrc;
            rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
            if (RT_SUCCESS(rc))
            {
                unsigned iPTESrc = (pvFault >> GST_PT_SHIFT) & GST_PT_MASK;
                if (pPTSrc->a[iPTESrc].n.u1Present)
                    GCPhys = pPTSrc->a[iPTESrc].u & GST_PTE_PG_MASK;
            }
        }
# else
        /* No paging so the fault address is the physical address */
        GCPhys = (RTGCPHYS)(pvFault & ~PAGE_OFFSET_MASK);
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */

        /*
         * If we have a GC address we'll check if it has any flags set.
         */
        if (GCPhys != NIL_RTGCPHYS)
        {
            STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);

            PPGMPAGE pPage;
            rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
            if (RT_SUCCESS(rc)) /** @todo just handle the failure immediately (it returns) and make things easier to read. */
            {
                if (    PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage)
                    ||  PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage))
                {
                    if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
                    {
                        /*
                         * Physical page access handler.
                         */
                        const RTGCPHYS GCPhysFault = GCPhys | (pvFault & PAGE_OFFSET_MASK);
                        PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysFault);
                        if (pCur)
                        {
# ifdef PGM_SYNC_N_PAGES
                            /*
                             * If the region is write protected and we got a page not present fault, then sync
                             * the pages. If the fault was caused by a read, then restart the instruction.
                             * In case of write access continue to the GC write handler.
                             *
                             * ASSUMES that there is only one handler per page or that they have similar write properties.
                             */
                            if (    pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
                                &&  !(uErr & X86_TRAP_PF_P))
                            {
                                rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
                                if (    RT_FAILURE(rc)
                                    ||  !(uErr & X86_TRAP_PF_RW)
                                    ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
                                {
                                    AssertRC(rc);
                                    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersOutOfSync);
                                    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
                                    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndPhys; });
                                    return rc;
                                }
                            }
# endif

                            AssertMsg(   pCur->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
                                      || (pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE && (uErr & X86_TRAP_PF_RW)),
                                      ("Unexpected trap for physical handler: %08X (phys=%08x) pPage=%R[pgmpage] uErr=%X, enum=%d\n", pvFault, GCPhys, pPage, uErr, pCur->enmType));

# if defined(IN_RC) || defined(IN_RING0)
                            if (pCur->CTX_SUFF(pfnHandler))
                            {
                                STAM_PROFILE_START(&pCur->Stat, h);
                                rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, GCPhysFault, pCur->CTX_SUFF(pvUser));
                                STAM_PROFILE_STOP(&pCur->Stat, h);
                            }
                            else
# endif
                                rc = VINF_EM_RAW_EMULATE_INSTR;
                            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersPhysical);
                            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
                            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndPhys; });
                            return rc;
                        }
                    }
# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
                    else
                    {
# ifdef PGM_SYNC_N_PAGES
                        /*
                         * If the region is write protected and we got a page not present fault, then sync
                         * the pages. If the fault was caused by a read, then restart the instruction.
                         * In case of write access continue to the GC write handler.
                         */
                        if (    PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < PGM_PAGE_HNDL_PHYS_STATE_ALL
                            &&  !(uErr & X86_TRAP_PF_P))
                        {
                            rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
                            if (    RT_FAILURE(rc)
                                ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
                                ||  !(uErr & X86_TRAP_PF_RW))
                            {
                                AssertRC(rc);
                                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersOutOfSync);
                                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
                                STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndVirt; });
                                return rc;
                            }
                        }
# endif
                        /*
                         * Ok, it's a virtual page access handler.
                         *
                         * Since it's faster to search by address, we'll do that first
                         * and then retry by GCPhys if that fails.
                         */
                        /** @todo r=bird: perhaps we should consider looking up by physical address directly now? */
                        /** @note r=svl: true, but lookup on virtual address should remain as a fallback as phys & virt trees might be out of sync, because the
                         *        page was changed without us noticing it (not-present -> present without invlpg or mov cr3, xxx)
                         */
                        PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
                        if (pCur)
                        {
                            AssertMsg(   !(pvFault - pCur->Core.Key < pCur->cb)
                                      || (    pCur->enmType != PGMVIRTHANDLERTYPE_WRITE
                                           || !(uErr & X86_TRAP_PF_P)
                                           || (pCur->enmType == PGMVIRTHANDLERTYPE_WRITE && (uErr & X86_TRAP_PF_RW))),
                                      ("Unexpected trap for virtual handler: %RGv (phys=%RGp) pPage=%R[pgmpage] uErr=%X, enum=%d\n", pvFault, GCPhys, pPage, uErr, pCur->enmType));

                            if (    pvFault - pCur->Core.Key < pCur->cb
                                &&  (    uErr & X86_TRAP_PF_RW
                                     ||  pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
                            {
# ifdef IN_RC
                                STAM_PROFILE_START(&pCur->Stat, h);
                                rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
                                STAM_PROFILE_STOP(&pCur->Stat, h);
# else
                                rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
# endif
                                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersVirtual);
                                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
                                STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndVirt; });
                                return rc;
                            }
                            /* Unhandled part of a monitored page */
                        }
                        else
                        {
                            /* Check by physical address. */
                            PPGMVIRTHANDLER pCur;
                            unsigned iPage;
                            rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys + (pvFault & PAGE_OFFSET_MASK),
                                                                 &pCur, &iPage);
                            Assert(RT_SUCCESS(rc) || !pCur);
                            if (    pCur
                                &&  (    uErr & X86_TRAP_PF_RW
                                     ||  pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
                            {
                                Assert((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == GCPhys);
# ifdef IN_RC
                                RTGCPTR off = (iPage << PAGE_SHIFT) + (pvFault & PAGE_OFFSET_MASK) - (pCur->Core.Key & PAGE_OFFSET_MASK);
                                Assert(off < pCur->cb);
                                STAM_PROFILE_START(&pCur->Stat, h);
                                rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, off);
                                STAM_PROFILE_STOP(&pCur->Stat, h);
# else
                                rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
# endif
                                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersVirtualByPhys);
                                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
                                STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndVirt; });
                                return rc;
                            }
                        }
                    }
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */

                    /*
                     * There is a handled area of the page, but this fault doesn't belong to it.
                     * We must emulate the instruction.
                     *
                     * To avoid crashing (non-fatal) in the interpreter and go back to the recompiler
                     * we first check if this was a page-not-present fault for a page with only
                     * write access handlers. Restart the instruction if it wasn't a write access.
                     */
                    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersUnhandled);

                    if (    !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
                        &&  !(uErr & X86_TRAP_PF_P))
                    {
                        rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
                        if (    RT_FAILURE(rc)
                            ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
                            ||  !(uErr & X86_TRAP_PF_RW))
                        {
                            AssertRC(rc);
                            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersOutOfSync);
                            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
                            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndPhys; });
                            return rc;
                        }
                    }

                    /** @todo This particular case can cause quite a lot of overhead. E.g. early stage of kernel booting in Ubuntu 6.06
                     *        It's writing to an unhandled part of the LDT page several million times.
                     */
                    rc = PGMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault);
                    LogFlow(("PGM: PGMInterpretInstruction -> rc=%d pPage=%R[pgmpage]\n", rc, pPage));
                    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
                    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndUnhandled; });
                    return rc;
                } /* if any kind of handler */

# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
                if (uErr & X86_TRAP_PF_P)
                {
                    /*
                     * The page isn't marked, but it might still be monitored by a virtual page access handler.
                     * (ASSUMES no temporary disabling of virtual handlers.)
                     */
                    /** @todo r=bird: Since the purpose is to catch out of sync pages with virtual handler(s) here,
                     *        we should correct both the shadow page table and physical memory flags, and not only check for
                     *        accesses within the handler region but for access to pages with virtual handlers. */
                    PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
                    if (pCur)
                    {
                        AssertMsg(   !(pvFault - pCur->Core.Key < pCur->cb)
                                  || (    pCur->enmType != PGMVIRTHANDLERTYPE_WRITE
                                       || !(uErr & X86_TRAP_PF_P)
                                       || (pCur->enmType == PGMVIRTHANDLERTYPE_WRITE && (uErr & X86_TRAP_PF_RW))),
                                  ("Unexpected trap for virtual handler: %08X (phys=%08x) %R[pgmpage] uErr=%X, enum=%d\n", pvFault, GCPhys, pPage, uErr, pCur->enmType));

                        if (    pvFault - pCur->Core.Key < pCur->cb
                            &&  (    uErr & X86_TRAP_PF_RW
                                 ||  pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
                        {
# ifdef IN_RC
                            STAM_PROFILE_START(&pCur->Stat, h);
                            rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
                            STAM_PROFILE_STOP(&pCur->Stat, h);
# else
                            rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
# endif
                            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersVirtualUnmarked);
                            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
                            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndVirt; });
                            return rc;
                        }
                    }
                }
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
            }
            else
            {
                /*
                 * When the guest accesses invalid physical memory (e.g. probing
                 * of RAM or accessing a remapped MMIO range), then we'll fall
                 * back to the recompiler to emulate the instruction.
                 */
                LogFlow(("PGM #PF: pgmPhysGetPageEx(%RGp) failed with %Rrc\n", GCPhys, rc));
                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersInvalid);
                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);

# ifdef PGM_OUT_OF_SYNC_IN_GC /** @todo remove this bugger. */
            /*
             * We are here only if page is present in Guest page tables and
             * trap is not handled by our handlers.
             *
             * Check it for page out-of-sync situation.
             */
            STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);

            if (!(uErr & X86_TRAP_PF_P))
            {
                /*
                 * Page is not present in our page tables.
                 * Try to sync it!
                 * BTW, fPageShw is invalid in this branch!
                 */
                if (uErr & X86_TRAP_PF_US)
                    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncUser));
                else /* supervisor */
                    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));

# if defined(LOG_ENABLED) && !defined(IN_RING0)
                RTGCPHYS GCPhys;
                uint64_t fPageGst;
                PGMGstGetPage(pVCpu, pvFault, &fPageGst, &GCPhys);
                Log(("Page out of sync: %RGv eip=%08x PdeSrc.n.u1User=%d fPageGst=%08llx GCPhys=%RGp scan=%d\n",
                     pvFault, pRegFrame->eip, PdeSrc.n.u1User, fPageGst, GCPhys, CSAMDoesPageNeedScanning(pVM, (RTRCPTR)pRegFrame->eip)));
# endif /* LOG_ENABLED */

# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0)
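                /* In raw mode, supervisor code faults are fed to CSAM (the Code
                   Scanning and Analysis Manager) so that new guest code pages can
                   be scanned, and patched by PATM where necessary, before execution. */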
                if (CPUMGetGuestCPL(pVCpu, pRegFrame) == 0)
                {
                    uint64_t fPageGst;
                    rc = PGMGstGetPage(pVCpu, pvFault, &fPageGst, NULL);
                    if (    RT_SUCCESS(rc)
                        && !(fPageGst & X86_PTE_US))
                    {
                        /* Note: can't check for X86_TRAP_ID bit, because that requires execute disable support on the CPU */
                        if (    pvFault == (RTGCPTR)pRegFrame->eip
                            ||  pvFault - pRegFrame->eip < 8    /* instruction crossing a page boundary */
# ifdef CSAM_DETECT_NEW_CODE_PAGES
                            ||  (   !PATMIsPatchGCAddr(pVM, (RTGCPTR)pRegFrame->eip)
                                 && CSAMDoesPageNeedScanning(pVM, (RTRCPTR)pRegFrame->eip)) /* any new code we encounter here */
# endif /* CSAM_DETECT_NEW_CODE_PAGES */
                           )
                        {
                            LogFlow(("CSAMExecFault %RX32\n", pRegFrame->eip));
                            rc = CSAMExecFault(pVM, (RTRCPTR)pRegFrame->eip);
                            if (rc != VINF_SUCCESS)
                            {
                                /*
                                 * CSAM needs to perform a job in ring 3.
                                 *
                                 * Sync the page before going to the host context; otherwise we'll end up in a loop if
                                 * CSAM fails (e.g. instruction crosses a page boundary and the next page is not present)
                                 */
                                LogFlow(("CSAM ring 3 job\n"));
                                int rc2 = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, 1, uErr);
                                AssertRC(rc2);

                                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
                                STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2CSAM; });
                                return rc;
                            }
                        }
# ifdef CSAM_DETECT_NEW_CODE_PAGES
                        else if (    uErr == X86_TRAP_PF_RW
                                 &&  pRegFrame->ecx >= 0x100    /* early check for movswd count */
                                 &&  pRegFrame->ecx < 0x10000)
                        {
                            /* In case of a write to a non-present supervisor shadow page, we'll take special precautions
                             * to detect loading of new code pages.
                             */

                            /*
                             * Decode the instruction.
                             */
                            RTGCPTR PC;
                            rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &PC);
                            if (rc == VINF_SUCCESS)
                            {
                                DISCPUSTATE Cpu;
                                uint32_t cbOp;
                                rc = EMInterpretDisasOneEx(pVM, pVCpu, PC, pRegFrame, &Cpu, &cbOp);

                                /* For now we'll restrict this to rep movsw/d instructions */
                                if (    rc == VINF_SUCCESS
                                    &&  Cpu.pCurInstr->opcode == OP_MOVSWD
                                    &&  (Cpu.prefix & PREFIX_REP))
                                {
                                    CSAMMarkPossibleCodePage(pVM, pvFault);
                                }
                            }
                        }
# endif /* CSAM_DETECT_NEW_CODE_PAGES */

                        /*
                         * Mark this page as safe.
                         */
                        /** @todo not correct for pages that contain both code and data!! */
                        Log2(("CSAMMarkPage %RGv; scanned=%d\n", pvFault, true));
                        CSAMMarkPage(pVM, (RTRCPTR)pvFault, true);
                    }
                }
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0) */
                rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
                if (RT_SUCCESS(rc))
                {
                    /* The page was successfully synced, return to the guest. */
                    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
                    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSync; });
                    return VINF_SUCCESS;
                }
            }
            else /* uErr & X86_TRAP_PF_P: */
            {
                /*
                 * Write protected pages are made writable when the guest makes the first
                 * write to them. This happens for pages that are shared, write monitored
                 * or not yet allocated.
                 *
                 * Also, a side effect of not flushing global PDEs are out of sync pages due
                 * to physical monitored regions, that are no longer valid.
                 * Assume for now it only applies to the read/write flag.
                 */
                if (RT_SUCCESS(rc) && (uErr & X86_TRAP_PF_RW))
                {
                    if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
                    {
                        Log(("PGM #PF: Make writable: %RGp %R[pgmpage] pvFault=%RGp uErr=%#x\n",
                             GCPhys, pPage, pvFault, uErr));
                        rc = pgmPhysPageMakeWritableUnlocked(pVM, pPage, GCPhys);
                        if (rc != VINF_SUCCESS)
                        {
                            AssertMsg(rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("%Rrc\n", rc));
                            return rc;
                        }
                        if (RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)))
                            return VINF_EM_NO_MEMORY;
                    }
                    /// @todo count the above case; else
                    if (uErr & X86_TRAP_PF_US)
                        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncUser));
                    else /* supervisor */
                        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));

                    /*
                     * Note: Do NOT use PGM_SYNC_NR_PAGES here. That only works if the
                     * page is not present, which is not true in this case.
                     */
                    rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, 1, uErr);
                    if (RT_SUCCESS(rc))
                    {
                        /*
                         * Page was successfully synced, return to guest.
                         */
# ifdef VBOX_STRICT
                        RTGCPHYS GCPhys;
                        uint64_t fPageGst;
                        rc = PGMGstGetPage(pVCpu, pvFault, &fPageGst, &GCPhys);
                        Assert(RT_SUCCESS(rc) && fPageGst & X86_PTE_RW);
                        LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, GCPhys, (uint64_t)fPageGst));

                        uint64_t fPageShw;
                        rc = PGMShwGetPage(pVCpu, pvFault, &fPageShw, NULL);
                        AssertMsg(RT_SUCCESS(rc) && fPageShw & X86_PTE_RW, ("rc=%Rrc fPageShw=%RX64\n", rc, fPageShw));
# endif /* VBOX_STRICT */
                        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
                        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndObs; });
                        return VINF_SUCCESS;
                    }

                    /* Check to see if we need to emulate the instruction as X86_CR0_WP has been cleared. */
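                    /* (With CR0.WP clear the guest permits supervisor writes to
                        read-only pages, but the shadow page tables always enforce
                        write protection, so such writes fault here and have to be
                        interpreted instead of reflected to the guest.) */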
                    if (    CPUMGetGuestCPL(pVCpu, pRegFrame) == 0
                        &&  ((CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG)
                        &&  (uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_P)) == (X86_TRAP_PF_RW | X86_TRAP_PF_P))
                    {
                        uint64_t fPageGst;
                        rc = PGMGstGetPage(pVCpu, pvFault, &fPageGst, NULL);
                        if (    RT_SUCCESS(rc)
                            && !(fPageGst & X86_PTE_RW))
                        {
                            rc = PGMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault);
                            if (RT_SUCCESS(rc))
                                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eWPEmulInRZ);
                            else
                                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eWPEmulToR3);
                            return rc;
                        }
                        AssertMsgFailed(("Unexpected r/w page %RGv flag=%x rc=%Rrc\n", pvFault, (uint32_t)fPageGst, rc));
                    }
                }

# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
# ifdef VBOX_STRICT
                /*
                 * Check for VMM page flags vs. Guest page flags consistency.
                 * Currently only for debug purposes.
                 */
                if (RT_SUCCESS(rc))
                {
                    /* Get guest page flags. */
                    uint64_t fPageGst;
                    rc = PGMGstGetPage(pVCpu, pvFault, &fPageGst, NULL);
                    if (RT_SUCCESS(rc))
                    {
                        uint64_t fPageShw;
                        rc = PGMShwGetPage(pVCpu, pvFault, &fPageShw, NULL);

                        /*
                         * Compare page flags.
                         * Note: we have AVL, A, D bits desynched.
                         */
                        AssertMsg((fPageShw & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)) == (fPageGst & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)),
                                  ("Page flags mismatch! pvFault=%RGv uErr=%x GCPhys=%RGp fPageShw=%RX64 fPageGst=%RX64\n", pvFault, (uint32_t)uErr, GCPhys, fPageShw, fPageGst));
                    }
                    else
                        AssertMsgFailed(("PGMGstGetPage rc=%Rrc\n", rc));
                }
                else
                    AssertMsgFailed(("PGMGCGetPage rc=%Rrc\n", rc));
# endif /* VBOX_STRICT */
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
            }
            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
# endif /* PGM_OUT_OF_SYNC_IN_GC */
        }
        else /* GCPhys == NIL_RTGCPHYS */
        {
            /*
             * Page not present in Guest OS or invalid page table address.
             * This is potential virtual page access handler food.
             *
             * For the present we'll say that our access handlers don't
             * work for this case - we've already discarded the page table
             * not present case which is identical to this.
             *
             * When we perchance find we need this, we will probably have AVL
             * trees (offset based) to operate on and we can measure their speed
             * against mapping a page table and probably rearrange this handling
             * a bit. (Like, searching virtual ranges before checking the
             * physical address.)
             */
        }
    }
    /* else: !present (guest) */


# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    /*
     * Conclusion, this is a guest trap.
     */
    LogFlow(("PGM: Unhandled #PF -> route trap to recompiler!\n"));
    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eGuestPFUnh);
    return VINF_EM_RAW_GUEST_TRAP;
# else
    /* present, but not a monitored page; perhaps the guest is probing physical memory */
    return VINF_EM_RAW_EMULATE_INSTR;
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */


# else /* PGM_GST_TYPE != PGM_TYPE_32BIT */

    AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE));
    return VERR_INTERNAL_ERROR;
# endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
}
#endif /* !IN_RING3 */


/**
 * Emulation of the invlpg instruction.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVCpu       The VMCPU handle.
 * @param   GCPtrPage   Page to invalidate.
 *
 * @remark  ASSUMES that the guest is updating before invalidating. This order
 *          isn't required by the CPU, so this is speculative and could cause
 *          trouble.
 *
 * @todo    Flush page or page directory only if necessary!
 * @todo    Add a #define for simply invalidating the page.
 */
PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) \
    && PGM_SHW_TYPE != PGM_TYPE_NESTED \
    && PGM_SHW_TYPE != PGM_TYPE_EPT
    int rc;
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

    LogFlow(("InvalidatePage %RGv\n", GCPtrPage));
    /*
     * Get the shadow PD entry and skip out if this PD isn't present.
     * (Guessing that it is frequent for a shadow PDE to not be present, do this first.)
     */
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
    const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
    PX86PDE pPdeDst = pgmShwGet32BitPDEPtr(&pVCpu->pgm.s, GCPtrPage);

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
    Assert(pShwPde);

# elif PGM_SHW_TYPE == PGM_TYPE_PAE
    const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT);
    PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);

    /* If the shadow PDPE isn't present, then skip the invalidate. */
    if (!pPdptDst->a[iPdpt].n.u1Present)
    {
        Assert(!(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING));
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
        return VINF_SUCCESS;
    }

    const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
    PPGMPOOLPAGE pShwPde = NULL;
    PX86PDPAE pPDDst;

    /* Fetch the pgm pool shadow descriptor. */
    rc = pgmShwGetPaePoolPagePD(&pVCpu->pgm.s, GCPtrPage, &pShwPde);
    AssertRCSuccessReturn(rc, rc);
    Assert(pShwPde);

    pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
    PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];

# else /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */
    /* PML4 */
    const unsigned iPml4 = (GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK;
    const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
    PX86PDPAE pPDDst;
    PX86PDPT pPdptDst;
    PX86PML4E pPml4eDst;
    rc = pgmShwGetLongModePDPtr(pVCpu, GCPtrPage, &pPml4eDst, &pPdptDst, &pPDDst);
    if (rc != VINF_SUCCESS)
    {
        AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT, ("Unexpected rc=%Rrc\n", rc));
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
        if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
            PGM_INVL_GUEST_TLBS();
        return VINF_SUCCESS;
    }
    Assert(pPDDst);

    PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
    PX86PDPE pPdpeDst = &pPdptDst->a[iPdpt];

    if (!pPdpeDst->n.u1Present)
    {
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
        if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
            PGM_INVL_GUEST_TLBS();
        return VINF_SUCCESS;
    }

# endif /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */

    const SHWPDE PdeDst = *pPdeDst;
    if (!PdeDst.n.u1Present)
    {
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
        return VINF_SUCCESS;
    }

# if defined(IN_RC)
    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
# endif

    /*
     * Get the guest PD entry and calc big page.
     */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
    PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
    const unsigned iPDSrc = GCPtrPage >> GST_PD_SHIFT;
    GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
# else /* PGM_GST_TYPE != PGM_TYPE_32BIT */
    unsigned iPDSrc = 0;
# if PGM_GST_TYPE == PGM_TYPE_PAE
    X86PDPE PdpeSrc;
    PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(&pVCpu->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
# else /* AMD64 */
    PX86PML4E pPml4eSrc;
    X86PDPE PdpeSrc;
    PX86PDPAE pPDSrc = pgmGstGetLongModePDPtr(&pVCpu->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
# endif
    GSTPDE PdeSrc;

    if (pPDSrc)
        PdeSrc = pPDSrc->a[iPDSrc];
    else
        PdeSrc.u = 0;
# endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */

# if PGM_GST_TYPE == PGM_TYPE_AMD64
    const bool fIsBigPage = PdeSrc.b.u1Size;
# else
    const bool fIsBigPage = PdeSrc.b.u1Size && (CPUMGetGuestCR4(pVCpu) & X86_CR4_PSE);
# endif

# ifdef IN_RING3
    /*
     * If a CR3 Sync is pending we may ignore the invalidate page operation
     * depending on the kind of sync and if it's a global page or not.
     * This doesn't make sense in GC/R0 so we'll skip it entirely there.
     */
# ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
    if (    VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
        ||  (   VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
             && fIsBigPage
             && PdeSrc.b.u1Global
            )
       )
# else
    if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL) )
# endif
    {
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
        return VINF_SUCCESS;
    }
# endif /* IN_RING3 */

# if PGM_GST_TYPE == PGM_TYPE_AMD64
    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPdpt = pgmPoolGetPage(pPool, pPml4eDst->u & X86_PML4E_PG_MASK);
    Assert(pShwPdpt);

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & SHW_PDPE_PG_MASK);
    Assert(pShwPde);

    Assert(pPml4eDst->n.u1Present && (pPml4eDst->u & SHW_PDPT_MASK));
    RTGCPHYS GCPhysPdpt = pPml4eSrc->u & X86_PML4E_PG_MASK;

    if (    !pPml4eSrc->n.u1Present
        ||  pShwPdpt->GCPhys != GCPhysPdpt)
    {
        LogFlow(("InvalidatePage: Out-of-sync PML4E (P/GCPhys) at %RGv GCPhys=%RGp vs %RGp Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
                 GCPtrPage, pShwPdpt->GCPhys, GCPhysPdpt, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
        pgmPoolFreeByPage(pPool, pShwPdpt, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
        ASMAtomicWriteSize(pPml4eDst, 0);
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
        PGM_INVL_GUEST_TLBS();
        return VINF_SUCCESS;
    }
    if (    pPml4eSrc->n.u1User != pPml4eDst->n.u1User
        ||  (!pPml4eSrc->n.u1Write && pPml4eDst->n.u1Write))
    {
        /*
         * Mark not present so we can resync the PML4E when it's used.
         */
        LogFlow(("InvalidatePage: Out-of-sync PML4E at %RGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
                 GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
        pgmPoolFreeByPage(pPool, pShwPdpt, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
        ASMAtomicWriteSize(pPml4eDst, 0);
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
        PGM_INVL_GUEST_TLBS();
    }
    else if (!pPml4eSrc->n.u1Accessed)
    {
        /*
         * Mark not present so we can set the accessed bit.
         */
        LogFlow(("InvalidatePage: Out-of-sync PML4E (A) at %RGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
                 GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
        pgmPoolFreeByPage(pPool, pShwPdpt, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
        ASMAtomicWriteSize(pPml4eDst, 0);
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
        PGM_INVL_GUEST_TLBS();
    }

    /* Check if the PDPT entry has changed. */
    Assert(pPdpeDst->n.u1Present && pPdpeDst->u & SHW_PDPT_MASK);
    RTGCPHYS GCPhysPd = PdpeSrc.u & GST_PDPE_PG_MASK;
    if (    !PdpeSrc.n.u1Present
        ||  pShwPde->GCPhys != GCPhysPd)
    {
        LogFlow(("InvalidatePage: Out-of-sync PDPE (P/GCPhys) at %RGv GCPhys=%RGp vs %RGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
                 GCPtrPage, pShwPde->GCPhys, GCPhysPd, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
        pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
        ASMAtomicWriteSize(pPdpeDst, 0);
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
        PGM_INVL_GUEST_TLBS();
        return VINF_SUCCESS;
    }
    if (    PdpeSrc.lm.u1User != pPdpeDst->lm.u1User
        ||  (!PdpeSrc.lm.u1Write && pPdpeDst->lm.u1Write))
    {
        /*
         * Mark not present so we can resync the PDPTE when it's used.
         */
        LogFlow(("InvalidatePage: Out-of-sync PDPE at %RGv PdpeSrc=%RX64 PdpeDst=%RX64\n",
                 GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
        pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
        ASMAtomicWriteSize(pPdpeDst, 0);
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
        PGM_INVL_GUEST_TLBS();
    }
    else if (!PdpeSrc.lm.u1Accessed)
    {
        /*
         * Mark not present so we can set the accessed bit.
         */
        LogFlow(("InvalidatePage: Out-of-sync PDPE (A) at %RGv PdpeSrc=%RX64 PdpeDst=%RX64\n",
                 GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
        pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
        ASMAtomicWriteSize(pPdpeDst, 0);
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
        PGM_INVL_GUEST_TLBS();
    }
# endif /* PGM_GST_TYPE == PGM_TYPE_AMD64 */

    /*
     * Deal with the Guest PDE.
     */
    rc = VINF_SUCCESS;
    if (PdeSrc.n.u1Present)
    {
        if (PdeDst.u & PGM_PDFLAGS_MAPPING)
        {
            /*
             * Conflict - Let SyncPT deal with it to avoid duplicate code.
             */
            Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
            Assert(PGMGetGuestMode(pVCpu) <= PGMMODE_PAE);
            pgmLock(pVM);
            rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage);
            pgmUnlock(pVM);
        }
        else if (    PdeSrc.n.u1User != PdeDst.n.u1User
                 ||  (!PdeSrc.n.u1Write && PdeDst.n.u1Write))
        {
            /*
             * Mark not present so we can resync the PDE when it's used.
             */
            LogFlow(("InvalidatePage: Out-of-sync at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
                     GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
            pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
            ASMAtomicWriteSize(pPdeDst, 0);
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
            PGM_INVL_GUEST_TLBS();
        }
        else if (!PdeSrc.n.u1Accessed)
        {
            /*
             * Mark not present so we can set the accessed bit.
             */
            LogFlow(("InvalidatePage: Out-of-sync (A) at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
                     GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
            pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
            ASMAtomicWriteSize(pPdeDst, 0);
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
            PGM_INVL_GUEST_TLBS();
        }
        else if (!fIsBigPage)
        {
            /*
             * 4KB - page.
             */
            PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK);
            RTGCPHYS GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
            /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
            GCPhys |= (iPDDst & 1) * (PAGE_SIZE/2);
# endif
            if (pShwPage->GCPhys == GCPhys)
            {
# if 0 /* likely cause of a major performance regression; must be SyncPageWorkerTrackDeref then */
                const unsigned iPTEDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
                PSHWPT pPT = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
                if (pPT->a[iPTEDst].n.u1Present)
                {
# ifdef PGMPOOL_WITH_USER_TRACKING
                    /* This is very unlikely with caching/monitoring enabled. */
                    PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pShwPage, pPT->a[iPTEDst].u & SHW_PTE_PG_MASK);
# endif
                    pPT->a[iPTEDst].u = 0;
                }
# else /* Syncing it here isn't 100% safe and it's probably not worth spending time syncing it. */
                rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, GCPtrPage, 1, 0);
                if (RT_SUCCESS(rc))
                    rc = VINF_SUCCESS;
# endif
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage4KBPages));
                PGM_INVL_PG(GCPtrPage);
            }
            else
            {
                /*
                 * The page table address changed.
                 */
                LogFlow(("InvalidatePage: Out-of-sync at %RGp PdeSrc=%RX64 PdeDst=%RX64 ShwGCPhys=%RGp iPDDst=%#x\n",
                         GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, iPDDst));
                pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
                ASMAtomicWriteSize(pPdeDst, 0);
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
                PGM_INVL_GUEST_TLBS();
            }
        }
        else
        {
            /*
             * 2/4MB - page.
             */
            /* Before freeing the page, check if anything really changed. */
            PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK);
            RTGCPHYS GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
            /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs. */
            GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
# endif
            if (    pShwPage->GCPhys == GCPhys
                &&  pShwPage->enmKind == BTH_PGMPOOLKIND_PT_FOR_BIG)
            {
                /* ASSUMES the given bits are identical for 4M and normal PDEs */
                /** @todo PAT */
                if (    (PdeSrc.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD))
                    ==  (PdeDst.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD))
                    &&  (   PdeSrc.b.u1Dirty /** @todo rainy day: What about read-only 4M pages? not very common, but still... */
                         || (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)))
                {
                    LogFlow(("Skipping flush for big page containing %RGv (PD=%X .u=%RX64)-> nothing has changed!\n", GCPtrPage, iPDSrc, PdeSrc.u));
                    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage4MBPagesSkip));
# if defined(IN_RC)
                    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
                    PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
# endif
                    return VINF_SUCCESS;
                }
            }

            /*
             * Ok, the page table is present and it's been changed in the guest.
             * If we're in host context, we'll just mark it as not present taking the lazy approach.
             * We could do this for some flushes in GC too, but we need an algorithm for
             * deciding which 4MB pages containing code likely to be executed very soon.
             */
            LogFlow(("InvalidatePage: Out-of-sync PD at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
                     GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
            pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
            ASMAtomicWriteSize(pPdeDst, 0);
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage4MBPages));
            PGM_INVL_BIG_PG(GCPtrPage);
        }
    }
    else
    {
        /*
         * Page directory is not present, mark shadow PDE not present.
         */
        if (!(PdeDst.u & PGM_PDFLAGS_MAPPING))
        {
            pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
            ASMAtomicWriteSize(pPdeDst, 0);
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
            PGM_INVL_PG(GCPtrPage);
        }
        else
        {
            Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDMappings));
        }
    }
# if defined(IN_RC)
    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
# endif
    return rc;

#else /* guest real and protected mode */
    /* There's no such thing as InvalidatePage when paging is disabled, so just ignore. */
    return VINF_SUCCESS;
#endif
}


#ifdef PGMPOOL_WITH_USER_TRACKING
/**
 * Update the tracking of shadowed pages.
 *
 * @param   pVCpu       The VMCPU handle.
 * @param   pShwPage    The shadow page.
 * @param   HCPhys      The physical page that is being dereferenced.
 */
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys)
{
# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    STAM_PROFILE_START(&pVM->pgm.s.StatTrackDeref, a);
    LogFlow(("SyncPageWorkerTrackDeref: Damn HCPhys=%RHp pShwPage->idx=%#x!!!\n", HCPhys, pShwPage->idx));

    /** @todo If this turns out to be a bottle neck (*very* likely) two things can be done:
     *        1. have a medium sized HCPhys -> GCPhys TLB (hash?)
     *        2. write protect all shadowed pages. I.e. implement caching.
     */
    /*
     * Find the guest address.
     */
    for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
         pRam;
         pRam = pRam->CTX_SUFF(pNext))
    {
        unsigned iPage = pRam->cb >> PAGE_SHIFT;
        while (iPage-- > 0)
        {
            if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
            {
                PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
                pgmTrackDerefGCPhys(pPool, pShwPage, &pRam->aPages[iPage]);
                pShwPage->cPresent--;
                pPool->cPresent--;
                STAM_PROFILE_STOP(&pVM->pgm.s.StatTrackDeref, a);
                return;
            }
        }
    }

    for (;;)
        AssertReleaseMsgFailed(("HCPhys=%RHp wasn't found!\n", HCPhys));
# else /* !PGMPOOL_WITH_GCPHYS_TRACKING */
    pShwPage->cPresent--;
    pVM->pgm.s.CTX_SUFF(pPool)->cPresent--;
# endif /* !PGMPOOL_WITH_GCPHYS_TRACKING */
}


/**
 * Update the tracking of shadowed pages.
 *
 * @param   pVCpu       The VMCPU handle.
 * @param   pShwPage    The shadow page.
 * @param   u16         The top 16-bit of the pPage->HCPhys.
 * @param   pPage       Pointer to the guest page. This will be modified.
 * @param   iPTDst      The index into the shadow table.
 */
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackAddref)(PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, uint16_t u16, PPGMPAGE pPage, const unsigned iPTDst)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    /*
     * Just deal with the simple first time here.
     */
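    /* Note: the 16-bit tracking word appears to pack a shadow reference count
       together with the pool index of the owning shadow page; once a page gains
       a second shadow reference, pgmPoolTrackPhysExtAddref() moves it over to an
       external reference extent. */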
    if (!u16)
    {
        STAM_COUNTER_INC(&pVM->pgm.s.StatTrackVirgin);
        u16 = PGMPOOL_TD_MAKE(1, pShwPage->idx);
    }
    else
        u16 = pgmPoolTrackPhysExtAddref(pVM, u16, pShwPage->idx);

    /* write back */
    Log2(("SyncPageWorkerTrackAddRef: u16=%#x->%#x iPTDst=%#x\n", u16, PGM_PAGE_GET_TRACKING(pPage), iPTDst));
    PGM_PAGE_SET_TRACKING(pPage, u16);

# endif /* PGMPOOL_WITH_GCPHYS_TRACKING */

    /* update statistics. */
    pVM->pgm.s.CTX_SUFF(pPool)->cPresent++;
    pShwPage->cPresent++;
    if (pShwPage->iFirstPresent > iPTDst)
        pShwPage->iFirstPresent = iPTDst;
}
#endif /* PGMPOOL_WITH_USER_TRACKING */
1411
1412
1413/**
1414 * Creates a 4K shadow page for a guest page.
1415 *
1416 * For 4M pages the caller must convert the PDE4M to a PTE, this includes adjusting the
1417 * physical address. The PdeSrc argument only the flags are used. No page structured
1418 * will be mapped in this function.
1419 *
1420 * @param pVCpu The VMCPU handle.
1421 * @param pPteDst Destination page table entry.
1422 * @param PdeSrc Source page directory entry (i.e. Guest OS page directory entry).
1423 * Can safely assume that only the flags are being used.
1424 * @param PteSrc Source page table entry (i.e. Guest OS page table entry).
1425 * @param pShwPage Pointer to the shadow page.
1426 * @param iPTDst The index into the shadow table.
1427 *
1428 * @remark Not used for 2/4MB pages!
1429 */
1430DECLINLINE(void) PGM_BTH_NAME(SyncPageWorker)(PVMCPU pVCpu, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc, PPGMPOOLPAGE pShwPage, unsigned iPTDst)
1431{
1432 if (PteSrc.n.u1Present)
1433 {
1434 PVM pVM = pVCpu->CTX_SUFF(pVM);
1435
1436 /*
1437 * Find the ram range.
1438 */
1439 PPGMPAGE pPage;
1440 int rc = pgmPhysGetPageEx(&pVM->pgm.s, PteSrc.u & GST_PTE_PG_MASK, &pPage);
1441 if (RT_SUCCESS(rc))
1442 {
1443#ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
1444 /* Try make the page writable if necessary. */
1445 if ( PteSrc.n.u1Write
1446 && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
1447 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
1448 {
1449 rc = pgmPhysPageMakeWritableUnlocked(pVM, pPage, PteSrc.u & GST_PTE_PG_MASK);
1450 AssertRC(rc);
1451 }
1452#endif
1453
1454 /** @todo investiage PWT, PCD and PAT. */
1455 /*
1456 * Make page table entry.
1457 */
1458 SHWPTE PteDst;
1459 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1460 {
1461 /** @todo r=bird: Are we actually handling dirty and access bits for pages with access handlers correctly? No. */
1462 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
1463 {
1464#if PGM_SHW_TYPE == PGM_TYPE_EPT
1465 PteDst.u = PGM_PAGE_GET_HCPHYS(pPage);
1466 PteDst.n.u1Present = 1;
1467 PteDst.n.u1Execute = 1;
1468 PteDst.n.u1IgnorePAT = 1;
1469 PteDst.n.u3EMT = VMX_EPT_MEMTYPE_WB;
1470 /* PteDst.n.u1Write = 0 && PteDst.n.u1Size = 0 */
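                    /* Leaving the write bit clear keeps the access handler armed:
                       guest writes to this page still cause EPT violations. */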
1471#else
1472 PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
1473 | PGM_PAGE_GET_HCPHYS(pPage);
1474#endif
1475 }
1476 else
1477 {
1478 LogFlow(("SyncPageWorker: monitored page (%RHp) -> mark not present\n", PGM_PAGE_GET_HCPHYS(pPage)));
1479 PteDst.u = 0;
1480 }
1481 /** @todo count these two kinds. */
1482 }
1483 else
1484 {
1485#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
1486 /*
1487 * If the page or page directory entry is not marked accessed,
1488 * we mark the page not present.
1489 */
1490 if (!PteSrc.n.u1Accessed || !PdeSrc.n.u1Accessed)
1491 {
1492 LogFlow(("SyncPageWorker: page and/or page directory not accessed -> mark not present\n"));
1493 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,AccessedPage));
1494 PteDst.u = 0;
1495 }
1496 else
1497 /*
1498 * If the page is not flagged as dirty and is writable, then make it read-only, so we can set the dirty bit
1499 * when the page is modified.
1500 */
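                /* PGM_PTFLAGS_TRACK_DIRTY is stashed in an available PTE bit so the
                   #PF handler (see CheckPageFault below) can tell this synthetic
                   write protection apart from a real one and set the guest dirty
                   bit instead of reflecting a trap to the guest. */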
1501 if (!PteSrc.n.u1Dirty && (PdeSrc.n.u1Write & PteSrc.n.u1Write))
1502 {
1503 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPage));
1504 PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
1505 | PGM_PAGE_GET_HCPHYS(pPage)
1506 | PGM_PTFLAGS_TRACK_DIRTY;
1507 }
1508 else
1509#endif
1510 {
1511 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageSkipped));
1512#if PGM_SHW_TYPE == PGM_TYPE_EPT
1513 PteDst.u = PGM_PAGE_GET_HCPHYS(pPage);
1514 PteDst.n.u1Present = 1;
1515 PteDst.n.u1Write = 1;
1516 PteDst.n.u1Execute = 1;
1517 PteDst.n.u1IgnorePAT = 1;
1518 PteDst.n.u3EMT = VMX_EPT_MEMTYPE_WB;
1519 /* PteDst.n.u1Size = 0 */
1520#else
1521 PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
1522 | PGM_PAGE_GET_HCPHYS(pPage);
1523#endif
1524 }
1525 }
1526
1527 /*
1528 * Make sure only allocated pages are mapped writable.
1529 */
1530 if ( PteDst.n.u1Write
1531 && PteDst.n.u1Present
1532 && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
1533 {
1534 PteDst.n.u1Write = 0; /** @todo this isn't quite working yet. */
1535 Log3(("SyncPageWorker: write-protecting %RGp pPage=%R[pgmpage] at iPTDst=%d\n", (RTGCPHYS)(PteSrc.u & X86_PTE_PAE_PG_MASK), pPage, iPTDst));
1536 }
1537
1538#ifdef PGMPOOL_WITH_USER_TRACKING
1539 /*
1540 * Keep user track up to date.
1541 */
1542 if (PteDst.n.u1Present)
1543 {
1544 if (!pPteDst->n.u1Present)
1545 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
1546 else if ((pPteDst->u & SHW_PTE_PG_MASK) != (PteDst.u & SHW_PTE_PG_MASK))
1547 {
1548 Log2(("SyncPageWorker: deref! *pPteDst=%RX64 PteDst=%RX64\n", (uint64_t)pPteDst->u, (uint64_t)PteDst.u));
1549 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
1550 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
1551 }
1552 }
1553 else if (pPteDst->n.u1Present)
1554 {
1555 Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u));
1556 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
1557 }
1558#endif /* PGMPOOL_WITH_USER_TRACKING */
1559
1560 /*
1561 * Update statistics and commit the entry.
1562 */
1563#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
1564 if (!PteSrc.n.u1Global)
1565 pShwPage->fSeenNonGlobal = true;
1566#endif
1567 ASMAtomicWriteSize(pPteDst, PteDst.u);
1568 }
1569 /* else MMIO or invalid page, we must handle them manually in the #PF handler. */
1570 /** @todo count these. */
1571 }
1572 else
1573 {
1574 /*
1575 * Page not-present.
1576 */
1577 LogFlow(("SyncPageWorker: page not present in Pte\n"));
1578#ifdef PGMPOOL_WITH_USER_TRACKING
1579 /* Keep user track up to date. */
1580 if (pPteDst->n.u1Present)
1581 {
1582 Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u));
1583 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
1584 }
1585#endif /* PGMPOOL_WITH_USER_TRACKING */
1586 ASMAtomicWriteSize(pPteDst, 0);
1587 /** @todo count these. */
1588 }
1589}
1590
1591
1592/**
1593 * Syncs a guest OS page.
1594 *
1595 * There are no conflicts at this point, nor is there any need for
1596 * page table allocations.
1597 *
1598 * @returns VBox status code.
1599 * @returns VINF_PGM_SYNCPAGE_MODIFIED_PDE if it modifies the PDE in any way.
1600 * @param pVCpu The VMCPU handle.
1601 * @param PdeSrc Page directory entry of the guest.
1602 * @param GCPtrPage Guest context page address.
1603 * @param cPages Number of pages to sync (PGM_SYNC_N_PAGES) (default=1).
1604 * @param uErr Fault error (X86_TRAP_PF_*).
1605 */
1606PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr)
1607{
1608 PVM pVM = pVCpu->CTX_SUFF(pVM);
1609 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1610 LogFlow(("SyncPage: GCPtrPage=%RGv cPages=%u uErr=%#x\n", GCPtrPage, cPages, uErr));
1611
1612#if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
1613 || PGM_GST_TYPE == PGM_TYPE_PAE \
1614 || PGM_GST_TYPE == PGM_TYPE_AMD64) \
1615 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
1616 && PGM_SHW_TYPE != PGM_TYPE_EPT
1617
1618# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
1619 bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVCpu) & MSR_K6_EFER_NXE);
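    /* (NX is only honoured when the guest has EFER.NXE set; otherwise PTE bit 63
        must be ignored when comparing guest and shadow entries below.) */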
1620# endif
1621
1622 /*
1623 * Assert preconditions.
1624 */
1625 Assert(PdeSrc.n.u1Present);
1626 Assert(cPages);
1627 STAM_COUNTER_INC(&pVCpu->pgm.s.StatSyncPagePD[(GCPtrPage >> GST_PD_SHIFT) & GST_PD_MASK]);
1628
1629 /*
1630 * Get the shadow PDE, find the shadow page table in the pool.
1631 */
1632# if PGM_SHW_TYPE == PGM_TYPE_32BIT
1633 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
1634 PX86PDE pPdeDst = pgmShwGet32BitPDEPtr(&pVCpu->pgm.s, GCPtrPage);
1635
1636 /* Fetch the pgm pool shadow descriptor. */
1637 PPGMPOOLPAGE pShwPde = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1638 Assert(pShwPde);
1639
1640# elif PGM_SHW_TYPE == PGM_TYPE_PAE
1641 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
1642 PPGMPOOLPAGE pShwPde = NULL;
1643 PX86PDPAE pPDDst;
1644
1645 /* Fetch the pgm pool shadow descriptor. */
1646 int rc = pgmShwGetPaePoolPagePD(&pVCpu->pgm.s, GCPtrPage, &pShwPde);
1647 AssertRCSuccessReturn(rc, rc);
1648 Assert(pShwPde);
1649
1650 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
1651 PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
1652
1653# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
1654 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
1655 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1656 PX86PDPAE pPDDst;
1657 PX86PDPT pPdptDst;
1658
1659 int rc = pgmShwGetLongModePDPtr(pVCpu, GCPtrPage, NULL, &pPdptDst, &pPDDst);
1660 AssertRCSuccessReturn(rc, rc);
1661 Assert(pPDDst && pPdptDst);
1662 PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
1663# endif
1664 SHWPDE PdeDst = *pPdeDst;
1665 AssertMsg(PdeDst.n.u1Present, ("%p=%llx\n", pPdeDst, (uint64_t)PdeDst.u));
1666 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK);
1667
1668# if PGM_GST_TYPE == PGM_TYPE_AMD64
1669 /* Fetch the pgm pool shadow descriptor. */
1670 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
1671 Assert(pShwPde);
1672# endif
1673
1674# if defined(IN_RC)
1675 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
1676 PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
1677# endif
1678
1679 /*
1680 * Check that the page is present and that the shadow PDE isn't out of sync.
1681 */
1682# if PGM_GST_TYPE == PGM_TYPE_AMD64
1683 const bool fBigPage = PdeSrc.b.u1Size;
1684# else
1685 const bool fBigPage = PdeSrc.b.u1Size && (CPUMGetGuestCR4(pVCpu) & X86_CR4_PSE);
1686# endif
1687 RTGCPHYS GCPhys;
1688 if (!fBigPage)
1689 {
1690 GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
1691# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1692 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
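                 /* Example: a 32-bit guest PT holds 1024 entries (4 MB), a PAE shadow PT
                    only 512 (2 MB); odd shadow PDs thus take the upper 2 KB half of the
                    4 KB guest page table (512 entries times 4 bytes). */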
1693 GCPhys |= (iPDDst & 1) * (PAGE_SIZE/2);
1694# endif
1695 }
1696 else
1697 {
1698 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
1699# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1700 /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs. */
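                 /* (Bit 21 of GCPtrPage selects the lower or upper 2 MB half of the
                     guest's 4 MB page.) */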
1701 GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
1702# endif
1703 }
1704 if ( pShwPage->GCPhys == GCPhys
1705 && PdeSrc.n.u1Present
1706 && (PdeSrc.n.u1User == PdeDst.n.u1User)
1707 && (PdeSrc.n.u1Write == PdeDst.n.u1Write || !PdeDst.n.u1Write)
1708# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
1709 && (!fNoExecuteBitValid || PdeSrc.n.u1NoExecute == PdeDst.n.u1NoExecute)
1710# endif
1711 )
1712 {
1713 /*
1714 * Check that the PDE is marked accessed already.
1715 * Since we set the accessed bit *before* getting here on a #PF, this
1716 * check is only meant for dealing with non-#PF'ing paths.
1717 */
1718 if (PdeSrc.n.u1Accessed)
1719 {
1720 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1721 if (!fBigPage)
1722 {
1723 /*
1724 * 4KB Page - Map the guest page table.
1725 */
1726 PGSTPT pPTSrc;
1727 int rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
1728 if (RT_SUCCESS(rc))
1729 {
1730# ifdef PGM_SYNC_N_PAGES
1731 Assert(cPages == 1 || !(uErr & X86_TRAP_PF_P));
1732 if ( cPages > 1
1733 && !(uErr & X86_TRAP_PF_P)
1734 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1735 {
1736 /*
1737 * This code path is currently only taken when the caller is PGMTrap0eHandler
1738 * for non-present pages!
1739 *
1740 * We're setting PGM_SYNC_NR_PAGES pages around the faulting page to sync it and
1741 * deal with locality.
1742 */
1743 unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1744# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
1745 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
1746 const unsigned offPTSrc = ((GCPtrPage >> SHW_PD_SHIFT) & 1) * 512;
1747# else
1748 const unsigned offPTSrc = 0;
1749# endif
1750 const unsigned iPTDstEnd = RT_MIN(iPTDst + PGM_SYNC_NR_PAGES / 2, RT_ELEMENTS(pPTDst->a));
1751 if (iPTDst < PGM_SYNC_NR_PAGES / 2)
1752 iPTDst = 0;
1753 else
1754 iPTDst -= PGM_SYNC_NR_PAGES / 2;
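                    /* E.g. assuming PGM_SYNC_NR_PAGES is 8, this syncs a window of up
                       to eight entries centred on the faulting one, clamped to the
                       bounds of the shadow page table. */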
1755 for (; iPTDst < iPTDstEnd; iPTDst++)
1756 {
1757 if (!pPTDst->a[iPTDst].n.u1Present)
1758 {
1759 GSTPTE PteSrc = pPTSrc->a[offPTSrc + iPTDst];
1760 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(GST_PT_MASK << GST_PT_SHIFT)) | ((offPTSrc + iPTDst) << PAGE_SHIFT);
1761 NOREF(GCPtrCurPage);
1762#ifndef IN_RING0
1763 /*
1764 * Assuming kernel code will be marked as supervisor - and not as user level
1765 * and executed using a conforming code selector - And marked as readonly.
1766 * Also assume that if we're monitoring a page, it's of no interest to CSAM.
1767 */
1768 PPGMPAGE pPage;
1769 if ( ((PdeSrc.u & PteSrc.u) & (X86_PTE_RW | X86_PTE_US))
1770 || iPTDst == ((GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK) /* always sync GCPtrPage */
1771 || !CSAMDoesPageNeedScanning(pVM, (RTRCPTR)GCPtrCurPage)
1772 || ( (pPage = pgmPhysGetPage(&pVM->pgm.s, PteSrc.u & GST_PTE_PG_MASK))
1773 && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1774 )
1775#endif /* else: CSAM not active */
1776 PGM_BTH_NAME(SyncPageWorker)(pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
1777 Log2(("SyncPage: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx%s\n",
1778 GCPtrCurPage, PteSrc.n.u1Present,
1779 PteSrc.n.u1Write & PdeSrc.n.u1Write,
1780 PteSrc.n.u1User & PdeSrc.n.u1User,
1781 (uint64_t)PteSrc.u,
1782 (uint64_t)pPTDst->a[iPTDst].u,
1783 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1784 }
1785 }
1786 }
1787 else
1788# endif /* PGM_SYNC_N_PAGES */
1789 {
1790 const unsigned iPTSrc = (GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK;
1791 GSTPTE PteSrc = pPTSrc->a[iPTSrc];
1792 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1793 PGM_BTH_NAME(SyncPageWorker)(pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
1794 Log2(("SyncPage: 4K %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s\n",
1795 GCPtrPage, PteSrc.n.u1Present,
1796 PteSrc.n.u1Write & PdeSrc.n.u1Write,
1797 PteSrc.n.u1User & PdeSrc.n.u1User,
1798 (uint64_t)PteSrc.u,
1799 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1800 }
1801 }
1802 else /* MMIO or invalid page: emulated in #PF handler. */
1803 {
1804 LogFlow(("PGM_GCPHYS_2_PTR %RGp failed with %Rrc\n", GCPhys, rc));
1805 Assert(!pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK].n.u1Present);
1806 }
1807 }
1808 else
1809 {
1810 /*
1811 * 4/2MB page - lazy syncing shadow 4K pages.
1812 * (There are many causes of getting here, it's no longer only CSAM.)
1813 */
1814 /* Calculate the GC physical address of this 4KB shadow page. */
1815 RTGCPHYS GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc) | (GCPtrPage & GST_BIG_PAGE_OFFSET_MASK);
1816 /* Find ram range. */
1817 PPGMPAGE pPage;
1818 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1819 if (RT_SUCCESS(rc))
1820 {
1821# ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
1822 /* Try to make the page writable if necessary. */
1823 if ( PdeSrc.n.u1Write
1824 && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
1825 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
1826 {
1827 rc = pgmPhysPageMakeWritableUnlocked(pVM, pPage, GCPhys);
1828 AssertRC(rc);
1829 }
1830# endif
1831
1832 /*
1833 * Make shadow PTE entry.
1834 */
1835 SHWPTE PteDst;
1836 PteDst.u = (PdeSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
1837 | PGM_PAGE_GET_HCPHYS(pPage);
1838 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
1839 {
1840 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
1841 PteDst.n.u1Write = 0;
1842 else
1843 PteDst.u = 0;
1844 }
1845 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1846# ifdef PGMPOOL_WITH_USER_TRACKING
1847 if (PteDst.n.u1Present && !pPTDst->a[iPTDst].n.u1Present)
1848 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
1849# endif
1850 /* Make sure only allocated pages are mapped writable. */
1851 if ( PteDst.n.u1Write
1852 && PteDst.n.u1Present
1853 && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
1854 {
1855 PteDst.n.u1Write = 0; /** @todo this isn't quite working yet... */
1856 Log3(("SyncPage: write-protecting %RGp pPage=%R[pgmpage] at %RGv\n", GCPhys, pPage, GCPtrPage));
1857 }
1858
1859 pPTDst->a[iPTDst] = PteDst;
1860
1861
1862 /*
1863 * If the page is not flagged as dirty and is writable, then make it read-only
1864 * at PD level, so we can set the dirty bit when the page is modified.
1865 *
1866 * ASSUMES that page access handlers are implemented on page table entry level.
1867 * Thus we will first catch the dirty access and set PDE.D and restart. If
1868 * there is an access handler, we'll trap again and let it work on the problem.
1869 */
1870 /** @todo r=bird: figure out why we need this here, SyncPT should've taken care of this already.
1871 * As for invlpg, it simply frees the whole shadow PT.
1872 * ...It's possibly because the guest clears it and doesn't really tell us... */
1873 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
1874 {
1875 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageBig));
1876 PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
1877 PdeDst.n.u1Write = 0;
1878 }
1879 else
1880 {
1881 PdeDst.au32[0] &= ~PGM_PDFLAGS_TRACK_DIRTY;
1882 PdeDst.n.u1Write = PdeSrc.n.u1Write;
1883 }
1884 ASMAtomicWriteSize(pPdeDst, PdeDst.u);
1885 Log2(("SyncPage: BIG %RGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx} GCPhys=%RGp%s\n",
1886 GCPtrPage, PdeSrc.n.u1Present, PdeSrc.n.u1Write, PdeSrc.n.u1User, (uint64_t)PdeSrc.u, GCPhys,
1887 PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
1888 }
1889 else
1890 LogFlow(("PGM_GCPHYS_2_PTR %RGp (big) failed with %Rrc\n", GCPhys, rc));
1891 }
1892# if defined(IN_RC)
1893 /* Release the lock on the dynamic pPdeDst mapping taken at the top of this function. */
1894 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
1895# endif
1896 return VINF_SUCCESS;
1897 }
1898 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPagePDNAs));
1899 }
1900 else
1901 {
1902 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPagePDOutOfSync));
1903 Log2(("SyncPage: Out-Of-Sync PDE at %RGp PdeSrc=%RX64 PdeDst=%RX64 (GCPhys %RGp vs %RGp)\n",
1904 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, GCPhys));
1905 }
1906
1907 /*
1908 * Mark the PDE not present. Restart the instruction and let #PF call SyncPT.
1909 * Yea, I'm lazy.
1910 */
1911 pgmPoolFreeByPage(pPool, pShwPage, pShwPde->idx, iPDDst);
1912 ASMAtomicWriteSize(pPdeDst, 0);
1913
1914# if defined(IN_RC)
1915 /* Release the lock on the dynamic pPdeDst mapping taken at the top of this function. */
1916 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
1917# endif
1918 PGM_INVL_GUEST_TLBS();
1919 return VINF_PGM_SYNCPAGE_MODIFIED_PDE;
1920
1921#elif (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
1922 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
1923 && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT) \
1924 && !defined(IN_RC)
1925
1926# ifdef PGM_SYNC_N_PAGES
1927 /*
1928 * Get the shadow PDE, find the shadow page table in the pool.
1929 */
1930# if PGM_SHW_TYPE == PGM_TYPE_32BIT
1931 X86PDE PdeDst = pgmShwGet32BitPDE(&pVCpu->pgm.s, GCPtrPage);
1932
1933# elif PGM_SHW_TYPE == PGM_TYPE_PAE
1934 X86PDEPAE PdeDst = pgmShwGetPaePDE(&pVCpu->pgm.s, GCPtrPage);
1935
1936# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
1937 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
1938 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; NOREF(iPdpt);
1939 PX86PDPAE pPDDst;
1940 X86PDEPAE PdeDst;
1941 PX86PDPT pPdptDst;
1942
1943 int rc = pgmShwGetLongModePDPtr(pVCpu, GCPtrPage, NULL, &pPdptDst, &pPDDst);
1944 AssertRCSuccessReturn(rc, rc);
1945 Assert(pPDDst && pPdptDst);
1946 PdeDst = pPDDst->a[iPDDst];
1947# elif PGM_SHW_TYPE == PGM_TYPE_EPT
1948 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
1949 PEPTPD pPDDst;
1950 EPTPDE PdeDst;
1951
1952 int rc = pgmShwGetEPTPDPtr(pVCpu, GCPtrPage, NULL, &pPDDst);
1953 if (rc != VINF_SUCCESS)
1954 {
1955 AssertRC(rc);
1956 return rc;
1957 }
1958 Assert(pPDDst);
1959 PdeDst = pPDDst->a[iPDDst];
1960# endif
1961 AssertMsg(PdeDst.n.u1Present, ("%#llx\n", (uint64_t)PdeDst.u));
1962 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK);
1963 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1964
1965 Assert(cPages == 1 || !(uErr & X86_TRAP_PF_P));
1966 if ( cPages > 1
1967 && !(uErr & X86_TRAP_PF_P)
1968 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1969 {
1970 /*
1971 * This code path is currently only taken when the caller is PGMTrap0eHandler
1972 * for non-present pages!
1973 *
1974 * We're setting PGM_SYNC_NR_PAGES pages around the faulting page to sync it and
1975 * deal with locality.
1976 */
1977 unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
1978 const unsigned iPTDstEnd = RT_MIN(iPTDst + PGM_SYNC_NR_PAGES / 2, RT_ELEMENTS(pPTDst->a));
1979 if (iPTDst < PGM_SYNC_NR_PAGES / 2)
1980 iPTDst = 0;
1981 else
1982 iPTDst -= PGM_SYNC_NR_PAGES / 2;
1983 for (; iPTDst < iPTDstEnd; iPTDst++)
1984 {
1985 if (!pPTDst->a[iPTDst].n.u1Present)
1986 {
1987 GSTPTE PteSrc;
1988
1989 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
1990
1991 /* Fake the page table entry */
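                /* (No guest paging in this mode, so this is an identity mapping: the
                    page frame is GCPtrCurPage itself with R/W, U/S, A and D forced on.) */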
1992 PteSrc.u = GCPtrCurPage;
1993 PteSrc.n.u1Present = 1;
1994 PteSrc.n.u1Dirty = 1;
1995 PteSrc.n.u1Accessed = 1;
1996 PteSrc.n.u1Write = 1;
1997 PteSrc.n.u1User = 1;
1998
1999 PGM_BTH_NAME(SyncPageWorker)(pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
2000
2001 Log2(("SyncPage: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx%s\n",
2002 GCPtrCurPage, PteSrc.n.u1Present,
2003 PteSrc.n.u1Write & PdeSrc.n.u1Write,
2004 PteSrc.n.u1User & PdeSrc.n.u1User,
2005 (uint64_t)PteSrc.u,
2006 (uint64_t)pPTDst->a[iPTDst].u,
2007 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
2008
2009 if (RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)))
2010 break;
2011 }
2012 else
2013 Log4(("%RGv iPTDst=%x pPTDst->a[iPTDst] %RX64\n", (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT), iPTDst, pPTDst->a[iPTDst].u));
2014 }
2015 }
2016 else
2017# endif /* PGM_SYNC_N_PAGES */
2018 {
2019 GSTPTE PteSrc;
2020 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
2021 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
2022
2023 /* Fake the page table entry */
2024 PteSrc.u = GCPtrCurPage;
2025 PteSrc.n.u1Present = 1;
2026 PteSrc.n.u1Dirty = 1;
2027 PteSrc.n.u1Accessed = 1;
2028 PteSrc.n.u1Write = 1;
2029 PteSrc.n.u1User = 1;
2030 PGM_BTH_NAME(SyncPageWorker)(pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
2031
2032 Log2(("SyncPage: 4K %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx%s\n",
2033 GCPtrPage, PteSrc.n.u1Present,
2034 PteSrc.n.u1Write & PdeSrc.n.u1Write,
2035 PteSrc.n.u1User & PdeSrc.n.u1User,
2036 (uint64_t)PteSrc.u,
2037 (uint64_t)pPTDst->a[iPTDst].u,
2038 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
2039 }
2040 return VINF_SUCCESS;
2041
2042#else
2043 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE));
2044 return VERR_INTERNAL_ERROR;
2045#endif
2046}
2047
2048
2049#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
2050/**
2051 * Investigate page fault and handle write protection page faults caused by
2052 * dirty bit tracking.
2053 *
2054 * @returns VBox status code.
2055 * @param pVCpu The VMCPU handle.
2056 * @param uErr Page fault error code.
2057 * @param pPdeDst Shadow page directory entry.
2058 * @param pPdeSrc Guest page directory entry.
2059 * @param GCPtrPage Guest context page address.
2060 */
2061PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage)
2062{
2063 bool fWriteProtect = !!(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP);
2064 bool fUserLevelFault = !!(uErr & X86_TRAP_PF_US);
2065 bool fWriteFault = !!(uErr & X86_TRAP_PF_RW);
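    /* Note: when CR0.WP is clear, supervisor writes ignore the R/W bit; the
       write-fault checks below therefore only count a missing write permission
       as a real fault for user-mode accesses or when write protection is on. */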
2066# if PGM_GST_TYPE == PGM_TYPE_AMD64
2067 bool fBigPagesSupported = true;
2068# else
2069 bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PSE);
2070# endif
2071# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
2072 bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVCpu) & MSR_K6_EFER_NXE);
2073# endif
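    /* Records which level raised the fault (0=PML4E, 1=PDPE, 2=PDE) for the
       common l_UpperLevelPageFault exit path below. */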
2074 unsigned uPageFaultLevel;
2075 int rc;
2076 PVM pVM = pVCpu->CTX_SUFF(pVM);
2077 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2078
2079 Assert(PGMIsLockOwner(pVM));
2080
2081 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2082 LogFlow(("CheckPageFault: GCPtrPage=%RGv uErr=%#x PdeSrc=%08x\n", GCPtrPage, uErr, pPdeSrc->u));
2083
2084# if PGM_GST_TYPE == PGM_TYPE_PAE \
2085 || PGM_GST_TYPE == PGM_TYPE_AMD64
2086
2087# if PGM_GST_TYPE == PGM_TYPE_AMD64
2088 PX86PML4E pPml4eSrc;
2089 PX86PDPE pPdpeSrc;
2090
2091 pPdpeSrc = pgmGstGetLongModePDPTPtr(&pVCpu->pgm.s, GCPtrPage, &pPml4eSrc);
2092 Assert(pPml4eSrc);
2093
2094 /*
2095 * Real page fault? (PML4E level)
2096 */
2097 if ( (uErr & X86_TRAP_PF_RSVD)
2098 || !pPml4eSrc->n.u1Present
2099 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && pPml4eSrc->n.u1NoExecute)
2100 || (fWriteFault && !pPml4eSrc->n.u1Write && (fUserLevelFault || fWriteProtect))
2101 || (fUserLevelFault && !pPml4eSrc->n.u1User)
2102 )
2103 {
2104 uPageFaultLevel = 0;
2105 goto l_UpperLevelPageFault;
2106 }
2107 Assert(pPdpeSrc);
2108
2109# else /* PAE */
2110 PX86PDPE pPdpeSrc = pgmGstGetPaePDPEPtr(&pVCpu->pgm.s, GCPtrPage);
2111# endif /* PAE */
2112
2113 /*
2114 * Real page fault? (PDPE level)
2115 */
2116 if ( (uErr & X86_TRAP_PF_RSVD)
2117 || !pPdpeSrc->n.u1Present
2118# if PGM_GST_TYPE == PGM_TYPE_AMD64 /* NX, r/w, u/s bits in the PDPE are long mode only */
2119 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && pPdpeSrc->lm.u1NoExecute)
2120 || (fWriteFault && !pPdpeSrc->lm.u1Write && (fUserLevelFault || fWriteProtect))
2121 || (fUserLevelFault && !pPdpeSrc->lm.u1User)
2122# endif
2123 )
2124 {
2125 uPageFaultLevel = 1;
2126 goto l_UpperLevelPageFault;
2127 }
2128# endif
2129
2130 /*
2131 * Real page fault? (PDE level)
2132 */
2133 if ( (uErr & X86_TRAP_PF_RSVD)
2134 || !pPdeSrc->n.u1Present
2135# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
2136 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && pPdeSrc->n.u1NoExecute)
2137# endif
2138 || (fWriteFault && !pPdeSrc->n.u1Write && (fUserLevelFault || fWriteProtect))
2139 || (fUserLevelFault && !pPdeSrc->n.u1User) )
2140 {
2141 uPageFaultLevel = 2;
2142 goto l_UpperLevelPageFault;
2143 }
2144
2145 /*
2146 * First check the easy case where the page directory has been marked read-only to track
2147 * the dirty bit of an emulated BIG page
2148 */
2149 if (pPdeSrc->b.u1Size && fBigPagesSupported)
2150 {
2151 /* Mark guest page directory as accessed */
2152# if PGM_GST_TYPE == PGM_TYPE_AMD64
2153 pPml4eSrc->n.u1Accessed = 1;
2154 pPdpeSrc->lm.u1Accessed = 1;
2155# endif
2156 pPdeSrc->b.u1Accessed = 1;
2157
2158 /*
2159 * Only write protection page faults are relevant here.
2160 */
2161 if (fWriteFault)
2162 {
2163 /* Mark guest page directory as dirty (BIG page only). */
2164 pPdeSrc->b.u1Dirty = 1;
2165
2166 if ( pPdeDst->n.u1Present
2167 && (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY))
2168 {
2169 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
2170 Assert(pPdeSrc->b.u1Write);
2171
2172 /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
2173 * fault again and take this path to only invalidate the entry.
2174 */
2175 pPdeDst->n.u1Write = 1;
2176 pPdeDst->n.u1Accessed = 1;
2177 pPdeDst->au32[0] &= ~PGM_PDFLAGS_TRACK_DIRTY;
2178 PGM_INVL_BIG_PG(GCPtrPage);
2179 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2180 return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;
2181 }
2182 }
2183 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2184 return VINF_PGM_NO_DIRTY_BIT_TRACKING;
2185 }
2186 /* else: 4KB page table */
2187
2188 /*
2189 * Map the guest page table.
2190 */
2191 PGSTPT pPTSrc;
2192 rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
2193 if (RT_SUCCESS(rc))
2194 {
2195 /*
2196 * Real page fault?
2197 */
2198 PGSTPTE pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
2199 const GSTPTE PteSrc = *pPteSrc;
2200 if ( !PteSrc.n.u1Present
2201# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
2202 || (fNoExecuteBitValid && (uErr & X86_TRAP_PF_ID) && PteSrc.n.u1NoExecute)
2203# endif
2204 || (fWriteFault && !PteSrc.n.u1Write && (fUserLevelFault || fWriteProtect))
2205 || (fUserLevelFault && !PteSrc.n.u1User)
2206 )
2207 {
2208 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF));
2209 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2210 LogFlow(("CheckPageFault: real page fault at %RGv PteSrc.u=%08x (2)\n", GCPtrPage, PteSrc.u));
2211
2212 /* Check the present bit as the shadow tables can cause different error codes by being out of sync.
2213 * See the 2nd case above as well.
2214 */
2215 if (pPdeSrc->n.u1Present && pPteSrc->n.u1Present)
2216 TRPMSetErrorCode(pVCpu, uErr | X86_TRAP_PF_P); /* page-level protection violation */
2217
2218 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2219 return VINF_EM_RAW_GUEST_TRAP;
2220 }
2221 LogFlow(("CheckPageFault: page fault at %RGv PteSrc.u=%08x\n", GCPtrPage, PteSrc.u));
2222
2223 /*
2224 * Set the accessed bits in the page directory and the page table.
2225 */
2226# if PGM_GST_TYPE == PGM_TYPE_AMD64
2227 pPml4eSrc->n.u1Accessed = 1;
2228 pPdpeSrc->lm.u1Accessed = 1;
2229# endif
2230 pPdeSrc->n.u1Accessed = 1;
2231 pPteSrc->n.u1Accessed = 1;
2232
2233 /*
2234 * Only write protection page faults are relevant here.
2235 */
2236 if (fWriteFault)
2237 {
2238 /* Write access, so mark guest entry as dirty. */
2239# ifdef VBOX_WITH_STATISTICS
2240 if (!pPteSrc->n.u1Dirty)
2241 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtiedPage));
2242 else
2243 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageAlreadyDirty));
2244# endif
2245
2246 pPteSrc->n.u1Dirty = 1;
2247
2248 if (pPdeDst->n.u1Present)
2249 {
2250#ifndef IN_RING0
2251 /* Bail out here as pgmPoolGetPageByHCPhys will return NULL and we'll crash below.
2252 * Our individual shadow handlers will provide more information and force a fatal exit.
2253 */
2254 if (MMHyperIsInsideArea(pVM, (RTGCPTR)GCPtrPage))
2255 {
2256 LogRel(("CheckPageFault: write to hypervisor region %RGv\n", GCPtrPage));
2257 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2258 return VINF_SUCCESS;
2259 }
2260#endif
2261 /*
2262 * Map shadow page table.
2263 */
2264 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPdeDst->u & SHW_PDE_PG_MASK);
2265 if (pShwPage)
2266 {
2267 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2268 PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
2269 if (pPteDst->n.u1Present) /** @todo Optimize accessed bit emulation? */
2270 {
2271 if (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY)
2272 {
2273 LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
2274# ifdef VBOX_STRICT
2275 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
2276 if (pPage)
2277 AssertMsg(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage),
2278 ("Unexpected dirty bit tracking on monitored page %RGv (phys %RGp)!!!!!!\n", GCPtrPage, pPteSrc->u & X86_PTE_PAE_PG_MASK));
2279# endif
2280 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
2281
2282 Assert(pPteSrc->n.u1Write);
2283
2284 /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
2285 * fault again and take this path to only invalidate the entry.
2286 */
2287 pPteDst->n.u1Write = 1;
2288 pPteDst->n.u1Dirty = 1;
2289 pPteDst->n.u1Accessed = 1;
2290 pPteDst->au32[0] &= ~PGM_PTFLAGS_TRACK_DIRTY;
2291 PGM_INVL_PG(GCPtrPage);
2292
2293 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2294 return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;
2295 }
2296 else
2297 if ( pPteDst->n.u1Write == 1
2298 && pPteDst->n.u1Accessed == 1)
2299 {
2300 /* Stale TLB entry. */
2301 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale));
2302 PGM_INVL_PG(GCPtrPage);
2303
2304 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2305 return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;
2306 }
2307 }
2308 }
2309 else
2310 AssertMsgFailed(("pgmPoolGetPageByHCPhys %RGp failed!\n", pPdeDst->u & SHW_PDE_PG_MASK));
2311 }
2312 }
2313/** @todo Optimize accessed bit emulation? */
2314# ifdef VBOX_STRICT
2315 /*
2316 * Sanity check.
2317 */
2318 else if ( !pPteSrc->n.u1Dirty
2319 && (pPdeSrc->n.u1Write & pPteSrc->n.u1Write)
2320 && pPdeDst->n.u1Present)
2321 {
2322 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPdeDst->u & SHW_PDE_PG_MASK);
2323 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2324 PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
2325 if ( pPteDst->n.u1Present
2326 && pPteDst->n.u1Write)
2327 LogFlow(("Writable present page %RGv not marked for dirty bit tracking!!!\n", GCPtrPage));
2328 }
2329# endif /* VBOX_STRICT */
2330 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2331 return VINF_PGM_NO_DIRTY_BIT_TRACKING;
2332 }
2333 AssertRC(rc);
2334 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2335 return rc;
2336
2337
2338l_UpperLevelPageFault:
2339 /*
2340 * Pagefault detected while checking the PML4E, PDPE or PDE.
2341 * Single exit handler to get rid of duplicate code paths.
2342 */
2343 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF));
2344 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
2345 Log(("CheckPageFault: real page fault at %RGv (%d)\n", GCPtrPage, uPageFaultLevel));
2346
2347 if (
2348# if PGM_GST_TYPE == PGM_TYPE_AMD64
2349 pPml4eSrc->n.u1Present &&
2350# endif
2351# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
2352 pPdpeSrc->n.u1Present &&
2353# endif
2354 pPdeSrc->n.u1Present)
2355 {
2356 /* Check the present bit as the shadow tables can cause different error codes by being out of sync. */
2357 if (pPdeSrc->b.u1Size && fBigPagesSupported)
2358 {
2359 TRPMSetErrorCode(pVCpu, uErr | X86_TRAP_PF_P); /* page-level protection violation */
2360 }
2361 else
2362 {
2363 /*
2364 * Map the guest page table.
2365 */
2366 PGSTPT pPTSrc;
2367 rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
2368 if (RT_SUCCESS(rc))
2369 {
2370 PGSTPTE pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
2371 const GSTPTE PteSrc = *pPteSrc;
2372 if (pPteSrc->n.u1Present)
2373 TRPMSetErrorCode(pVCpu, uErr | X86_TRAP_PF_P); /* page-level protection violation */
2374 }
2375 AssertRC(rc);
2376 }
2377 }
2378 return VINF_EM_RAW_GUEST_TRAP;
2379}
2380#endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
2381
2382
2383/**
2384 * Sync a shadow page table.
2385 *
2386 * The shadow page table is not present. This includes the case where
2387 * there is a conflict with a mapping.
2388 *
2389 * @returns VBox status code.
2390 * @param pVCpu The VMCPU handle.
2391 * @param iPDSrc Page directory index.
2392 * @param pPDSrc Source page directory (i.e. Guest OS page directory).
2393 * Assume this is a temporary mapping.
2394 * @param GCPtrPage GC pointer to the page that caused the fault.
2395 */
2396PGM_BTH_DECL(int, SyncPT)(PVMCPU pVCpu, unsigned iPDSrc, PGSTPD pPDSrc, RTGCPTR GCPtrPage)
2397{
2398 PVM pVM = pVCpu->CTX_SUFF(pVM);
2399 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2400
2401 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2402 STAM_COUNTER_INC(&pVCpu->pgm.s.StatSyncPtPD[iPDSrc]);
2403 LogFlow(("SyncPT: GCPtrPage=%RGv\n", GCPtrPage));
2404
2405 Assert(PGMIsLocked(pVM));
2406
2407#if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
2408 || PGM_GST_TYPE == PGM_TYPE_PAE \
2409 || PGM_GST_TYPE == PGM_TYPE_AMD64) \
2410 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
2411 && PGM_SHW_TYPE != PGM_TYPE_EPT
2412
2413 int rc = VINF_SUCCESS;
2414
2415 /*
2416 * Validate input a little bit.
2417 */
2418 AssertMsg(iPDSrc == ((GCPtrPage >> GST_PD_SHIFT) & GST_PD_MASK), ("iPDSrc=%x GCPtrPage=%RGv\n", iPDSrc, GCPtrPage));
2419# if PGM_SHW_TYPE == PGM_TYPE_32BIT
2420 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT;
2421 PSHWPDE pPdeDst = pgmShwGet32BitPDEPtr(&pVCpu->pgm.s, GCPtrPage);
2422
2423 /* Fetch the pgm pool shadow descriptor. */
2424 PPGMPOOLPAGE pShwPde = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2425 Assert(pShwPde);
2426
2427# elif PGM_SHW_TYPE == PGM_TYPE_PAE
2428 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2429 PPGMPOOLPAGE pShwPde = NULL;
2430 PX86PDPAE pPDDst;
2431 PSHWPDE pPdeDst;
2432
2433 /* Fetch the pgm pool shadow descriptor. */
2434 rc = pgmShwGetPaePoolPagePD(&pVCpu->pgm.s, GCPtrPage, &pShwPde);
2435 AssertRCSuccessReturn(rc, rc);
2436 Assert(pShwPde);
2437
2438 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
2439 pPdeDst = &pPDDst->a[iPDDst];
2440
2441# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
2442 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
2443 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2444 PX86PDPAE pPDDst;
2445 PX86PDPT pPdptDst;
2446 rc = pgmShwGetLongModePDPtr(pVCpu, GCPtrPage, NULL, &pPdptDst, &pPDDst);
2447 AssertRCSuccessReturn(rc, rc);
2448 Assert(pPDDst);
2449 PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
2450# endif
2451 SHWPDE PdeDst = *pPdeDst;
2452
2453# if PGM_GST_TYPE == PGM_TYPE_AMD64
2454 /* Fetch the pgm pool shadow descriptor. */
2455 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
2456 Assert(pShwPde);
2457# endif
2458
2459# ifndef PGM_WITHOUT_MAPPINGS
2460 /*
2461 * Check for conflicts.
2462 * GC: In case of a conflict we'll go to Ring-3 and do a full SyncCR3.
2463 * HC: Simply resolve the conflict.
2464 */
2465 if (PdeDst.u & PGM_PDFLAGS_MAPPING)
2466 {
2467 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
2468# ifndef IN_RING3
2469 Log(("SyncPT: Conflict at %RGv\n", GCPtrPage));
2470 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2471 return VERR_ADDRESS_CONFLICT;
2472# else
2473 PPGMMAPPING pMapping = pgmGetMapping(pVM, (RTGCPTR)GCPtrPage);
2474 Assert(pMapping);
2475# if PGM_GST_TYPE == PGM_TYPE_32BIT
2476 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
2477# elif PGM_GST_TYPE == PGM_TYPE_PAE
2478 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
2479# else
2480 AssertFailed(); /* can't happen for amd64 */
2481# endif
2482 if (RT_FAILURE(rc))
2483 {
2484 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2485 return rc;
2486 }
2487 PdeDst = *pPdeDst;
2488# endif
2489 }
2490# else /* PGM_WITHOUT_MAPPINGS */
2491 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
2492# endif /* PGM_WITHOUT_MAPPINGS */
2493 Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts. */
2494
2495# if defined(IN_RC)
2496 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
2497 PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
2498# endif
2499
2500 /*
2501 * Sync page directory entry.
2502 */
2503 GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
2504 if (PdeSrc.n.u1Present)
2505 {
2506 /*
2507 * Allocate & map the page table.
2508 */
2509 PSHWPT pPTDst;
2510# if PGM_GST_TYPE == PGM_TYPE_AMD64
2511 const bool fPageTable = !PdeSrc.b.u1Size;
2512# else
2513 const bool fPageTable = !PdeSrc.b.u1Size || !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PSE);
2514# endif
2515 PPGMPOOLPAGE pShwPage;
2516 RTGCPHYS GCPhys;
2517 if (fPageTable)
2518 {
2519 GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
2520# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
2521 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
2522 GCPhys |= (iPDDst & 1) * (PAGE_SIZE / 2);
2523# endif
2524 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage);
2525 }
2526 else
2527 {
2528 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
2529# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
2530 /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs. */
2531 GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
2532# endif
2533 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, pShwPde->idx, iPDDst, &pShwPage);
2534 }
2535 if (rc == VINF_SUCCESS)
2536 pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2537 else if (rc == VINF_PGM_CACHED_PAGE)
2538 {
2539 /*
2540 * The PT was cached, just hook it up.
2541 */
2542 if (fPageTable)
2543 PdeDst.u = pShwPage->Core.Key
2544 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2545 else
2546 {
2547 PdeDst.u = pShwPage->Core.Key
2548 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2549 /* (see explanation and assumptions further down.) */
2550 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
2551 {
2552 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageBig));
2553 PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
2554 PdeDst.b.u1Write = 0;
2555 }
2556 }
2557 ASMAtomicWriteSize(pPdeDst, PdeDst.u);
2558# if defined(IN_RC)
2559 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
2560# endif
2561 return VINF_SUCCESS;
2562 }
2563 else if (rc == VERR_PGM_POOL_FLUSHED)
2564 {
2565 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2566# if defined(IN_RC)
2567 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
2568# endif
2569 return VINF_PGM_SYNC_CR3;
2570 }
2571 else
2572 AssertMsgFailedReturn(("rc=%Rrc\n", rc), VERR_INTERNAL_ERROR);
2573 PdeDst.u &= X86_PDE_AVL_MASK;
2574 PdeDst.u |= pShwPage->Core.Key;
2575
2576 /*
2577 * Page directory has been accessed (this is a fault situation, remember).
2578 */
2579 pPDSrc->a[iPDSrc].n.u1Accessed = 1;
2580 if (fPageTable)
2581 {
2582 /*
2583 * Page table - 4KB.
2584 *
2585 * Sync all or just a few entries depending on PGM_SYNC_N_PAGES.
2586 */
2587 Log2(("SyncPT: 4K %RGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx}\n",
2588 GCPtrPage, PdeSrc.b.u1Present, PdeSrc.b.u1Write, PdeSrc.b.u1User, (uint64_t)PdeSrc.u));
2589 PGSTPT pPTSrc;
2590 rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
2591 if (RT_SUCCESS(rc))
2592 {
2593 /*
2594 * Start by syncing the page directory entry so CSAM's TLB trick works.
2595 */
2596 PdeDst.u = (PdeDst.u & (SHW_PDE_PG_MASK | X86_PDE_AVL_MASK))
2597 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2598 ASMAtomicWriteSize(pPdeDst, PdeDst.u);
2599# if defined(IN_RC)
2600 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
2601# endif
2602
2603 /*
2604 * Directory/page user or supervisor privilege: (same goes for read/write)
2605 *
2606 * Directory Page Combined
2607 * U/S U/S U/S
2608 * 0 0 0
2609 * 0 1 0
2610 * 1 0 0
2611 * 1 1 1
2612 *
2613 * Simple AND operation. Table listed for completeness.
2614 *
2615 */
2616 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT4K));
2617# ifdef PGM_SYNC_N_PAGES
2618 unsigned iPTBase = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
2619 unsigned iPTDst = iPTBase;
2620 const unsigned iPTDstEnd = RT_MIN(iPTDst + PGM_SYNC_NR_PAGES / 2, RT_ELEMENTS(pPTDst->a));
2621 if (iPTDst <= PGM_SYNC_NR_PAGES / 2)
2622 iPTDst = 0;
2623 else
2624 iPTDst -= PGM_SYNC_NR_PAGES / 2;
2625# else /* !PGM_SYNC_N_PAGES */
2626 unsigned iPTDst = 0;
2627 const unsigned iPTDstEnd = RT_ELEMENTS(pPTDst->a);
2628# endif /* !PGM_SYNC_N_PAGES */
2629# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
2630 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
2631 const unsigned offPTSrc = ((GCPtrPage >> SHW_PD_SHIFT) & 1) * 512;
2632# else
2633 const unsigned offPTSrc = 0;
2634# endif
2635 for (; iPTDst < iPTDstEnd; iPTDst++)
2636 {
2637 const unsigned iPTSrc = iPTDst + offPTSrc;
2638 const GSTPTE PteSrc = pPTSrc->a[iPTSrc];
2639
2640 if (PteSrc.n.u1Present) /* we've already cleared it above */
2641 {
2642# ifndef IN_RING0
2643 /*
2644 * Assuming kernel code will be marked as supervisor - and not as user level
2645 * and executed using a conforming code selector - And marked as readonly.
2646 * Also assume that if we're monitoring a page, it's of no interest to CSAM.
2647 */
2648 PPGMPAGE pPage;
2649 if ( ((PdeSrc.u & pPTSrc->a[iPTSrc].u) & (X86_PTE_RW | X86_PTE_US))
2650 || !CSAMDoesPageNeedScanning(pVM, (RTRCPTR)((iPDSrc << GST_PD_SHIFT) | (iPTSrc << PAGE_SHIFT)))
2651 || ( (pPage = pgmPhysGetPage(&pVM->pgm.s, PteSrc.u & GST_PTE_PG_MASK))
2652 && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2653 )
2654# endif
2655 PGM_BTH_NAME(SyncPageWorker)(pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
2656 Log2(("SyncPT: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s dst.raw=%08llx iPTSrc=%x PdeSrc.u=%x physpte=%RGp\n",
2657 (RTGCPTR)((iPDSrc << GST_PD_SHIFT) | (iPTSrc << PAGE_SHIFT)),
2658 PteSrc.n.u1Present,
2659 PteSrc.n.u1Write & PdeSrc.n.u1Write,
2660 PteSrc.n.u1User & PdeSrc.n.u1User,
2661 (uint64_t)PteSrc.u,
2662 pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : "", pPTDst->a[iPTDst].u, iPTSrc, PdeSrc.au32[0],
2663 (RTGCPHYS)((PdeSrc.u & GST_PDE_PG_MASK) + iPTSrc*sizeof(PteSrc)) ));
2664 }
2665 } /* for PTEs */
2666 }
2667 }
2668 else
2669 {
2670 /*
2671 * Big page - 2/4MB.
2672 *
2673 * We'll walk the ram range list in parallel and optimize lookups.
2674 * We will only sync one shadow page table at a time.
2675 */
2676 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT4M));
2677
2678 /**
2679 * @todo It might be more efficient to sync only a part of the 4MB page (similar to what we do for 4kb PDs).
2680 */
2681
2682 /*
2683 * Start by syncing the page directory entry.
2684 */
2685 PdeDst.u = (PdeDst.u & (SHW_PDE_PG_MASK | (X86_PDE_AVL_MASK & ~PGM_PDFLAGS_TRACK_DIRTY)))
2686 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
2687
2688 /*
2689 * If the page is not flagged as dirty and is writable, then make it read-only
2690 * at PD level, so we can set the dirty bit when the page is modified.
2691 *
2692 * ASSUMES that page access handlers are implemented on page table entry level.
2693 * Thus we will first catch the dirty access and set PDE.D and restart. If
2694 * there is an access handler, we'll trap again and let it work on the problem.
2695 */
2696 /** @todo move the above stuff to a section in the PGM documentation. */
2697 Assert(!(PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY));
2698 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
2699 {
2700 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageBig));
2701 PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
2702 PdeDst.b.u1Write = 0;
2703 }
2704 ASMAtomicWriteSize(pPdeDst, PdeDst.u);
2705# if defined(IN_RC)
2706 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
2707# endif
2708
2709 /*
2710 * Fill the shadow page table.
2711 */
2712 /* Get address and flags from the source PDE. */
2713 SHWPTE PteDstBase;
2714 PteDstBase.u = PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT);
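            /* Every shadow PTE in this table inherits these flags from the big-page
               PDE; only the physical frame differs per entry (filled in below). */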
2715
2716 /* Loop thru the entries in the shadow PT. */
2717 const RTGCPTR GCPtr = (GCPtrPage >> SHW_PD_SHIFT) << SHW_PD_SHIFT; NOREF(GCPtr);
2718 Log2(("SyncPT: BIG %RGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx} Shw=%RGv GCPhys=%RGp %s\n",
2719 GCPtrPage, PdeSrc.b.u1Present, PdeSrc.b.u1Write, PdeSrc.b.u1User, (uint64_t)PdeSrc.u, GCPtr,
2720 GCPhys, PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
2721 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2722 unsigned iPTDst = 0;
2723 while ( iPTDst < RT_ELEMENTS(pPTDst->a)
2724 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
2725 {
2726 /* Advance ram range list. */
2727 while (pRam && GCPhys > pRam->GCPhysLast)
2728 pRam = pRam->CTX_SUFF(pNext);
2729 if (pRam && GCPhys >= pRam->GCPhys)
2730 {
2731 unsigned iHCPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2732 do
2733 {
2734 /* Make shadow PTE. */
2735 PPGMPAGE pPage = &pRam->aPages[iHCPage];
2736 SHWPTE PteDst;
2737
2738# ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
2739 /* Try to make the page writable if necessary. */
2740 if ( PteDstBase.n.u1Write
2741 && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
2742 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
2743 {
2744 rc = pgmPhysPageMakeWritableUnlocked(pVM, pPage, GCPhys);
2745 AssertRCReturn(rc, rc);
2746 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
2747 break;
2748 }
2749# endif
2750
2751 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2752 {
2753 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
2754 {
2755 PteDst.u = PGM_PAGE_GET_HCPHYS(pPage) | PteDstBase.u;
2756 PteDst.n.u1Write = 0;
2757 }
2758 else
2759 PteDst.u = 0;
2760 }
2761# ifndef IN_RING0
2762 /*
2763 * Assuming kernel code will be marked as supervisor and not as user level and executed
2764 * using a conforming code selector. Don't check for readonly, as that implies the whole
2765 * 4MB can be code or readonly data. Linux enables write access for its large pages.
2766 */
2767 else if ( !PdeSrc.n.u1User
2768 && CSAMDoesPageNeedScanning(pVM, (RTRCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT))))
2769 PteDst.u = 0;
2770# endif
2771 else
2772 PteDst.u = PGM_PAGE_GET_HCPHYS(pPage) | PteDstBase.u;
2773
2774 /* Only map writable pages writable. */
2775 if ( PteDst.n.u1Write
2776 && PteDst.n.u1Present
2777 && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
2778 {
2779 PteDst.n.u1Write = 0; /** @todo this isn't quite working yet... */
2780 Log3(("SyncPT: write-protecting %RGp pPage=%R[pgmpage] at %RGv\n", GCPhys, pPage, (RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT))));
2781 }
2782
2783# ifdef PGMPOOL_WITH_USER_TRACKING
2784 if (PteDst.n.u1Present)
2785 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
2786# endif
2787 /* commit it */
2788 pPTDst->a[iPTDst] = PteDst;
2789 Log4(("SyncPT: BIG %RGv PteDst:{P=%d RW=%d U=%d raw=%08llx}%s\n",
2790 (RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT)), PteDst.n.u1Present, PteDst.n.u1Write, PteDst.n.u1User, (uint64_t)PteDst.u,
2791 PteDst.u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
2792
2793 /* advance */
2794 GCPhys += PAGE_SIZE;
2795 iHCPage++;
2796 iPTDst++;
2797 } while ( iPTDst < RT_ELEMENTS(pPTDst->a)
2798 && GCPhys <= pRam->GCPhysLast);
2799 }
2800 else if (pRam)
2801 {
2802 Log(("Invalid pages at %RGp\n", GCPhys));
2803 do
2804 {
2805 pPTDst->a[iPTDst].u = 0; /* MMIO or invalid page, we must handle them manually. */
2806 GCPhys += PAGE_SIZE;
2807 iPTDst++;
2808 } while ( iPTDst < RT_ELEMENTS(pPTDst->a)
2809 && GCPhys < pRam->GCPhys);
2810 }
2811 else
2812 {
2813 Log(("Invalid pages at %RGp (2)\n", GCPhys));
2814 for ( ; iPTDst < RT_ELEMENTS(pPTDst->a); iPTDst++)
2815 pPTDst->a[iPTDst].u = 0; /* MMIO or invalid page, we must handle them manually. */
2816 }
2817 } /* while more PTEs */
2818 } /* 4KB / 4MB */
2819 }
2820 else
2821 AssertRelease(!PdeDst.n.u1Present);
2822
2823 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2824 if (RT_FAILURE(rc))
2825 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPTFailed));
2826 return rc;
2827
2828#elif (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
2829 && PGM_SHW_TYPE != PGM_TYPE_NESTED \
2830 && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT) \
2831 && !defined(IN_RC)
2832
2833 /*
2834 * Validate input a little bit.
2835 */
2836 int rc = VINF_SUCCESS;
2837# if PGM_SHW_TYPE == PGM_TYPE_32BIT
2838 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2839 PSHWPDE pPdeDst = pgmShwGet32BitPDEPtr(&pVCpu->pgm.s, GCPtrPage);
2840
2841 /* Fetch the pgm pool shadow descriptor. */
2842 PPGMPOOLPAGE pShwPde = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2843 Assert(pShwPde);
2844
2845# elif PGM_SHW_TYPE == PGM_TYPE_PAE
2846 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2847 PPGMPOOLPAGE pShwPde;
2848 PX86PDPAE pPDDst;
2849 PSHWPDE pPdeDst;
2850
2851 /* Fetch the pgm pool shadow descriptor. */
2852 rc = pgmShwGetPaePoolPagePD(&pVCpu->pgm.s, GCPtrPage, &pShwPde);
2853 AssertRCSuccessReturn(rc, rc);
2854 Assert(pShwPde);
2855
2856 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
2857 pPdeDst = &pPDDst->a[iPDDst];
2858
2859# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
2860 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
2861 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
2862 PX86PDPAE pPDDst;
2863 PX86PDPT pPdptDst;
2864 rc = pgmShwGetLongModePDPtr(pVCpu, GCPtrPage, NULL, &pPdptDst, &pPDDst);
2865 AssertRCSuccessReturn(rc, rc);
2866 Assert(pPDDst);
2867 PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
2868
2869 /* Fetch the pgm pool shadow descriptor. */
2870 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
2871 Assert(pShwPde);
2872
2873# elif PGM_SHW_TYPE == PGM_TYPE_EPT
2874 const unsigned iPdpt = (GCPtrPage >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
2875 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
2876 PEPTPD pPDDst;
2877 PEPTPDPT pPdptDst;
2878
2879 rc = pgmShwGetEPTPDPtr(pVCpu, GCPtrPage, &pPdptDst, &pPDDst);
2880 if (rc != VINF_SUCCESS)
2881 {
2882 AssertRC(rc);
2883 return rc;
2884 }
2885 Assert(pPDDst);
2886 PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
2887
2888 /* Fetch the pgm pool shadow descriptor. */
2889 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & EPT_PDPTE_PG_MASK);
2890 Assert(pShwPde);
2891# endif
2892 SHWPDE PdeDst = *pPdeDst;
2893
2894 Assert(!(PdeDst.u & PGM_PDFLAGS_MAPPING));
2895 Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts. */
2896
2897 GSTPDE PdeSrc;
2898 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
2899 PdeSrc.n.u1Present = 1;
2900 PdeSrc.n.u1Write = 1;
2901 PdeSrc.n.u1Accessed = 1;
2902 PdeSrc.n.u1User = 1;
2903
2904 /*
2905 * Allocate & map the page table.
2906 */
2907 PSHWPT pPTDst;
2908 PPGMPOOLPAGE pShwPage;
2909 RTGCPHYS GCPhys;
2910
2911 /* Virtual address = physical address */
2912 GCPhys = GCPtrPage & X86_PAGE_4K_BASE_MASK;
2913 rc = pgmPoolAlloc(pVM, GCPhys & ~(RT_BIT_64(SHW_PD_SHIFT) - 1), BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage);
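    /* (The pool key is the PD-entry-aligned physical base of the region this
        page table maps, so identical regions can share a cached shadow PT.) */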
2914
2915 if ( rc == VINF_SUCCESS
2916 || rc == VINF_PGM_CACHED_PAGE)
2917 pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
2918 else
2919 AssertMsgFailedReturn(("rc=%Rrc\n", rc), VERR_INTERNAL_ERROR);
2920
2921 PdeDst.u &= X86_PDE_AVL_MASK;
2922 PdeDst.u |= pShwPage->Core.Key;
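    /* Core.Key is the host physical address of the pool page, i.e. the shadow
       page table this PDE must point at. */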
2923 PdeDst.n.u1Present = 1;
2924 PdeDst.n.u1Write = 1;
2925# if PGM_SHW_TYPE == PGM_TYPE_EPT
2926 PdeDst.n.u1Execute = 1;
2927# else
2928 PdeDst.n.u1User = 1;
2929 PdeDst.n.u1Accessed = 1;
2930# endif
2931 ASMAtomicWriteSize(pPdeDst, PdeDst.u);
2932
2933 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, GCPtrPage, PGM_SYNC_NR_PAGES, 0 /* page not present */);
2934 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2935 return rc;
2936
2937#else
2938 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE));
2939 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
2940 return VERR_INTERNAL_ERROR;
2941#endif
2942}
2943
2944
2945
2946/**
2947 * Prefetch a page/set of pages.
2948 *
2949 * Typically used to sync commonly used pages before entering raw mode
2950 * after a CR3 reload.
2951 *
2952 * @returns VBox status code.
2953 * @param pVCpu The VMCPU handle.
2954 * @param GCPtrPage Page to prefetch.
2955 */
2956PGM_BTH_DECL(int, PrefetchPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage)
2957{
2958#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
2959 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
2960 /*
2961 * Check that all Guest levels thru the PDE are present, getting the
2962 * PD and PDE in the process.
2963 */
2964 int rc = VINF_SUCCESS;
2965# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
2966# if PGM_GST_TYPE == PGM_TYPE_32BIT
2967 const unsigned iPDSrc = GCPtrPage >> GST_PD_SHIFT;
2968 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
2969# elif PGM_GST_TYPE == PGM_TYPE_PAE
2970 unsigned iPDSrc;
2971 X86PDPE PdpeSrc;
2972 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVCpu->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
2973 if (!pPDSrc)
2974 return VINF_SUCCESS; /* not present */
2975# elif PGM_GST_TYPE == PGM_TYPE_AMD64
2976 unsigned iPDSrc;
2977 PX86PML4E pPml4eSrc;
2978 X86PDPE PdpeSrc;
2979 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVCpu->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
2980 if (!pPDSrc)
2981 return VINF_SUCCESS; /* not present */
2982# endif
2983 const GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
2984# else
2985 PGSTPD pPDSrc = NULL;
2986 const unsigned iPDSrc = 0;
2987 GSTPDE PdeSrc;
2988
2989 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
2990 PdeSrc.n.u1Present = 1;
2991 PdeSrc.n.u1Write = 1;
2992 PdeSrc.n.u1Accessed = 1;
2993 PdeSrc.n.u1User = 1;
2994# endif
2995
2996 if (PdeSrc.n.u1Present && PdeSrc.n.u1Accessed)
2997 {
2998# if PGM_SHW_TYPE == PGM_TYPE_32BIT
2999 const X86PDE PdeDst = pgmShwGet32BitPDE(&pVCpu->pgm.s, GCPtrPage);
3000# elif PGM_SHW_TYPE == PGM_TYPE_PAE
3001 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
3002 PX86PDPAE pPDDst;
3003 X86PDEPAE PdeDst;
3004# if PGM_GST_TYPE != PGM_TYPE_PAE
3005 X86PDPE PdpeSrc;
3006
3007 /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
3008 PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
3009# endif
3010 int rc = pgmShwSyncPaePDPtr(pVCpu, GCPtrPage, &PdpeSrc, &pPDDst);
3011 if (rc != VINF_SUCCESS)
3012 {
3013 AssertRC(rc);
3014 return rc;
3015 }
3016 Assert(pPDDst);
3017 PdeDst = pPDDst->a[iPDDst];
3018
3019# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
3020 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
3021 PX86PDPAE pPDDst;
3022 X86PDEPAE PdeDst;
3023
3024# if PGM_GST_TYPE == PGM_TYPE_PROT
3025 /* AMD-V nested paging */
3026 X86PML4E Pml4eSrc;
3027 X86PDPE PdpeSrc;
3028 PX86PML4E pPml4eSrc = &Pml4eSrc;
3029
3030 /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
3031 Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
3032 PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
3033# endif
3034
3035 int rc = pgmShwSyncLongModePDPtr(pVCpu, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
3036 if (rc != VINF_SUCCESS)
3037 {
3038 AssertRC(rc);
3039 return rc;
3040 }
3041 Assert(pPDDst);
3042 PdeDst = pPDDst->a[iPDDst];
3043# endif
3044 if (!(PdeDst.u & PGM_PDFLAGS_MAPPING))
3045 {
3046 if (!PdeDst.n.u1Present)
3047 {
3048 PVM pVM = pVCpu->CTX_SUFF(pVM);
3049 /** r=bird: This guy will set the A bit on the PDE, probably harmless. */
3050 pgmLock(pVM);
3051 rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage);
3052 pgmUnlock(pVM);
3053 }
3054 else
3055 {
3056 /** @note We used to sync PGM_SYNC_NR_PAGES pages, which triggered assertions in CSAM, because
3057 * R/W attributes of nearby pages were reset. Not sure how that could happen. Anyway, it
3058 * makes no sense to prefetch more than one page.
3059 */
3060 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, GCPtrPage, 1, 0);
3061 if (RT_SUCCESS(rc))
3062 rc = VINF_SUCCESS;
3063 }
3064 }
3065 }
3066 return rc;
3067
3068#elif PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
3069 return VINF_SUCCESS; /* ignore */
3070#endif
3071}
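
/*
 * Illustrative sketch (kept out of the build with #if 0): callers normally
 * reach the PrefetchPage worker above through the per-mode PGM dispatch
 * table rather than calling the template instance directly. The wrapper
 * below is an assumption for illustration, not the actual PGMAll.cpp code.
 */
#if 0
DECLINLINE(int) pgmSketchPrefetch(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    /* Dispatch to the shadow+guest worker matching the current paging mode. */
    return PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
}
#endif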
3072
3073
3074
3075
3076/**
3077 * Syncs a page during a PGMVerifyAccess() call.
3078 *
3079 * @returns VBox status code (informational included).
3080 * @param pVCpu The VMCPU handle.
3081 * @param GCPtrPage The address of the page to sync.
3082 * @param fPage The effective guest page flags.
3083 * @param uErr The trap error code.
3084 */
3085PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fPage, unsigned uErr)
3086{
3087 PVM pVM = pVCpu->CTX_SUFF(pVM);
3088
3089 LogFlow(("VerifyAccessSyncPage: GCPtrPage=%RGv fPage=%#x uErr=%#x\n", GCPtrPage, fPage, uErr));
3090
3091 Assert(!HWACCMIsNestedPagingActive(pVM));
3092#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
3093 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
3094
3095# ifndef IN_RING0
3096 if (!(fPage & X86_PTE_US))
3097 {
3098 /*
3099 * Mark this page as safe.
3100 */
3101 /** @todo not correct for pages that contain both code and data!! */
3102 Log(("CSAMMarkPage %RGv; scanned=%d\n", GCPtrPage, true));
3103 CSAMMarkPage(pVM, (RTRCPTR)GCPtrPage, true);
3104 }
3105# endif
3106
3107 /*
3108 * Get guest PD and index.
3109 */
3110# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
3111# if PGM_GST_TYPE == PGM_TYPE_32BIT
3112 const unsigned iPDSrc = GCPtrPage >> GST_PD_SHIFT;
3113 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
3114# elif PGM_GST_TYPE == PGM_TYPE_PAE
3115 unsigned iPDSrc = 0;
3116 X86PDPE PdpeSrc;
3117 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVCpu->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
3118
3119 if (!pPDSrc)
3120 {
3121 Log(("PGMVerifyAccess: access violation for %RGv due to non-present PDPTR\n", GCPtrPage));
3122 return VINF_EM_RAW_GUEST_TRAP;
3123 }
3124# elif PGM_GST_TYPE == PGM_TYPE_AMD64
3125 unsigned iPDSrc;
3126 PX86PML4E pPml4eSrc;
3127 X86PDPE PdpeSrc;
3128 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVCpu->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
3129 if (!pPDSrc)
3130 {
3131 Log(("PGMVerifyAccess: access violation for %RGv due to non-present PDPTR\n", GCPtrPage));
3132 return VINF_EM_RAW_GUEST_TRAP;
3133 }
3134# endif
3135# else
3136 PGSTPD pPDSrc = NULL;
3137 const unsigned iPDSrc = 0;
3138# endif
3139 int rc = VINF_SUCCESS;
3140
3141 pgmLock(pVM);
3142
3143 /*
3144 * First check if the shadow pd is present.
3145 */
3146# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3147 PX86PDE pPdeDst = pgmShwGet32BitPDEPtr(&pVCpu->pgm.s, GCPtrPage);
3148# elif PGM_SHW_TYPE == PGM_TYPE_PAE
3149 PX86PDEPAE pPdeDst;
3150 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
3151 PX86PDPAE pPDDst;
3152# if PGM_GST_TYPE != PGM_TYPE_PAE
3153 X86PDPE PdpeSrc;
3154
3155 /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
3156 PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
3157# endif
3158 rc = pgmShwSyncPaePDPtr(pVCpu, GCPtrPage, &PdpeSrc, &pPDDst);
3159 if (rc != VINF_SUCCESS)
3160 {
3161 AssertRC(rc);
3162 return rc;
3163 }
3164 Assert(pPDDst);
3165 pPdeDst = &pPDDst->a[iPDDst];
3166
3167# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
3168 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
3169 PX86PDPAE pPDDst;
3170 PX86PDEPAE pPdeDst;
3171
3172# if PGM_GST_TYPE == PGM_TYPE_PROT
3173 /* AMD-V nested paging */
3174 X86PML4E Pml4eSrc;
3175 X86PDPE PdpeSrc;
3176 PX86PML4E pPml4eSrc = &Pml4eSrc;
3177
3178 /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
3179 Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
3180 PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
3181# endif
3182
3183 rc = pgmShwSyncLongModePDPtr(pVCpu, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
3184 if (rc != VINF_SUCCESS)
3185 {
3186 AssertRC(rc);
3187 return rc;
3188 }
3189 Assert(pPDDst);
3190 pPdeDst = &pPDDst->a[iPDDst];
3191# endif
3192
3193# if defined(IN_RC)
3194 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
3195 PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
3196# endif
3197
3198 if (!pPdeDst->n.u1Present)
3199 {
3200 rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage);
3201 if (rc != VINF_SUCCESS)
3202 {
3203# if defined(IN_RC)
3204 /* Release the dynamic pPdeDst mapping taken above. */
3205 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
3206# endif
3207 pgmUnlock(pVM);
3208 AssertRC(rc);
3209 return rc;
3210 }
3211 }
3212
3213# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
3214 /* Check for dirty bit fault */
3215 rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, pPdeDst, &pPDSrc->a[iPDSrc], GCPtrPage);
3216 if (rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT)
3217 Log(("PGMVerifyAccess: success (dirty)\n"));
3218 else
3219 {
3220 GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
3221# else
3222 {
3223 GSTPDE PdeSrc;
3224 PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
3225 PdeSrc.n.u1Present = 1;
3226 PdeSrc.n.u1Write = 1;
3227 PdeSrc.n.u1Accessed = 1;
3228 PdeSrc.n.u1User = 1;
3229
3230# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
3231 Assert(rc != VINF_EM_RAW_GUEST_TRAP);
3232 if (uErr & X86_TRAP_PF_US)
3233 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncUser));
3234 else /* supervisor */
3235 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));
3236
3237 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, GCPtrPage, 1, 0);
3238 if (RT_SUCCESS(rc))
3239 {
3240 /* Page was successfully synced */
3241 Log2(("PGMVerifyAccess: success (sync)\n"));
3242 rc = VINF_SUCCESS;
3243 }
3244 else
3245 {
3246 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", GCPtrPage, rc));
3247 rc = VINF_EM_RAW_GUEST_TRAP;
3248 }
3249 }
3250# if defined(IN_RC)
3251 /* Release the dynamic pPdeDst mapping taken above. */
3252 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
3253# endif
3254 pgmUnlock(pVM);
3255 return rc;
3256
3257#else /* unsupported guest/shadow paging combination */
3258
3259 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE));
3260 return VERR_INTERNAL_ERROR;
3261#endif /* unsupported guest/shadow paging combination */
3262}
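
/*
 * Illustrative sketch (kept out of the build with #if 0): how a caller might
 * interpret the statuses of VerifyAccessSyncPage above -- VINF_SUCCESS when
 * the page could be synced, VINF_EM_RAW_GUEST_TRAP when the access has to
 * fault in the guest. The wrapper is an assumption for illustration only.
 */
#if 0
DECLINLINE(bool) pgmSketchAccessOk(PVMCPU pVCpu, RTGCPTR GCPtr, unsigned fPage)
{
    int rc = PGM_BTH_NAME(VerifyAccessSyncPage)(pVCpu, GCPtr, fPage, 0 /* uErr */);
    if (rc == VINF_EM_RAW_GUEST_TRAP)
        return false;               /* the guest would (and should) fault here */
    return RT_SUCCESS(rc);
}
#endif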
3263
3264
3265#if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
3266# if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
3267/**
3268 * Figures out which kind of shadow page this guest PDE warrants.
3269 *
3270 * @returns Shadow page kind.
3271 * @param pPdeSrc The guest PDE in question.
3272 * @param cr4 The current guest cr4 value.
3273 */
3274DECLINLINE(PGMPOOLKIND) PGM_BTH_NAME(CalcPageKind)(const GSTPDE *pPdeSrc, uint32_t cr4)
3275{
3276# if PGM_GST_TYPE == PGM_TYPE_AMD64
3277 if (!pPdeSrc->n.u1Size)
3278# else
3279 if (!pPdeSrc->n.u1Size || !(cr4 & X86_CR4_PSE))
3280# endif
3281 return BTH_PGMPOOLKIND_PT_FOR_PT;
3282 //switch (pPdeSrc->u & (X86_PDE4M_RW | X86_PDE4M_US /*| X86_PDE4M_PAE_NX*/))
3283 //{
3284 // case 0:
3285 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RO;
3286 // case X86_PDE4M_RW:
3287 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RW;
3288 // case X86_PDE4M_US:
3289 // return BTH_PGMPOOLKIND_PT_FOR_BIG_US;
3290 // case X86_PDE4M_RW | X86_PDE4M_US:
3291 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RW_US;
3292# if 0
3293 // case X86_PDE4M_PAE_NX:
3294 // return BTH_PGMPOOLKIND_PT_FOR_BIG_NX;
3295 // case X86_PDE4M_RW | X86_PDE4M_PAE_NX:
3296 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RW_NX;
3297 // case X86_PDE4M_US | X86_PDE4M_PAE_NX:
3298 // return BTH_PGMPOOLKIND_PT_FOR_BIG_US_NX;
3299 // case X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PAE_NX:
3300 // return BTH_PGMPOOLKIND_PT_FOR_BIG_RW_US_NX;
3301# endif
3302 return BTH_PGMPOOLKIND_PT_FOR_BIG;
3303 //}
3304}
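
/*
 * Illustrative sketch (kept out of the build with #if 0): the big-page test
 * used by CalcPageKind above in stand-alone form. A legacy-mode PDE.PS bit
 * only means a 2/4MB page when CR4.PSE is set; in long mode PSE is implied.
 * The helper name is an assumption for illustration only.
 */
#if 0
DECLINLINE(bool) pgmSketchIsBigPde(bool fPdeSize, uint32_t cr4, bool fLongMode)
{
    if (!fPdeSize)
        return false;                       /* PS clear: always a page table. */
    if (fLongMode)
        return true;                        /* long mode: PS always honoured. */
    return RT_BOOL(cr4 & X86_CR4_PSE);      /* legacy: PS gated by CR4.PSE. */
}
#endif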
3305# endif
3306#endif
3307
3308#undef MY_STAM_COUNTER_INC
3309#define MY_STAM_COUNTER_INC(a) do { } while (0)
3310
3311
3312/**
3313 * Syncs the paging hierarchy starting at CR3.
3314 *
3315 * @returns VBox status code, no specials.
3316 * @param pVCpu The VMCPU handle.
3317 * @param cr0 Guest context CR0 register
3318 * @param cr3 Guest context CR3 register
3319 * @param cr4 Guest context CR4 register
3320 * @param fGlobal Including global page directories or not
3321 */
3322PGM_BTH_DECL(int, SyncCR3)(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
3323{
3324 PVM pVM = pVCpu->CTX_SUFF(pVM);
3325
3326 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
3327 fGlobal = true; /* Change this CR3 reload to be a global one. */
3328
3329 LogFlow(("SyncCR3 %d\n", fGlobal));
3330
3331#if PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
3332 /*
3333 * Update page access handlers.
3334 * Virtual handlers are always flushed, while physical handlers are only flushed on demand.
3335 * WARNING: We are incorrectly not doing global flushing on virtual handler updates. We'll
3336 * have to look into that later because it will have a bad influence on performance.
3337 * @note SvL: There's no need for that. Just invalidate the virtual range(s).
3338 * bird: Yes, but that won't work for aliases.
3339 */
3340 /** @todo this MUST go away. See #1557. */
3341 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3Handlers), h);
3342 PGM_GST_NAME(HandlerVirtualUpdate)(pVM, cr4);
3343 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3Handlers), h);
3344#endif
3345
3346#if PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
3347 /*
3348 * Nested / EPT - almost no work.
3349 */
3350 /** @todo check if this is really necessary; the call does it as well... */
3351 HWACCMFlushTLB(pVM);
3352 return VINF_SUCCESS;
3353
3354#elif PGM_SHW_TYPE == PGM_TYPE_AMD64
3355 /*
3356 * AMD64 (Shw & Gst) - No need to check all paging levels; we zero
3357 * out the shadow parts when the guest modifies its tables.
3358 */
3359 return VINF_SUCCESS;
3360
3361#else /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */
3362
3363# ifdef PGM_WITHOUT_MAPPINGS
3364 Assert(pVM->pgm.s.fMappingsFixed);
3365 return VINF_SUCCESS;
3366# else
3367 /* Nothing to do when mappings are fixed. */
3368 if (pVM->pgm.s.fMappingsFixed)
3369 return VINF_SUCCESS;
3370
3371 int rc = PGMMapResolveConflicts(pVM);
3372 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);
3373 if (rc == VINF_PGM_SYNC_CR3)
3374 {
3375 LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
3376 return VINF_PGM_SYNC_CR3;
3377 }
3378# endif
3379 return VINF_SUCCESS;
3380#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */
3381}
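
/*
 * Illustrative sketch (kept out of the build with #if 0): the
 * VINF_PGM_SYNC_CR3 contract of SyncCR3 above -- on a mapping conflict the
 * caller keeps the force-action flag set and retries later. The retry helper
 * is an assumption for illustration; real callers live in PGM.cpp/PGMAll.cpp.
 */
#if 0
DECLINLINE(void) pgmSketchSyncCr3Retry(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4)
{
    int rc = PGM_BTH_NAME(SyncCR3)(pVCpu, cr0, cr3, cr4, true /* fGlobal */);
    if (rc == VINF_PGM_SYNC_CR3)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);     /* retry on next chance */
}
#endif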
3382
3383
3384
3385
3386#ifdef VBOX_STRICT
3387#ifdef IN_RC
3388# undef AssertMsgFailed
3389# define AssertMsgFailed Log
3390#endif
3391#ifdef IN_RING3
3392# include <VBox/dbgf.h>
3393
3394/**
3395 * Dumps a page table hierarchy using only physical addresses and cr4/lm flags.
3396 *
3397 * @returns VBox status code (VINF_SUCCESS).
3398 * @param cr3 The root of the hierarchy.
3399 * @param cr4 The cr4, only PAE and PSE are currently used.
3400 * @param fLongMode Set if long mode, false if not long mode.
3401 * @param cMaxDepth Number of levels to dump.
3402 * @param pHlp Pointer to the output functions.
3403 */
3404__BEGIN_DECLS
3405VMMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint32_t cr3, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp);
3406__END_DECLS
3407
3408#endif
3409
3410/**
3411 * Checks that the shadow page table is in sync with the guest one.
3412 *
3413 * @returns The number of errors.
3415 * @param pVCpu The VMCPU handle.
3416 * @param cr3 Guest context CR3 register
3417 * @param cr4 Guest context CR4 register
3418 * @param GCPtr Where to start. Defaults to 0.
3419 * @param cb How much to check. Defaults to everything.
3420 */
3421PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb)
3422{
3423#if PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
3424 return 0;
3425#else
3426 unsigned cErrors = 0;
3427 PVM pVM = pVCpu->CTX_SUFF(pVM);
3428 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3429
3430#if PGM_GST_TYPE == PGM_TYPE_PAE
3431 /** @todo currently broken; crashes below somewhere */
3432 AssertFailed();
3433#endif
3434
3435#if PGM_GST_TYPE == PGM_TYPE_32BIT \
3436 || PGM_GST_TYPE == PGM_TYPE_PAE \
3437 || PGM_GST_TYPE == PGM_TYPE_AMD64
3438
3439# if PGM_GST_TYPE == PGM_TYPE_AMD64
3440 bool fBigPagesSupported = true;
3441# else
3442 bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PSE);
3443# endif
3444 PPGMCPU pPGM = &pVCpu->pgm.s;
3445 RTGCPHYS GCPhysGst; /* page address derived from the guest page tables. */
3446 RTHCPHYS HCPhysShw; /* page address derived from the shadow page tables. */
3447# ifndef IN_RING0
3448 RTHCPHYS HCPhys; /* general usage. */
3449# endif
3450 int rc;
3451
3452 /*
3453 * Check that the Guest CR3 and all its mappings are correct.
3454 */
3455 AssertMsgReturn(pPGM->GCPhysCR3 == (cr3 & GST_CR3_PAGE_MASK),
3456 ("Invalid GCPhysCR3=%RGp cr3=%RGp\n", pPGM->GCPhysCR3, (RTGCPHYS)cr3),
3457 false);
3458# if !defined(IN_RING0) && PGM_GST_TYPE != PGM_TYPE_AMD64
3459# if PGM_GST_TYPE == PGM_TYPE_32BIT
3460 rc = PGMShwGetPage(pVCpu, (RTGCPTR)pPGM->pGst32BitPdRC, NULL, &HCPhysShw);
3461# else
3462 rc = PGMShwGetPage(pVCpu, (RTGCPTR)pPGM->pGstPaePdptRC, NULL, &HCPhysShw);
3463# endif
3464 AssertRCReturn(rc, 1);
3465 HCPhys = NIL_RTHCPHYS;
3466 rc = pgmRamGCPhys2HCPhys(&pVM->pgm.s, cr3 & GST_CR3_PAGE_MASK, &HCPhys);
3467 AssertMsgReturn(HCPhys == HCPhysShw, ("HCPhys=%RHp HCPhyswShw=%RHp (cr3)\n", HCPhys, HCPhysShw), false);
3468# if PGM_GST_TYPE == PGM_TYPE_32BIT && defined(IN_RING3)
3469 pgmGstGet32bitPDPtr(pPGM);
3470 RTGCPHYS GCPhys;
3471 rc = PGMR3DbgR3Ptr2GCPhys(pVM, pPGM->pGst32BitPdR3, &GCPhys);
3472 AssertRCReturn(rc, 1);
3473 AssertMsgReturn((cr3 & GST_CR3_PAGE_MASK) == GCPhys, ("GCPhys=%RGp cr3=%RGp\n", GCPhys, (RTGCPHYS)cr3), false);
3474# endif
3475# endif /* !IN_RING0 */
3476
3477 /*
3478 * Get and check the Shadow CR3.
3479 */
3480# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3481 unsigned cPDEs = X86_PG_ENTRIES;
3482 unsigned cIncrement = X86_PG_ENTRIES * PAGE_SIZE;
3483# elif PGM_SHW_TYPE == PGM_TYPE_PAE
3484# if PGM_GST_TYPE == PGM_TYPE_32BIT
3485 unsigned cPDEs = X86_PG_PAE_ENTRIES * 4; /* treat it as a 2048 entry table. */
3486# else
3487 unsigned cPDEs = X86_PG_PAE_ENTRIES;
3488# endif
3489 unsigned cIncrement = X86_PG_PAE_ENTRIES * PAGE_SIZE;
3490# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
3491 unsigned cPDEs = X86_PG_PAE_ENTRIES;
3492 unsigned cIncrement = X86_PG_PAE_ENTRIES * PAGE_SIZE;
3493# endif
3494 if (cb != ~(RTGCPTR)0)
3495 cPDEs = RT_MAX(cb >> SHW_PD_SHIFT, 1);
3496
3497/** @todo call the other two PGMAssert*() functions. */
3498
3499# if PGM_GST_TYPE == PGM_TYPE_AMD64
3500 unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3501
3502 for (; iPml4 < X86_PG_PAE_ENTRIES; iPml4++)
3503 {
3504 PPGMPOOLPAGE pShwPdpt = NULL;
3505 PX86PML4E pPml4eSrc;
3506 PX86PML4E pPml4eDst;
3507 RTGCPHYS GCPhysPdptSrc;
3508
3509 pPml4eSrc = pgmGstGetLongModePML4EPtr(&pVCpu->pgm.s, iPml4);
3510 pPml4eDst = pgmShwGetLongModePML4EPtr(&pVCpu->pgm.s, iPml4);
3511
3512 /* Fetch the pgm pool shadow descriptor if the shadow pml4e is present. */
3513 if (!pPml4eDst->n.u1Present)
3514 {
3515 GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
3516 continue;
3517 }
3518
3519 pShwPdpt = pgmPoolGetPage(pPool, pPml4eDst->u & X86_PML4E_PG_MASK);
3520 GCPhysPdptSrc = pPml4eSrc->u & X86_PML4E_PG_MASK_FULL;
3521
3522 if (pPml4eSrc->n.u1Present != pPml4eDst->n.u1Present)
3523 {
3524 AssertMsgFailed(("Present bit doesn't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
3525 GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
3526 cErrors++;
3527 continue;
3528 }
3529
3530 if (GCPhysPdptSrc != pShwPdpt->GCPhys)
3531 {
3532 AssertMsgFailed(("Physical address doesn't match! iPml4 %d pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4, pPml4eDst->u, pPml4eSrc->u, pShwPdpt->GCPhys, GCPhysPdptSrc));
3533 GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
3534 cErrors++;
3535 continue;
3536 }
3537
3538 if ( pPml4eDst->n.u1User != pPml4eSrc->n.u1User
3539 || pPml4eDst->n.u1Write != pPml4eSrc->n.u1Write
3540 || pPml4eDst->n.u1NoExecute != pPml4eSrc->n.u1NoExecute)
3541 {
3542 AssertMsgFailed(("User/Write/NoExec bits don't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
3543 GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
3544 cErrors++;
3545 continue;
3546 }
3547# else /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
3548 {
3549# endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
3550
3551# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
3552 /*
3553 * Check the PDPTEs too.
3554 */
3555 unsigned iPdpt = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
3556
3557 for (;iPdpt <= SHW_PDPT_MASK; iPdpt++)
3558 {
3559 unsigned iPDSrc;
3560 PPGMPOOLPAGE pShwPde = NULL;
3561 PX86PDPE pPdpeDst;
3562 RTGCPHYS GCPhysPdeSrc;
3563# if PGM_GST_TYPE == PGM_TYPE_PAE
3564 X86PDPE PdpeSrc;
3565 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVCpu->pgm.s, GCPtr, &iPDSrc, &PdpeSrc);
3566 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);
3567# else
3568 PX86PML4E pPml4eSrc;
3569 X86PDPE PdpeSrc;
3570 PX86PDPT pPdptDst;
3571 PX86PDPAE pPDDst;
3572 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVCpu->pgm.s, GCPtr, &pPml4eSrc, &PdpeSrc, &iPDSrc);
3573
3574 rc = pgmShwGetLongModePDPtr(pVCpu, GCPtr, NULL, &pPdptDst, &pPDDst);
3575 if (rc != VINF_SUCCESS)
3576 {
3577 AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT, ("Unexpected rc=%Rrc\n", rc));
3578 GCPtr += 512 * _2M;
3579 continue; /* next PDPTE */
3580 }
3581 Assert(pPDDst);
3582# endif
3583 Assert(iPDSrc == 0);
3584
3585 pPdpeDst = &pPdptDst->a[iPdpt];
3586
3587 if (!pPdpeDst->n.u1Present)
3588 {
3589 GCPtr += 512 * _2M;
3590 continue; /* next PDPTE */
3591 }
3592
3593 pShwPde = pgmPoolGetPage(pPool, pPdpeDst->u & X86_PDPE_PG_MASK);
3594 GCPhysPdeSrc = PdpeSrc.u & X86_PDPE_PG_MASK;
3595
3596 if (pPdpeDst->n.u1Present != PdpeSrc.n.u1Present)
3597 {
3598 AssertMsgFailed(("Present bit doesn't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));
3599 GCPtr += 512 * _2M;
3600 cErrors++;
3601 continue;
3602 }
3603
3604 if (GCPhysPdeSrc != pShwPde->GCPhys)
3605 {
3606# if PGM_GST_TYPE == PGM_TYPE_AMD64
3607 AssertMsgFailed(("Physical address doesn't match! iPml4 %d iPdpt %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4, iPdpt, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
3608# else
3609 AssertMsgFailed(("Physical address doesn't match! iPdpt %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPdpt, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
3610# endif
3611 GCPtr += 512 * _2M;
3612 cErrors++;
3613 continue;
3614 }
3615
3616# if PGM_GST_TYPE == PGM_TYPE_AMD64
3617 if ( pPdpeDst->lm.u1User != PdpeSrc.lm.u1User
3618 || pPdpeDst->lm.u1Write != PdpeSrc.lm.u1Write
3619 || pPdpeDst->lm.u1NoExecute != PdpeSrc.lm.u1NoExecute)
3620 {
3621 AssertMsgFailed(("User/Write/NoExec bits don't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));
3622 GCPtr += 512 * _2M;
3623 cErrors++;
3624 continue;
3625 }
3626# endif
3627
3628# else /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
3629 {
3630# endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
3631# if PGM_GST_TYPE == PGM_TYPE_32BIT
3632 GSTPD const *pPDSrc = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
3633# if PGM_SHW_TYPE == PGM_TYPE_32BIT
3634 PCX86PD pPDDst = pgmShwGet32BitPDPtr(&pVCpu->pgm.s);
3635# endif
3636# endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */
3637 /*
3638 * Iterate the shadow page directory.
3639 */
3640 GCPtr = (GCPtr >> SHW_PD_SHIFT) << SHW_PD_SHIFT;
3641 unsigned iPDDst = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
3642
3643 for (;
3644 iPDDst < cPDEs;
3645 iPDDst++, GCPtr += cIncrement)
3646 {
3647# if PGM_SHW_TYPE == PGM_TYPE_PAE
3648 const SHWPDE PdeDst = *pgmShwGetPaePDEPtr(pPGM, GCPtr);
3649# else
3650 const SHWPDE PdeDst = pPDDst->a[iPDDst];
3651# endif
3652 if (PdeDst.u & PGM_PDFLAGS_MAPPING)
3653 {
3654 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
3655 if ((PdeDst.u & X86_PDE_AVL_MASK) != PGM_PDFLAGS_MAPPING)
3656 {
3657 AssertMsgFailed(("Mapping shall only have PGM_PDFLAGS_MAPPING set! PdeDst.u=%#RX64\n", (uint64_t)PdeDst.u));
3658 cErrors++;
3659 continue;
3660 }
3661 }
3662 else if ( (PdeDst.u & X86_PDE_P)
3663 || ((PdeDst.u & (X86_PDE_P | PGM_PDFLAGS_TRACK_DIRTY)) == (X86_PDE_P | PGM_PDFLAGS_TRACK_DIRTY))
3664 )
3665 {
3666 HCPhysShw = PdeDst.u & SHW_PDE_PG_MASK;
3667 PPGMPOOLPAGE pPoolPage = pgmPoolGetPage(pPool, HCPhysShw);
3668 if (!pPoolPage)
3669 {
3670 AssertMsgFailed(("Invalid page table address %RHp at %RGv! PdeDst=%#RX64\n",
3671 HCPhysShw, GCPtr, (uint64_t)PdeDst.u));
3672 cErrors++;
3673 continue;
3674 }
3675 const SHWPT *pPTDst = (const SHWPT *)PGMPOOL_PAGE_2_PTR(pVM, pPoolPage);
3676
3677 if (PdeDst.u & (X86_PDE4M_PWT | X86_PDE4M_PCD))
3678 {
3679 AssertMsgFailed(("PDE flags PWT and/or PCD is set at %RGv! These flags are not virtualized! PdeDst=%#RX64\n",
3680 GCPtr, (uint64_t)PdeDst.u));
3681 cErrors++;
3682 }
3683
3684 if (PdeDst.u & (X86_PDE4M_G | X86_PDE4M_D))
3685 {
3686 AssertMsgFailed(("4K PDE reserved flags at %RGv! PdeDst=%#RX64\n",
3687 GCPtr, (uint64_t)PdeDst.u));
3688 cErrors++;
3689 }
3690
3691 const GSTPDE PdeSrc = pPDSrc->a[(iPDDst >> (GST_PD_SHIFT - SHW_PD_SHIFT)) & GST_PD_MASK];
3692 if (!PdeSrc.n.u1Present)
3693 {
3694 AssertMsgFailed(("Guest PDE at %RGv is not present! PdeDst=%#RX64 PdeSrc=%#RX64\n",
3695 GCPtr, (uint64_t)PdeDst.u, (uint64_t)PdeSrc.u));
3696 cErrors++;
3697 continue;
3698 }
3699
3700 if ( !PdeSrc.b.u1Size
3701 || !fBigPagesSupported)
3702 {
3703 GCPhysGst = PdeSrc.u & GST_PDE_PG_MASK;
3704# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3705 GCPhysGst |= (iPDDst & 1) * (PAGE_SIZE / 2);
3706# endif
3707 }
3708 else
3709 {
3710# if PGM_GST_TYPE == PGM_TYPE_32BIT
3711 if (PdeSrc.u & X86_PDE4M_PG_HIGH_MASK)
3712 {
3713 AssertMsgFailed(("Guest PDE at %RGv is using PSE36 or similar! PdeSrc=%#RX64\n",
3714 GCPtr, (uint64_t)PdeSrc.u));
3715 cErrors++;
3716 continue;
3717 }
3718# endif
3719 GCPhysGst = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
3720# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3721 GCPhysGst |= GCPtr & RT_BIT(X86_PAGE_2M_SHIFT);
3722# endif
3723 }
3724
3725 if ( pPoolPage->enmKind
3726 != (!PdeSrc.b.u1Size || !fBigPagesSupported ? BTH_PGMPOOLKIND_PT_FOR_PT : BTH_PGMPOOLKIND_PT_FOR_BIG))
3727 {
3728 AssertMsgFailed(("Invalid shadow page table kind %d at %RGv! PdeSrc=%#RX64\n",
3729 pPoolPage->enmKind, GCPtr, (uint64_t)PdeSrc.u));
3730 cErrors++;
3731 }
3732
3733 PPGMPAGE pPhysPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysGst);
3734 if (!pPhysPage)
3735 {
3736 AssertMsgFailed(("Cannot find guest physical address %RGp in the PDE at %RGv! PdeSrc=%#RX64\n",
3737 GCPhysGst, GCPtr, (uint64_t)PdeSrc.u));
3738 cErrors++;
3739 continue;
3740 }
3741
3742 if (GCPhysGst != pPoolPage->GCPhys)
3743 {
3744 AssertMsgFailed(("GCPhysGst=%RGp != pPage->GCPhys=%RGp at %RGv\n",
3745 GCPhysGst, pPoolPage->GCPhys, GCPtr));
3746 cErrors++;
3747 continue;
3748 }
3749
3750 if ( !PdeSrc.b.u1Size
3751 || !fBigPagesSupported)
3752 {
3753 /*
3754 * Page Table.
3755 */
3756 const GSTPT *pPTSrc;
3757 rc = PGM_GCPHYS_2_PTR(pVM, GCPhysGst & ~(RTGCPHYS)(PAGE_SIZE - 1), &pPTSrc);
3758 if (RT_FAILURE(rc))
3759 {
3760 AssertMsgFailed(("Cannot map/convert guest physical address %RGp in the PDE at %RGv! PdeSrc=%#RX64\n",
3761 GCPhysGst, GCPtr, (uint64_t)PdeSrc.u));
3762 cErrors++;
3763 continue;
3764 }
3765 if ( (PdeSrc.u & (X86_PDE_P | X86_PDE_US | X86_PDE_RW/* | X86_PDE_A*/))
3766 != (PdeDst.u & (X86_PDE_P | X86_PDE_US | X86_PDE_RW/* | X86_PDE_A*/)))
3767 {
3768 /// @todo We get here a lot on out-of-sync CR3 entries. The access handler should zap them to avoid false alarms here!
3769 // (This problem will go away when/if we shadow multiple CR3s.)
3770 AssertMsgFailed(("4K PDE flags mismatch at %RGv! PdeSrc=%#RX64 PdeDst=%#RX64\n",
3771 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
3772 cErrors++;
3773 continue;
3774 }
3775 if (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)
3776 {
3777 AssertMsgFailed(("4K PDEs cannot have PGM_PDFLAGS_TRACK_DIRTY set! GCPtr=%RGv PdeDst=%#RX64\n",
3778 GCPtr, (uint64_t)PdeDst.u));
3779 cErrors++;
3780 continue;
3781 }
3782
3783 /* iterate the page table. */
3784# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
3785 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
3786 const unsigned offPTSrc = ((GCPtr >> SHW_PD_SHIFT) & 1) * 512;
3787# else
3788 const unsigned offPTSrc = 0;
3789# endif
3790 for (unsigned iPT = 0, off = 0;
3791 iPT < RT_ELEMENTS(pPTDst->a);
3792 iPT++, off += PAGE_SIZE)
3793 {
3794 const SHWPTE PteDst = pPTDst->a[iPT];
3795
3796 /* skip not-present entries. */
3797 if (!(PteDst.u & (X86_PTE_P | PGM_PTFLAGS_TRACK_DIRTY))) /** @todo deal with ALL handlers and CSAM !P pages! */
3798 continue;
3799 Assert(PteDst.n.u1Present);
3800
3801 const GSTPTE PteSrc = pPTSrc->a[iPT + offPTSrc];
3802 if (!PteSrc.n.u1Present)
3803 {
3804# ifdef IN_RING3
3805 PGMAssertHandlerAndFlagsInSync(pVM);
3806 PGMR3DumpHierarchyGC(pVM, cr3, cr4, (PdeSrc.u & GST_PDE_PG_MASK));
3807# endif
3808 AssertMsgFailed(("Out of sync (!P) PTE at %RGv! PteSrc=%#RX64 PteDst=%#RX64 pPTSrc=%RGv iPTSrc=%x PdeSrc=%x physpte=%RGp\n",
3809 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u, pPTSrc, iPT + offPTSrc, PdeSrc.au32[0],
3810 (PdeSrc.u & GST_PDE_PG_MASK) + (iPT + offPTSrc)*sizeof(PteSrc)));
3811 cErrors++;
3812 continue;
3813 }
3814
3815 uint64_t fIgnoreFlags = GST_PTE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_G | X86_PTE_D | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT;
3816# if 1 /** @todo sync accessed bit properly... */
3817 fIgnoreFlags |= X86_PTE_A;
3818# endif
3819
3820 /* match the physical addresses */
3821 HCPhysShw = PteDst.u & SHW_PTE_PG_MASK;
3822 GCPhysGst = PteSrc.u & GST_PTE_PG_MASK;
3823
3824# ifdef IN_RING3
3825 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysGst, &HCPhys);
3826 if (RT_FAILURE(rc))
3827 {
3828 if (HCPhysShw != MMR3PageDummyHCPhys(pVM)) /** @todo this is wrong. */
3829 {
3830 AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PteSrc=%#RX64 PteDst=%#RX64\n",
3831 GCPhysGst, GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
3832 cErrors++;
3833 continue;
3834 }
3835 }
3836 else if (HCPhysShw != (HCPhys & SHW_PTE_PG_MASK))
3837 {
3838 AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
3839 GCPtr + off, HCPhysShw, HCPhys, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
3840 cErrors++;
3841 continue;
3842 }
3843# endif
3844
3845 pPhysPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysGst);
3846 if (!pPhysPage)
3847 {
3848# ifdef IN_RING3 /** @todo make MMR3PageDummyHCPhys an 'All' function! */
3849 if (HCPhysShw != MMR3PageDummyHCPhys(pVM)) /** @todo this is wrong. */
3850 {
3851 AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PteSrc=%#RX64 PteDst=%#RX64\n",
3852 GCPhysGst, GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
3853 cErrors++;
3854 continue;
3855 }
3856# endif
3857 if (PteDst.n.u1Write)
3858 {
3859 AssertMsgFailed(("Invalid guest page at %RGv is writable! GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
3860 GCPtr + off, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
3861 cErrors++;
3862 }
3863 fIgnoreFlags |= X86_PTE_RW;
3864 }
3865 else if (HCPhysShw != PGM_PAGE_GET_HCPHYS(pPhysPage))
3866 {
3867 AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp pPhysPage:%R[pgmpage] GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
3868 GCPtr + off, HCPhysShw, pPhysPage, GCPhysGst, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
3869 cErrors++;
3870 continue;
3871 }
3872
3873 /* flags */
3874 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPhysPage))
3875 {
3876 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPhysPage))
3877 {
3878 if (PteDst.n.u1Write)
3879 {
3880 AssertMsgFailed(("WRITE access flagged at %RGv but the page is writable! pPhysPage=%R[pgmpage] PteSrc=%#RX64 PteDst=%#RX64\n",
3881 GCPtr + off, pPhysPage, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
3882 cErrors++;
3883 continue;
3884 }
3885 fIgnoreFlags |= X86_PTE_RW;
3886 }
3887 else
3888 {
3889 if (PteDst.n.u1Present)
3890 {
3891 AssertMsgFailed(("ALL access flagged at %RGv but the page is present! pPhysPage=%R[pgmpage] PteSrc=%#RX64 PteDst=%#RX64\n",
3892 GCPtr + off, pPhysPage, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
3893 cErrors++;
3894 continue;
3895 }
3896 fIgnoreFlags |= X86_PTE_P;
3897 }
3898 }
3899 else
3900 {
3901 if (!PteSrc.n.u1Dirty && PteSrc.n.u1Write)
3902 {
3903 if (PteDst.n.u1Write)
3904 {
3905 AssertMsgFailed(("!DIRTY page at %RGv is writable! PteSrc=%#RX64 PteDst=%#RX64\n",
3906 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
3907 cErrors++;
3908 continue;
3909 }
3910 if (!(PteDst.u & PGM_PTFLAGS_TRACK_DIRTY))
3911 {
3912 AssertMsgFailed(("!DIRTY page at %RGv is not marked TRACK_DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
3913 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
3914 cErrors++;
3915 continue;
3916 }
3917 if (PteDst.n.u1Dirty)
3918 {
3919 AssertMsgFailed(("!DIRTY page at %RGv is marked DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
3920 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
3921 cErrors++;
3922 }
3923# if 0 /** @todo sync access bit properly... */
3924 if (PteDst.n.u1Accessed != PteSrc.n.u1Accessed)
3925 {
3926 AssertMsgFailed(("!DIRTY page at %RGv has a mismatching accessed bit! PteSrc=%#RX64 PteDst=%#RX64\n",
3927 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
3928 cErrors++;
3929 }
3930 fIgnoreFlags |= X86_PTE_RW;
3931# else
3932 fIgnoreFlags |= X86_PTE_RW | X86_PTE_A;
3933# endif
3934 }
3935 else if (PteDst.u & PGM_PTFLAGS_TRACK_DIRTY)
3936 {
3937 /* access bit emulation (not implemented). */
3938 if (PteSrc.n.u1Accessed || PteDst.n.u1Present)
3939 {
3940 AssertMsgFailed(("PGM_PTFLAGS_TRACK_DIRTY set at %RGv but no accessed bit emulation! PteSrc=%#RX64 PteDst=%#RX64\n",
3941 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
3942 cErrors++;
3943 continue;
3944 }
3945 if (!PteDst.n.u1Accessed)
3946 {
3947 AssertMsgFailed(("!ACCESSED page at %RGv has the accessed bit cleared! PteSrc=%#RX64 PteDst=%#RX64\n",
3948 GCPtr + off, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
3949 cErrors++;
3950 }
3951 fIgnoreFlags |= X86_PTE_P;
3952 }
3953# ifdef DEBUG_sandervl
3954 fIgnoreFlags |= X86_PTE_D | X86_PTE_A;
3955# endif
3956 }
3957
3958 if ( (PteSrc.u & ~fIgnoreFlags) != (PteDst.u & ~fIgnoreFlags)
3959 && (PteSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (PteDst.u & ~fIgnoreFlags)
3960 )
3961 {
3962 AssertMsgFailed(("Flags mismatch at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PteSrc=%#RX64 PteDst=%#RX64\n",
3963 GCPtr + off, (uint64_t)PteSrc.u & ~fIgnoreFlags, (uint64_t)PteDst.u & ~fIgnoreFlags,
3964 fIgnoreFlags, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
3965 cErrors++;
3966 continue;
3967 }
3968 } /* foreach PTE */
3969 }
3970 else
3971 {
3972 /*
3973 * Big Page.
3974 */
3975 uint64_t fIgnoreFlags = X86_PDE_AVL_MASK | GST_PDE_PG_MASK | X86_PDE4M_G | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_PWT | X86_PDE4M_PCD;
3976 if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
3977 {
3978 if (PdeDst.n.u1Write)
3979 {
3980 AssertMsgFailed(("!DIRTY page at %RGv is writable! PdeSrc=%#RX64 PdeDst=%#RX64\n",
3981 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
3982 cErrors++;
3983 continue;
3984 }
3985 if (!(PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY))
3986 {
3987 AssertMsgFailed(("!DIRTY page at %RGv is not marked TRACK_DIRTY! PdeSrc=%#RX64 PdeDst=%#RX64\n",
3988 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
3989 cErrors++;
3990 continue;
3991 }
3992# if 0 /** @todo sync access bit properly... */
3993 if (PdeDst.n.u1Accessed != PdeSrc.b.u1Accessed)
3994 {
3995 AssertMsgFailed(("!DIRTY page at %RGv has a mismatching accessed bit! PdeSrc=%#RX64 PdeDst=%#RX64\n",
3996 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
3997 cErrors++;
3998 }
3999 fIgnoreFlags |= X86_PTE_RW;
4000# else
4001 fIgnoreFlags |= X86_PTE_RW | X86_PTE_A;
4002# endif
4003 }
4004 else if (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)
4005 {
4006 /* access bit emulation (not implemented). */
4007 if (PdeSrc.b.u1Accessed || PdeDst.n.u1Present)
4008 {
4009 AssertMsgFailed(("PGM_PDFLAGS_TRACK_DIRTY set at %RGv but no accessed bit emulation! PdeSrc=%#RX64 PdeDst=%#RX64\n",
4010 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4011 cErrors++;
4012 continue;
4013 }
4014 if (!PdeDst.n.u1Accessed)
4015 {
4016 AssertMsgFailed(("!ACCESSED page at %RGv has the accessed bit cleared! PdeSrc=%#RX64 PdeDst=%#RX64\n",
4017 GCPtr, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4018 cErrors++;
4019 }
4020 fIgnoreFlags |= X86_PTE_P;
4021 }
4022
4023 if ((PdeSrc.u & ~fIgnoreFlags) != (PdeDst.u & ~fIgnoreFlags))
4024 {
4025 AssertMsgFailed(("Flags mismatch (B) at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PdeDst=%#RX64\n",
4026 GCPtr, (uint64_t)PdeSrc.u & ~fIgnoreFlags, (uint64_t)PdeDst.u & ~fIgnoreFlags,
4027 fIgnoreFlags, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
4028 cErrors++;
4029 }
4030
4031 /* iterate the page table. */
4032 for (unsigned iPT = 0, off = 0;
4033 iPT < RT_ELEMENTS(pPTDst->a);
4034 iPT++, off += PAGE_SIZE, GCPhysGst += PAGE_SIZE)
4035 {
4036 const SHWPTE PteDst = pPTDst->a[iPT];
4037
4038 if (PteDst.u & PGM_PTFLAGS_TRACK_DIRTY)
4039 {
4040 AssertMsgFailed(("The PTE at %RGv emulating a 2/4M page is marked TRACK_DIRTY! PdeSrc=%#RX64 PteDst=%#RX64\n",
4041 GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4042 cErrors++;
4043 }
4044
4045 /* skip not-present entries. */
4046 if (!PteDst.n.u1Present) /** @todo deal with ALL handlers and CSAM !P pages! */
4047 continue;
4048
4049 fIgnoreFlags = X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT | X86_PTE_D | X86_PTE_A | X86_PTE_G | X86_PTE_PAE_NX;
4050
4051 /* match the physical addresses */
4052 HCPhysShw = PteDst.u & X86_PTE_PAE_PG_MASK;
4053
4054# ifdef IN_RING3
4055 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysGst, &HCPhys);
4056 if (RT_FAILURE(rc))
4057 {
4058 if (HCPhysShw != MMR3PageDummyHCPhys(pVM)) /** @todo this is wrong. */
4059 {
4060 AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PdeSrc=%#RX64 PteDst=%#RX64\n",
4061 GCPhysGst, GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4062 cErrors++;
4063 }
4064 }
4065 else if (HCPhysShw != (HCPhys & X86_PTE_PAE_PG_MASK))
4066 {
4067 AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
4068 GCPtr + off, HCPhysShw, HCPhys, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4069 cErrors++;
4070 continue;
4071 }
4072# endif
4073 pPhysPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysGst);
4074 if (!pPhysPage)
4075 {
4076# ifdef IN_RING3 /** @todo make MMR3PageDummyHCPhys an 'All' function! */
4077 if (HCPhysShw != MMR3PageDummyHCPhys(pVM)) /** @todo this is wrong. */
4078 {
4079 AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PdeSrc=%#RX64 PteDst=%#RX64\n",
4080 GCPhysGst, GCPtr + off, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4081 cErrors++;
4082 continue;
4083 }
4084# endif
4085 if (PteDst.n.u1Write)
4086 {
4087 AssertMsgFailed(("Invalid guest page at %RGv is writable! GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
4088 GCPtr + off, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4089 cErrors++;
4090 }
4091 fIgnoreFlags |= X86_PTE_RW;
4092 }
4093 else if (HCPhysShw != PGM_PAGE_GET_HCPHYS(pPhysPage))
4094 {
4095 AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp pPhysPage=%R[pgmpage] GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
4096 GCPtr + off, HCPhysShw, pPhysPage, GCPhysGst, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4097 cErrors++;
4098 continue;
4099 }
4100
4101 /* flags */
4102 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPhysPage))
4103 {
4104 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPhysPage))
4105 {
4106 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPhysPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
4107 {
4108 if (PteDst.n.u1Write)
4109 {
4110 AssertMsgFailed(("WRITE access flagged at %RGv but the page is writable! pPhysPage=%R[pgmpage] PdeSrc=%#RX64 PteDst=%#RX64\n",
4111 GCPtr + off, pPhysPage, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4112 cErrors++;
4113 continue;
4114 }
4115 fIgnoreFlags |= X86_PTE_RW;
4116 }
4117 }
4118 else
4119 {
4120 if (PteDst.n.u1Present)
4121 {
4122 AssertMsgFailed(("ALL access flagged at %RGv but the page is present! pPhysPage=%R[pgmpage] PdeSrc=%#RX64 PteDst=%#RX64\n",
4123 GCPtr + off, pPhysPage, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4124 cErrors++;
4125 continue;
4126 }
4127 fIgnoreFlags |= X86_PTE_P;
4128 }
4129 }
4130
4131 if ( (PdeSrc.u & ~fIgnoreFlags) != (PteDst.u & ~fIgnoreFlags)
4132 && (PdeSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (PteDst.u & ~fIgnoreFlags) /* lazy phys handler dereg. */
4133 )
4134 {
4135 AssertMsgFailed(("Flags mismatch (BT) at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PteDst=%#RX64\n",
4136 GCPtr + off, (uint64_t)PdeSrc.u & ~fIgnoreFlags, (uint64_t)PteDst.u & ~fIgnoreFlags,
4137 fIgnoreFlags, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
4138 cErrors++;
4139 continue;
4140 }
4141 } /* for each PTE */
4142 }
4143 }
4144 /* not present */
4145
4146 } /* for each PDE */
4147
4148 } /* for each PDPTE */
4149
4150 } /* for each PML4E */
4151
4152# ifdef DEBUG
4153 if (cErrors)
4154 LogFlow(("AssertCR3: cErrors=%d\n", cErrors));
4155# endif
4156
4157#endif /* GST == 32BIT, PAE or AMD64 */
4158 return cErrors;
4159
4160#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT */
4161}
4162#endif /* VBOX_STRICT */
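
/*
 * Illustrative sketch (kept out of the build with #if 0): AssertCR3 above
 * defaults GCPtr/cb so strict builds can audit the entire address space, or
 * it can be given an explicit range to audit a single page. The helper below
 * is an assumption for illustration only.
 */
#if 0
DECLINLINE(void) pgmSketchAuditPage(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtrPage)
{
    /* Audit just one page instead of the whole address space. */
    unsigned cErrors = PGM_BTH_NAME(AssertCR3)(pVCpu, cr3, cr4, GCPtrPage, PAGE_SIZE);
    AssertMsg(!cErrors, ("%u shadow/guest mismatches at %RGv\n", cErrors, GCPtrPage));
}
#endif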
4163
4164
4165/**
4166 * Sets up the CR3 for shadow paging
4167 *
4168 * @returns Strict VBox status code.
4169 * @retval VINF_SUCCESS.
4170 *
4171 * @param pVCpu The VMCPU handle.
4172 * @param GCPhysCR3 The physical address in the CR3 register.
4173 */
4174PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3)
4175{
4176 PVM pVM = pVCpu->CTX_SUFF(pVM);
4177
4178 /* Update guest paging info. */
4179#if PGM_GST_TYPE == PGM_TYPE_32BIT \
4180 || PGM_GST_TYPE == PGM_TYPE_PAE \
4181 || PGM_GST_TYPE == PGM_TYPE_AMD64
4182
4183 LogFlow(("MapCR3: %RGp\n", GCPhysCR3));
4184
4185 /*
4186 * Map the page CR3 points at.
4187 */
4188 RTHCPTR HCPtrGuestCR3;
4189 RTHCPHYS HCPhysGuestCR3;
4190 pgmLock(pVM);
4191 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysCR3);
4192 AssertReturn(pPage, VERR_INTERNAL_ERROR_2);
4193 HCPhysGuestCR3 = PGM_PAGE_GET_HCPHYS(pPage);
4194 /** @todo this needs some reworking wrt. locking. */
4195# if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
4196 HCPtrGuestCR3 = NIL_RTHCPTR;
4197 int rc = VINF_SUCCESS;
4198# else
4199 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3 & GST_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3);
4200# endif
4201 pgmUnlock(pVM);
4202 if (RT_SUCCESS(rc))
4203 {
4204 rc = PGMMap(pVM, (RTGCPTR)pVM->pgm.s.GCPtrCR3Mapping, HCPhysGuestCR3, PAGE_SIZE, 0);
4205 if (RT_SUCCESS(rc))
4206 {
4207# ifdef IN_RC
4208 PGM_INVL_PG(pVM->pgm.s.GCPtrCR3Mapping);
4209# endif
4210# if PGM_GST_TYPE == PGM_TYPE_32BIT
4211 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
4212# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4213 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
4214# endif
4215 pVCpu->pgm.s.pGst32BitPdRC = (RCPTRTYPE(PX86PD))pVM->pgm.s.GCPtrCR3Mapping;
4216
4217# elif PGM_GST_TYPE == PGM_TYPE_PAE
4218 unsigned off = GCPhysCR3 & GST_CR3_PAGE_MASK & PAGE_OFFSET_MASK;
4219 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
4220# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4221 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
4222# endif
4223 pVCpu->pgm.s.pGstPaePdptRC = (RCPTRTYPE(PX86PDPT))((RCPTRTYPE(uint8_t *))pVM->pgm.s.GCPtrCR3Mapping + off);
4224 Log(("Cached mapping %RRv\n", pVCpu->pgm.s.pGstPaePdptRC));
4225
4226 /*
4227 * Map the 4 PDs too.
4228 */
4229 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVCpu->pgm.s);
4230 RTGCPTR GCPtr = pVM->pgm.s.GCPtrCR3Mapping + PAGE_SIZE;
4231 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++, GCPtr += PAGE_SIZE)
4232 {
4233 if (pGuestPDPT->a[i].n.u1Present)
4234 {
4235 RTHCPTR HCPtr;
4236 RTHCPHYS HCPhys;
4237 RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;
4238 pgmLock(pVM);
4239 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
4240 AssertReturn(pPage, VERR_INTERNAL_ERROR_2);
4241 HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
4242# if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
4243 HCPtr = NIL_RTHCPTR;
4244 int rc2 = VINF_SUCCESS;
4245# else
4246 int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)&HCPtr);
4247# endif
4248 pgmUnlock(pVM);
4249 if (RT_SUCCESS(rc2))
4250 {
4251 rc = PGMMap(pVM, GCPtr, HCPhys, PAGE_SIZE, 0);
4252 AssertRCReturn(rc, rc);
4253
4254 pVCpu->pgm.s.apGstPaePDsR3[i] = (R3PTRTYPE(PX86PDPAE))HCPtr;
4255# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4256 pVCpu->pgm.s.apGstPaePDsR0[i] = (R0PTRTYPE(PX86PDPAE))HCPtr;
4257# endif
4258 pVCpu->pgm.s.apGstPaePDsRC[i] = (RCPTRTYPE(PX86PDPAE))GCPtr;
4259 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
4260 PGM_INVL_PG(GCPtr); /** @todo This ends up calling HWACCMInvalidatePage, is that correct? */
4261 continue;
4262 }
4263 AssertMsgFailed(("MapCR3: rc2=%d GCPhys=%RGp i=%d\n", rc2, GCPhys, i));
4264 }
4265
4266 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
4267# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4268 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
4269# endif
4270 pVCpu->pgm.s.apGstPaePDsRC[i] = 0;
4271 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
4272 PGM_INVL_PG(GCPtr); /** @todo this shouldn't be necessary? */
4273 }
4274
4275# elif PGM_GST_TYPE == PGM_TYPE_AMD64
4276 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
4277# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4278 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
4279# endif
4280# endif
4281 }
4282 else
4283 AssertMsgFailed(("rc=%Rrc GCPhysGuestPD=%RGp\n", rc, GCPhysCR3));
4284 }
4285 else
4286 AssertMsgFailed(("rc=%Rrc GCPhysGuestPD=%RGp\n", rc, GCPhysCR3));
4287
4288#else /* prot/real stub */
4289 int rc = VINF_SUCCESS;
4290#endif
4291
4292 /* Update shadow paging info for guest modes with paging (32, pae, 64). */
4293# if ( ( PGM_SHW_TYPE == PGM_TYPE_32BIT \
4294 || PGM_SHW_TYPE == PGM_TYPE_PAE \
4295 || PGM_SHW_TYPE == PGM_TYPE_AMD64) \
4296 && ( PGM_GST_TYPE != PGM_TYPE_REAL \
4297 && PGM_GST_TYPE != PGM_TYPE_PROT))
4298
4299 Assert(!HWACCMIsNestedPagingActive(pVM));
4300
4301 /*
4302 * Update the shadow root page as well since that's not fixed.
4303 */
4304 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
4305 PPGMPOOLPAGE pOldShwPageCR3 = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
4306 uint32_t iOldShwUserTable = pVCpu->pgm.s.iShwUserTable;
4307 uint32_t iOldShwUser = pVCpu->pgm.s.iShwUser;
4308 PPGMPOOLPAGE pNewShwPageCR3;
4309
4310 Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
4311 rc = pgmPoolAlloc(pVM, GCPhysCR3 & GST_CR3_PAGE_MASK, BTH_PGMPOOLKIND_ROOT, SHW_POOL_ROOT_IDX, GCPhysCR3 >> PAGE_SHIFT, &pNewShwPageCR3);
4312 AssertFatalRC(rc);
4313 rc = VINF_SUCCESS;
4314
4315 /* Mark the page as locked; disallow flushing. */
4316 pgmPoolLockPage(pPool, pNewShwPageCR3);
4317
4318# ifdef IN_RC
4319 /* NOTE: We can't deal with jumps to ring 3 here as we're now in an inconsistent state! */
4320 bool fLog = VMMGCLogDisable(pVM);
4321 pgmLock(pVM);
4322# endif
4323
4324 pVCpu->pgm.s.iShwUser = SHW_POOL_ROOT_IDX;
4325 pVCpu->pgm.s.iShwUserTable = GCPhysCR3 >> PAGE_SHIFT;
4326 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3) = pNewShwPageCR3;
4327# ifdef IN_RING0
4328 pVCpu->pgm.s.pShwPageCR3R3 = MMHyperCCToR3(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
4329 pVCpu->pgm.s.pShwPageCR3RC = MMHyperCCToRC(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
4330# elif defined(IN_RC)
4331 pVCpu->pgm.s.pShwPageCR3R3 = MMHyperCCToR3(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
4332 pVCpu->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
4333# else
4334 pVCpu->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
4335 pVCpu->pgm.s.pShwPageCR3RC = MMHyperCCToRC(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
4336# endif
4337
4338# ifndef PGM_WITHOUT_MAPPINGS
4339 /*
4340 * Apply all hypervisor mappings to the new CR3.
4341 * Note that SyncCR3 will be executed in case CR3 is changed in a guest paging mode; this will
4342 * make sure we check for conflicts in the new CR3 root.
4343 */
4344# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
4345 Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL) || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4346# endif
4347 rc = pgmMapActivateCR3(pVM, pNewShwPageCR3);
4348 AssertRCReturn(rc, rc);
4349# endif
4350
4351 /* Set the current hypervisor CR3. */
4352 CPUMSetHyperCR3(pVCpu, PGMGetHyperCR3(pVCpu));
4353 SELMShadowCR3Changed(pVM, pVCpu);
4354
4355# ifdef IN_RC
4356 pgmUnlock(pVM);
4357 VMMGCLogRestore(pVM, fLog);
4358# endif
4359
4360 /* Clean up the old CR3 root. */
4361 if (pOldShwPageCR3)
4362 {
4363 Assert(pOldShwPageCR3->enmKind != PGMPOOLKIND_FREE);
4364# ifndef PGM_WITHOUT_MAPPINGS
4365 /* Remove the hypervisor mappings from the shadow page table. */
4366 pgmMapDeactivateCR3(pVM, pOldShwPageCR3);
4367# endif
4368 /* Mark the page as unlocked; allow flushing again. */
4369 pgmPoolUnlockPage(pPool, pOldShwPageCR3);
4370
4371 pgmPoolFreeByPage(pPool, pOldShwPageCR3, iOldShwUser, iOldShwUserTable);
4372 }
4373
4374# endif
4375
4376 return rc;
4377}
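
/*
 * Illustrative sketch (kept out of the build with #if 0): the shadow CR3 root
 * swap performed by MapCR3 above, reduced to its ordering -- lock the new
 * root before switching and only unlock/free the old one afterwards, so the
 * pool can never flush the root currently in use. The helper name is an
 * assumption for illustration only.
 */
#if 0
static void pgmSketchSwapShwRoot(PPGMPOOL pPool, PPGMPOOLPAGE pNew, PPGMPOOLPAGE pOld,
                                 uint32_t iOldUser, uint32_t iOldUserTable)
{
    pgmPoolLockPage(pPool, pNew);               /* pin the new root first */
    /* ... publish pNew as pShwPageCR3 and update CPUM/SELM here ... */
    if (pOld)
    {
        pgmPoolUnlockPage(pPool, pOld);         /* allow flushing again */
        pgmPoolFreeByPage(pPool, pOld, iOldUser, iOldUserTable);
    }
}
#endif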
4378
4379/**
4380 * Unmaps the shadow CR3.
4381 *
4382 * @returns VBox status, no specials.
4383 * @param pVCpu The VMCPU handle.
4384 */
4385PGM_BTH_DECL(int, UnmapCR3)(PVMCPU pVCpu)
4386{
4387 LogFlow(("UnmapCR3\n"));
4388
4389 int rc = VINF_SUCCESS;
4390 PVM pVM = pVCpu->CTX_SUFF(pVM);
4391
4392 /*
4393 * Update guest paging info.
4394 */
4395#if PGM_GST_TYPE == PGM_TYPE_32BIT
4396 pVCpu->pgm.s.pGst32BitPdR3 = 0;
4397# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4398 pVCpu->pgm.s.pGst32BitPdR0 = 0;
4399# endif
4400 pVCpu->pgm.s.pGst32BitPdRC = 0;
4401
4402#elif PGM_GST_TYPE == PGM_TYPE_PAE
4403 pVCpu->pgm.s.pGstPaePdptR3 = 0;
4404# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4405 pVCpu->pgm.s.pGstPaePdptR0 = 0;
4406# endif
4407 pVCpu->pgm.s.pGstPaePdptRC = 0;
4408 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
4409 {
4410 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
4411# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4412 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
4413# endif
4414 pVCpu->pgm.s.apGstPaePDsRC[i] = 0;
4415 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
4416 }
4417
4418#elif PGM_GST_TYPE == PGM_TYPE_AMD64
4419 pVCpu->pgm.s.pGstAmd64Pml4R3 = 0;
4420# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4421 pVCpu->pgm.s.pGstAmd64Pml4R0 = 0;
4422# endif
4423
4424#else /* prot/real mode stub */
4425 /* nothing to do */
4426#endif
4427
4428#if !defined(IN_RC) /* In RC we rely on MapCR3 to do the shadow part for us at a safe time */
4429 /*
4430 * Update shadow paging info.
4431 */
4432# if ( ( PGM_SHW_TYPE == PGM_TYPE_32BIT \
4433 || PGM_SHW_TYPE == PGM_TYPE_PAE \
4434 || PGM_SHW_TYPE == PGM_TYPE_AMD64))
4435
4436# if PGM_GST_TYPE != PGM_TYPE_REAL
4437 Assert(!HWACCMIsNestedPagingActive(pVM));
4438# endif
4439
4440# ifndef PGM_WITHOUT_MAPPINGS
4441 if (pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
4442 /* Remove the hypervisor mappings from the shadow page table. */
4443 pgmMapDeactivateCR3(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
4444# endif
4445
4446 if (pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
4447 {
4448 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
4449
4450 Assert(pVCpu->pgm.s.iShwUser != PGMPOOL_IDX_NESTED_ROOT);
4451
4452 /* Mark the page as unlocked; allow flushing again. */
4453 pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
4454
4455 pgmPoolFreeByPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), pVCpu->pgm.s.iShwUser, pVCpu->pgm.s.iShwUserTable);
4456 pVCpu->pgm.s.pShwPageCR3R3 = 0;
4457 pVCpu->pgm.s.pShwPageCR3R0 = 0;
4458 pVCpu->pgm.s.pShwPageCR3RC = 0;
4459 pVCpu->pgm.s.iShwUser = 0;
4460 pVCpu->pgm.s.iShwUserTable = 0;
4461 }
4462# endif
4463#endif /* !IN_RC*/
4464
4465 return rc;
4466}
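
/*
 * Illustrative sketch (kept out of the build with #if 0): MapCR3 and
 * UnmapCR3 above work as a pair -- a guest CR3 change drops the old guest and
 * shadow state before mapping the new root. The sequence below is an
 * assumption for illustration; the real driver is the PGM mode switching
 * code.
 */
#if 0
DECLINLINE(int) pgmSketchChangeCr3(PVMCPU pVCpu, RTGCPHYS GCPhysNewCR3)
{
    int rc = PGM_BTH_NAME(UnmapCR3)(pVCpu);             /* drop old guest/shadow info */
    if (RT_SUCCESS(rc))
        rc = PGM_BTH_NAME(MapCR3)(pVCpu, GCPhysNewCR3); /* map the new root */
    return rc;
}
#endif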
4467