- Timestamp: Oct 7, 2020 7:53:07 PM
- svn:sync-xref-src-repo-rev: 140786
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 4 edited
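Every hunk in this changeset applies the same transformation: page-table entries stop being read and written through the union bitfield views (Pde.n.u1Present, PdeDst.b.u1Size, PdeDst.au32[0]) and are instead tested and updated with flag masks on the raw entry word (Pde.u & X86_PDE_P). A minimal sketch of the before/after pattern follows; the union layout and mask values are simplified stand-ins for the real VBox definitions (X86PDEPAE, X86_PDE_P, ...):

    /* Simplified stand-in for the VBox PDE union and flag constants. */
    #include <stdint.h>
    #include <stdio.h>

    #define X86_PDE_P  UINT64_C(0x01)   /* present  (bit 0) */
    #define X86_PDE_RW UINT64_C(0x02)   /* writable (bit 1) */
    #define X86_PDE_A  UINT64_C(0x20)   /* accessed (bit 5) */

    typedef union PDE
    {
        uint64_t u;                /* raw view: what the new code uses */
        struct                     /* bitfield view: what the old code used */
        {
            uint64_t u1Present  : 1;
            uint64_t u1Write    : 1;
            uint64_t u1User     : 1;
            uint64_t u2Ignored  : 2;
            uint64_t u1Accessed : 1;
        } n;
    } PDE;

    int main(void)
    {
        PDE Pde = { 0 };

        Pde.n.u1Present = 1;               /* old style: bitfield read-modify-write */
        Pde.u |= X86_PDE_RW | X86_PDE_A;   /* new style: one OR on the whole word */

        if (Pde.u & X86_PDE_P)             /* new test; old: if (Pde.n.u1Present) */
            printf("present, raw=%#llx\n", (unsigned long long)Pde.u);
        return 0;
    }

Besides making it possible to fold several bit tests into a single masked compare, working on the raw word avoids compiler surprises with 64-bit bitfields: one comment removed below notes that gcc 10.2.1 could drop the top 32 bits when a 64-bit OR followed a run of bitfield stores.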
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
--- trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r86466)
+++ trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r86476)
@@ -1428,5 +1428,5 @@
             AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
             Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
-            pPde->n.u1Write = 1;
+            pPde->u |= X86_PDE_RW;
             Log(("-> PDE=%#llx (PAE)\n", pPde->u));
             break;

trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
--- trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r86468)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r86476)
@@ -467,4 +467,5 @@
      * Set the accessed and dirty flags.
      */
+    /** @todo Use atomics here as we don't own the lock and stuff: */
 # if PGM_GST_TYPE == PGM_TYPE_AMD64
     GstWalk.Pml4e.u |= X86_PML4E_A;
@@ -475,5 +476,5 @@
     if (GstWalk.Core.fBigPage)
     {
-        Assert(GstWalk.Pde.b.u1Size);
+        Assert(GstWalk.Pde.u & X86_PDE_PS);
         if (uErr & X86_TRAP_PF_RW)
         {
@@ -489,5 +490,5 @@
     else
     {
-        Assert(!GstWalk.Pde.b.u1Size);
+        Assert(!(GstWalk.Pde.u & X86_PDE_PS));
         GstWalk.Pde.u |= X86_PDE_A;
         GstWalk.pPde->u |= X86_PDE_A;
@@ -629,5 +630,5 @@
      */
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-    Assert(GstWalk.Pde.n.u1Present);
+    Assert(GstWalk.Pde.u & X86_PDE_P);
 # endif
     if (   !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
@@ -835,5 +836,5 @@
     if (    GstWalk.Core.fEffectiveUS
         && !GstWalk.Core.fEffectiveRW
-        && (GstWalk.Core.fBigPage || GstWalk.Pde.n.u1Write)
+        && (GstWalk.Core.fBigPage || (GstWalk.Pde.u & X86_PDE_RW))
         && pVM->cCpus == 1 /* Sorry, no go on SMP. Add CFGM option? */)
     {
@@ -918,5 +919,5 @@
     else if (    GstWalk.Core.fEffectiveUS
              && !GstWalk.Core.fEffectiveRW
-             && (GstWalk.Core.fBigPage || GstWalk.Pde.n.u1Write)
+             && (GstWalk.Core.fBigPage || (GstWalk.Pde.u & X86_PDE_RW))
              && pVCpu->pgm.s.cNetwareWp0Hacks > 0
              && (CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG
@@ -1117,5 +1118,5 @@
 
     const SHWPDE PdeDst = *pPdeDst;
-    if (!PdeDst.n.u1Present)
+    if (!(PdeDst.u & X86_PDE_P))
     {
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePageSkipped));
@@ -1149,5 +1150,5 @@
 # endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
     const bool fWasBigPage = RT_BOOL(PdeDst.u & PGM_PDFLAGS_BIG_PAGE);
-    const bool fIsBigPage  = PdeSrc.b.u1Size && GST_IS_PSE_ACTIVE(pVCpu);
+    const bool fIsBigPage  = (PdeSrc.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu);
     if (fWasBigPage != fIsBigPage)
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePageSkipped));
@@ -1163,5 +1164,5 @@
         || (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
             && fIsBigPage
-            && PdeSrc.b.u1Global
+            && (PdeSrc.u & X86_PDE4M_G)
            )
        )
@@ -1179,8 +1180,8 @@
      */
     rc = VINF_SUCCESS;
-    if (PdeSrc.n.u1Present)
-    {
-        Assert(    PdeSrc.n.u1User == PdeDst.n.u1User
-               && (PdeSrc.n.u1Write || !PdeDst.n.u1Write || pVCpu->pgm.s.cNetwareWp0Hacks > 0));
+    if (PdeSrc.u & X86_PDE_P)
+    {
+        Assert(    (PdeSrc.u & X86_PDE_US) == (PdeDst.u & X86_PDE_US)
+               && ((PdeSrc.u & X86_PDE_RW) || !(PdeDst.u & X86_PDE_RW) || pVCpu->pgm.s.cNetwareWp0Hacks > 0));
 # ifndef PGM_WITHOUT_MAPPINGS
         if (PdeDst.u & PGM_PDFLAGS_MAPPING)
@@ -1264,5 +1265,5 @@
         if (    (PdeSrc.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US))
             ==  (PdeDst.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US))
-            && (   PdeSrc.b.u1Dirty /** @todo rainy day: What about read-only 4M pages? not very common, but still... */
+            && (   (PdeSrc.u & X86_PDE4M_D) /** @todo rainy day: What about read-only 4M pages? not very common, but still... */
                 || (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)))
         {
@@ -1615,5 +1616,5 @@
      * we mark the page not present.
      */
-    if (!PteSrc.n.u1Accessed || !PdeSrc.n.u1Accessed)
+    if (!(PteSrc.u & X86_PTE_A) || !(PdeSrc.u & X86_PDE_A))
     {
         LogFlow(("SyncPageWorker: page and or page directory not accessed -> mark not present\n"));
@@ -1625,6 +1626,7 @@
      * when the page is modified.
      */
-    else if (!PteSrc.n.u1Dirty && (PdeSrc.n.u1Write & PteSrc.n.u1Write))
+    else if (!(PteSrc.u & X86_PTE_D) && (PdeSrc.u & PteSrc.u & X86_PTE_RW))
     {
+        AssertCompile(X86_PTE_RW == X86_PDE_RW);
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,DirtyPage));
         SHW_PTE_SET(PteDst,
@@ -1747,5 +1749,5 @@
      * Assert preconditions.
      */
-    Assert(PdeSrc.n.u1Present);
+    Assert(PdeSrc.u & X86_PDE_P);
     Assert(cPages);
 # if 0 /* rarely useful; leave for debugging. */
@@ -1798,5 +1800,5 @@
      * the modified entry, we may end up here with a "stale" TLB entry.
      */
-    if (!PdeDst.n.u1Present)
+    if (!(PdeDst.u & X86_PDE_P))
     {
         Log(("CPU%u: SyncPage: Pde at %RGv changed behind our back? (pPdeDst=%p/%RX64) uErr=%#x\n", pVCpu->idCpu, GCPtrPage, pPdeDst, (uint64_t)PdeDst.u, (uint32_t)uErr));
@@ -1820,5 +1822,5 @@
      * Check that the page is present and that the shadow PDE isn't out of sync.
      */
-    const bool fBigPage  = PdeSrc.b.u1Size && GST_IS_PSE_ACTIVE(pVCpu);
+    const bool fBigPage  = (PdeSrc.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu);
     const bool fPdeValid = !fBigPage ? GST_IS_PDE_VALID(pVCpu, PdeSrc) : GST_IS_BIG_PDE_VALID(pVCpu, PdeSrc);
     RTGCPHYS GCPhys;
@@ -1840,11 +1842,11 @@
     }
     /** @todo This doesn't check the G bit of 2/4MB pages. FIXME */
-    if (    fPdeValid
-        &&  pShwPage->GCPhys == GCPhys
-        &&  PdeSrc.n.u1Present
-        &&  PdeSrc.n.u1User == PdeDst.n.u1User
-        &&  (PdeSrc.n.u1Write == PdeDst.n.u1Write || !PdeDst.n.u1Write)
+    if (    fPdeValid
+        &&  pShwPage->GCPhys == GCPhys
+        &&  (PdeSrc.u & X86_PDE_P)
+        &&  (PdeSrc.u & X86_PDE_US) == (PdeDst.u & X86_PDE_US)
+        &&  ((PdeSrc.u & X86_PDE_RW) == (PdeDst.u & X86_PDE_RW) || !(PdeDst.u & X86_PDE_RW))
 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
-        &&  (PdeSrc.n.u1NoExecute == PdeDst.n.u1NoExecute || !GST_IS_NX_ACTIVE(pVCpu))
+        &&  ((PdeSrc.u & X86_PDE_PAE_NX) == (PdeDst.u & X86_PDE_PAE_NX) || !GST_IS_NX_ACTIVE(pVCpu))
 # endif
        )
@@ -1855,5 +1857,5 @@
      * check is only meant for dealing with non-#PF'ing paths.
      */
-    if (PdeSrc.n.u1Accessed)
+    if (PdeSrc.u & X86_PDE_A)
     {
         PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
@@ -1954,5 +1956,5 @@
     if (    PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
         && (    PGM_PAGE_IS_ZERO(pPage)
-            || (    PdeSrc.n.u1Write
+            || (    (PdeSrc.u & X86_PDE_RW)
                 &&  PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
 # ifdef VBOX_WITH_REAL_WRITE_MONITORED_PAGES
@@ -2008,15 +2010,14 @@
      * As for invlpg, it simply frees the whole shadow PT.
      * ...It's possibly because the guest clears it and the guest doesn't really tell us... */
-    if (    !PdeSrc.b.u1Dirty
-        &&  PdeSrc.b.u1Write)
+    if ((PdeSrc.u & (X86_PDE4M_D | X86_PDE_RW)) == X86_PDE_RW)
     {
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,DirtyPageBig));
         PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
-        PdeDst.n.u1Write = 0;
+        PdeDst.u &= ~(SHWUINT)X86_PDE_RW;
     }
     else
     {
-        PdeDst.au32[0] &= ~PGM_PDFLAGS_TRACK_DIRTY;
-        PdeDst.n.u1Write = PdeSrc.n.u1Write;
+        PdeDst.u &= ~(SHWUINT)(PGM_PDFLAGS_TRACK_DIRTY | X86_PDE_RW);
+        PdeDst.u |= PdeSrc.u & X86_PDE_RW;
     }
     SHW_PDE_ATOMIC_SET2(*pPdeDst, PdeDst);
@@ -2283,20 +2284,16 @@
      * Handle big page.
      */
-    if (pPdeSrc->b.u1Size && GST_IS_PSE_ACTIVE(pVCpu))
-    {
-        if (    pPdeDst->n.u1Present
-            && (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY))
-        {
+    if ((pPdeSrc->u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu))
+    {
+        if ((pPdeSrc->u & (X86_PDE_P | PGM_PDFLAGS_TRACK_DIRTY)) == (X86_PDE_P | PGM_PDFLAGS_TRACK_DIRTY))
+        {
+            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,DirtyPageTrap));
+            Assert(pPdeSrc->u & X86_PDE_RW);
+
+            /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
+             *       fault again and take this path to only invalidate the entry (see below). */
             SHWPDE PdeDst = *pPdeDst;
-
-            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,DirtyPageTrap));
-            Assert(pPdeSrc->b.u1Write);
-
-            /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
-             *       fault again and take this path to only invalidate the entry (see below).
-             */
-            PdeDst.n.u1Write    = 1;
-            PdeDst.n.u1Accessed = 1;
-            PdeDst.au32[0]     &= ~PGM_PDFLAGS_TRACK_DIRTY;
+            PdeDst.u &= ~(SHWUINT)PGM_PDFLAGS_TRACK_DIRTY;
+            PdeDst.u |= X86_PDE_RW | X86_PDE_A;
             SHW_PDE_ATOMIC_SET2(*pPdeDst, PdeDst);
             PGM_INVL_BIG_PG(pVCpu, GCPtrPage);
@@ -2306,7 +2303,6 @@
 # ifdef IN_RING0
         /* Check for stale TLB entry; only applies to the SMP guest case. */
-        if (    pVM->cCpus > 1
-            &&  pPdeDst->n.u1Write
-            &&  pPdeDst->n.u1Accessed)
+        if (    pVM->cCpus > 1
+            &&  (pPdeDst->u & (X86_PDE_P | X86_PDE_RW | X86_PDE_A)) == (X86_PDE_P | X86_PDE_RW | X86_PDE_A))
         {
             PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPdeDst->u & SHW_PDE_PG_MASK);
@@ -2333,11 +2329,7 @@
     PGSTPT pPTSrc;
     int rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GST_GET_PDE_GCPHYS(*pPdeSrc), &pPTSrc);
-    if (RT_FAILURE(rc))
-    {
-        AssertRC(rc);
-        return rc;
-    }
-
-    if (pPdeDst->n.u1Present)
+    AssertRCReturn(rc, rc);
+
+    if (SHW_PDE_IS_P(*pPdeDst))
     {
         GSTPTE const *pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
@@ -2551,5 +2543,5 @@
     }
 # endif /* !PGM_WITHOUT_MAPPINGS */
-    Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
+    Assert(!SHW_PDE_IS_P(PdeDst)); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
 
     /*
@@ -2557,6 +2549,6 @@
      */
     GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
-    const bool fPageTable = !PdeSrc.b.u1Size || !GST_IS_PSE_ACTIVE(pVCpu);
-    if (    PdeSrc.n.u1Present
+    const bool fPageTable = !(PdeSrc.u & X86_PDE_PS) || !GST_IS_PSE_ACTIVE(pVCpu);
+    if (    (PdeSrc.u & X86_PDE_P)
         &&  (fPageTable ? GST_IS_PDE_VALID(pVCpu, PdeSrc) : GST_IS_BIG_PDE_VALID(pVCpu, PdeSrc)) )
     {
@@ -2582,5 +2574,5 @@
         PGMPOOLACCESS enmAccess;
 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
-        const bool fNoExecute = PdeSrc.n.u1NoExecute && GST_IS_NX_ACTIVE(pVCpu);
+        const bool fNoExecute = (PdeSrc.u & X86_PDE_PAE_NX) && GST_IS_NX_ACTIVE(pVCpu);
 # else
         const bool fNoExecute = false;
@@ -2593,7 +2585,7 @@
 # endif
         /* Determine the right kind of large page to avoid incorrect cached entry reuse. */
-        if (PdeSrc.n.u1User)
+        if (PdeSrc.u & X86_PDE_US)
         {
-            if (PdeSrc.n.u1Write)
+            if (PdeSrc.u & X86_PDE_RW)
                 enmAccess = (fNoExecute) ? PGMPOOLACCESS_USER_RW_NX : PGMPOOLACCESS_USER_RW;
             else
@@ -2602,5 +2594,5 @@
         else
         {
-            if (PdeSrc.n.u1Write)
+            if (PdeSrc.u & X86_PDE_RW)
                 enmAccess = (fNoExecute) ? PGMPOOLACCESS_SUPERVISOR_RW_NX : PGMPOOLACCESS_SUPERVISOR_RW;
             else
@@ -2624,10 +2616,9 @@
             PdeDst.u = pShwPage->Core.Key | GST_GET_BIG_PDE_SHW_FLAGS(pVCpu, PdeSrc);
             /* (see explanation and assumptions further down.) */
-            if (    !PdeSrc.b.u1Dirty
-                &&  PdeSrc.b.u1Write)
+            if ((PdeSrc.u & (X86_PDE_RW | X86_PDE4M_D)) == X86_PDE_RW)
             {
                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,DirtyPageBig));
                 PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
-                PdeDst.b.u1Write = 0;
+                PdeDst.u &= ~(SHWUINT)X86_PDE_RW;
             }
         }
@@ -2655,5 +2646,5 @@
      * The best idea is to leave this change to the caller and add an
      * assertion that it's set already. */
-    pPDSrc->a[iPDSrc].n.u1Accessed = 1;
+    pPDSrc->a[iPDSrc].u |= X86_PDE_A;
     if (fPageTable)
     {
@@ -2763,10 +2754,9 @@
         /** @todo move the above stuff to a section in the PGM documentation. */
         Assert(!(PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY));
-        if (    !PdeSrc.b.u1Dirty
-            &&  PdeSrc.b.u1Write)
+        if ((PdeSrc.u & (X86_PDE_RW | X86_PDE4M_D)) == X86_PDE_RW)
         {
             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,DirtyPageBig));
             PdeDst.u |= PGM_PDFLAGS_TRACK_DIRTY;
-            PdeDst.b.u1Write = 0;
+            PdeDst.u &= ~(SHWUINT)X86_PDE_RW;
         }
         SHW_PDE_ATOMIC_SET2(*pPdeDst, PdeDst);
@@ -2890,5 +2880,5 @@
     }
     else
-        AssertRelease(!PdeDst.n.u1Present);
+        AssertRelease(!SHW_PDE_IS_P(PdeDst));
 
     STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncPT), a);
@@ -3029,10 +3019,6 @@
                | (PdeDst.u & X86_PDE_AVL_MASK) /** @todo do we need this? */;
 # else
-    PdeDst.u &= X86_PDE_AVL_MASK;
-    PdeDst.n.u1Present = 1;
-    PdeDst.n.u1Write   = 1;
-    PdeDst.b.u1Size    = 1;
-    PdeDst.n.u1User    = 1;
-    PdeDst.u |= HCPhys; /* Note! Must be done last of gcc v10.2.1 20200723 (Red Hat 10.2.1-1) may drop the top 32 bits. */
+    PdeDst.u = HCPhys | X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PS
+             | (PdeDst.u & X86_PDE_AVL_MASK) /** @todo PGM_PD_FLAGS? */;
 # endif
     SHW_PDE_ATOMIC_SET2(*pPdeDst, PdeDst);
@@ -3101,11 +3087,6 @@
            | (PdeDst.u & X86_PDE_AVL_MASK /** @todo do we really need this? */);
 # else
-    PdeDst.u &= X86_PDE_AVL_MASK;
-    PdeDst.n.u1Present  = 1;
-    PdeDst.n.u1Write    = 1;
-    PdeDst.n.u1User     = 1;
-    PdeDst.n.u1Accessed = 1;
-    PdeDst.u |= pShwPage->Core.Key; /* Note! Must be done last of gcc v10.2.1 20200723 (Red Hat 10.2.1-1) drops the top 32 bits. */
-    /** @todo r=bird: Stop using bitfields. But we need to defined/find the EPT flags then. */
+    PdeDst.u = pShwPage->Core.Key | X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_A
+             | (PdeDst.u & X86_PDE_AVL_MASK /** @todo use a PGM_PD_FLAGS define */);
 # endif
     SHW_PDE_ATOMIC_SET2(*pPdeDst, PdeDst);
@@ -3171,14 +3152,8 @@
     PGSTPD          pPDSrc = NULL;
     const unsigned  iPDSrc = 0;
-    GSTPDE          PdeSrc;
-
-    PdeSrc.u            = 0; /* faked so we don't have to #ifdef everything */
-    PdeSrc.n.u1Present  = 1;
-    PdeSrc.n.u1Write    = 1;
-    PdeSrc.n.u1Accessed = 1;
-    PdeSrc.n.u1User     = 1;
-# endif
-
-    if (PdeSrc.n.u1Present && PdeSrc.n.u1Accessed)
+    GSTPDE const    PdeSrc = { X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_A }; /* faked so we don't have to #ifdef everything */
+# endif
+
+    if ((PdeSrc.u & (X86_PDE_P | X86_PDE_A)) == (X86_PDE_P | X86_PDE_A))
     {
         PVMCC pVM = pVCpu->CTX_SUFF(pVM);
@@ -3237,5 +3212,5 @@
 # endif
     {
-        if (!PdeDst.n.u1Present)
+        if (!(PdeDst.u & X86_PDE_P))
         {
             /** @todo r=bird: This guy will set the A bit on the PDE,
@@ -3386,5 +3361,5 @@
 # endif
 
-    if (!pPdeDst->n.u1Present)
+    if (!(pPdeDst->u & X86_PDE_P))
     {
         rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage);
@@ -3409,9 +3384,4 @@
     GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
 # else
-    GSTPDE PdeSrc;
-    PdeSrc.u            = 0; /* faked so we don't have to #ifdef everything */
-    PdeSrc.n.u1Present  = 1;
-    PdeSrc.n.u1Write    = 1;
-    PdeSrc.n.u1Accessed = 1;
-    PdeSrc.n.u1User     = 1;
+    GSTPDE const PdeSrc = { X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_A }; /* faked so we don't have to #ifdef everything */
 # endif
@@ -3809,5 +3779,5 @@
 
         const GSTPDE PdeSrc = pPDSrc->a[(iPDDst >> (GST_PD_SHIFT - SHW_PD_SHIFT)) & GST_PD_MASK];
-        if (!PdeSrc.n.u1Present)
+        if (!(PdeSrc.u & X86_PDE_P))
         {
             AssertMsgFailed(("Guest PDE at %RGv is not present! PdeDst=%#RX64 PdeSrc=%#RX64\n",
@@ -3817,6 +3787,6 @@
         }
 
-        if (    !PdeSrc.b.u1Size
-            ||  !fBigPagesSupported)
+        if (    !(PdeSrc.u & X86_PDE_PS)
+            ||  !fBigPagesSupported)
         {
             GCPhysGst = GST_GET_PDE_GCPHYS(PdeSrc);
@@ -3842,6 +3812,6 @@
         }
 
-        if (    pPoolPage->enmKind
-            !=  (!PdeSrc.b.u1Size || !fBigPagesSupported ? BTH_PGMPOOLKIND_PT_FOR_PT : BTH_PGMPOOLKIND_PT_FOR_BIG))
+        if (    pPoolPage->enmKind
+            !=  (!(PdeSrc.u & X86_PDE_PS) || !fBigPagesSupported ? BTH_PGMPOOLKIND_PT_FOR_PT : BTH_PGMPOOLKIND_PT_FOR_BIG))
         {
             AssertMsgFailed(("Invalid shadow page table kind %d at %RGv! PdeSrc=%#RX64\n",
@@ -3867,5 +3837,5 @@
         }
 
-        if (    !PdeSrc.b.u1Size
+        if (    !(PdeSrc.u & X86_PDE_PS)
             ||  !fBigPagesSupported)
         {
@@ -4100,7 +4070,7 @@
              */
            uint64_t fIgnoreFlags = X86_PDE_AVL_MASK | GST_PDE_PG_MASK | X86_PDE4M_G | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_PWT | X86_PDE4M_PCD;
-            if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
+            if ((PdeSrc.u & (X86_PDE_RW | X86_PDE4M_D)) == X86_PDE_RW)
            {
-                if (PdeDst.n.u1Write)
+                if (PdeDst.u & X86_PDE_RW)
                {
                    AssertMsgFailed(("!DIRTY page at %RGv is writable! PdeSrc=%#RX64 PdeDst=%#RX64\n",
@@ -4131,5 +4101,5 @@
            {
                /* access bit emulation (not implemented). */
-                if (PdeSrc.b.u1Accessed || PdeDst.n.u1Present)
+                if ((PdeSrc.u & X86_PDE_A) || SHW_PDE_IS_P(PdeDst))
                {
                    AssertMsgFailed(("PGM_PDFLAGS_TRACK_DIRTY set at %RGv but no accessed bit emulation! PdeSrc=%#RX64 PdeDst=%#RX64\n",
@@ -4138,5 +4108,5 @@
                    continue;
                }
-                if (!PdeDst.n.u1Accessed)
+                if (!SHW_PDE_IS_A(PdeDst))
                {
                    AssertMsgFailed(("!ACCESSED page at %RGv is has the accessed bit set! PdeSrc=%#RX64 PdeDst=%#RX64\n",

trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
--- trunk/src/VBox/VMM/VMMAll/PGMAllGst.h (r86466)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllGst.h (r86476)
@@ -208,7 +208,7 @@
         GSTPDE Pde;
         pWalk->Pde.u = Pde.u = pPde->u;
-        if (Pde.n.u1Present) { /* probable */ }
+        if (Pde.u & X86_PDE_P) { /* probable */ }
         else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2);
-        if (Pde.n.u1Size && GST_IS_PSE_ACTIVE(pVCpu))
+        if ((Pde.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu))
         {
             if (RT_LIKELY(GST_IS_BIG_PDE_VALID(pVCpu, Pde))) { /* likely */ }

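The walker's big-page branch only triggers when both the PDE's PS bit and PSE-style paging are active, which is what GST_IS_PSE_ACTIVE abstracts. A hedged sketch of that gate, modelling the check as CR4.PSE the way it works for 32-bit guests (VBox actually routes this through per-VCPU guest state):

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_PDE_PS  UINT64_C(0x080)  /* page size (bit 7) */
    #define X86_CR4_PSE UINT64_C(0x010)  /* CR4.PSE  (bit 4) */

    /* A PDE maps a big page only if PS is set AND large pages are enabled. */
    static inline bool is_big_pde(uint64_t uPde, uint64_t uCr4)
    {
        return (uPde & X86_PDE_PS) && (uCr4 & X86_CR4_PSE);
    }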
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
--- trunk/src/VBox/VMM/VMMAll/PGMAllShw.h (r86466)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllShw.h (r86476)
@@ -20,4 +20,5 @@
 *   Defined Constants And Macros                                              *
 *******************************************************************************/
+#undef SHWUINT
 #undef SHWPT
 #undef PSHWPT
@@ -34,4 +35,5 @@
 #undef SHW_PDE_ATOMIC_SET2
 #undef SHW_PDE_IS_P
+#undef SHW_PDE_IS_A
 #undef SHW_PDE_IS_BIG
 #undef SHW_PTE_PG_MASK
@@ -59,4 +61,5 @@
 
 #if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT
+# define SHWUINT                        uint32_t
 # define SHWPT                          X86PT
 # define PSHWPT                         PX86PT
@@ -72,4 +75,5 @@
 # define SHW_TOTAL_PD_ENTRIES           X86_PG_ENTRIES
 # define SHW_PDE_IS_P(Pde)              ( (Pde).n.u1Present )
+# define SHW_PDE_IS_A(Pde)              ( (Pde).n.u1Accessed )
 # define SHW_PDE_IS_BIG(Pde)            ( (Pde).b.u1Size )
 # define SHW_PDE_ATOMIC_SET(Pde, uNew)  do { ASMAtomicWriteU32(&(Pde).u, (uNew)); } while (0)
@@ -95,4 +99,5 @@
 
 #elif PGM_SHW_TYPE == PGM_TYPE_EPT
+# define SHWUINT                        uint64_t
 # define SHWPT                          EPTPT
 # define PSHWPT                         PEPTPT
@@ -107,4 +112,5 @@
 # define SHW_PD_MASK                    EPT_PD_MASK
 # define SHW_PDE_IS_P(Pde)              ( (Pde).u & EPT_E_READ /* always set*/ )
+# define SHW_PDE_IS_A(Pde)              ( 1 ) /* We don't use EPT_E_ACCESSED, use with care! */
 # define SHW_PDE_IS_BIG(Pde)            ( (Pde).u & EPT_E_LEAF )
 # define SHW_PDE_ATOMIC_SET(Pde, uNew)  do { ASMAtomicWriteU64(&(Pde).u, (uNew)); } while (0)
@@ -134,4 +140,5 @@
 
 #else
+# define SHWUINT                        uint64_t
 # define SHWPT                          PGMSHWPTPAE
 # define PSHWPT                         PPGMSHWPTPAE
@@ -146,4 +153,5 @@
 # define SHW_PD_MASK                    X86_PD_PAE_MASK
 # define SHW_PDE_IS_P(Pde)              ( (Pde).u & X86_PDE_P )
+# define SHW_PDE_IS_A(Pde)              ( (Pde).u & X86_PDE_A )
 # define SHW_PDE_IS_BIG(Pde)            ( (Pde).u & X86_PDE_PS )
 # define SHW_PDE_ATOMIC_SET(Pde, uNew)  do { ASMAtomicWriteU64(&(Pde).u, (uNew)); } while (0)
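The new SHWUINT type lets expressions like PdeDst.u &= ~(SHWUINT)X86_PDE_RW mask at the entry's real width: uint32_t for 32-bit shadow paging, uint64_t for EPT and PAE. The SHW_PDE_ATOMIC_SET macros then publish the finished entry with a single atomic store (ASMAtomicWriteU32/U64), so a concurrent walker never observes a half-updated PDE. A rough C11 analogue of that publish step; the function name is illustrative:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Publish a fully-composed shadow PDE in one atomic 64-bit store,
     * mirroring what SHW_PDE_ATOMIC_SET does with ASMAtomicWriteU64. */
    static inline void shw_pde_atomic_set(_Atomic uint64_t *pPde, uint64_t uNew)
    {
        atomic_store(pPde, uNew);
    }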