VirtualBox

Changeset 26018 in vbox


Ignore:
Timestamp:
Jan 25, 2010 4:06:26 PM (15 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
56903
Message:

Added missing pending PGM sync handling in the VT-x and AMD-V page fault handlers; this was the cause of Windows 2003 SMP guest instability and possibly many other issues.
Cleaned up InvalidatePage (removed obsolete code).

Location:
trunk/src/VBox/VMM
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r25937 r26018  
    10171017        AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT, ("Unexpected rc=%Rrc\n", rc));
    10181018        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
    1019         if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
    1020             PGM_INVL_VCPU_TLBS(pVCpu);
    10211019        return VINF_SUCCESS;
    10221020    }
     
    10291027    {
    10301028        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
    1031         if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
    1032             PGM_INVL_VCPU_TLBS(pVCpu);
    10331029        return VINF_SUCCESS;
    10341030    }
     
    11011097# endif /* IN_RING3 */
    11021098
    1103 # if PGM_GST_TYPE == PGM_TYPE_AMD64
    1104     /* Fetch the pgm pool shadow descriptor. */
    1105     PPGMPOOLPAGE pShwPdpt = pgmPoolGetPage(pPool, pPml4eDst->u & X86_PML4E_PG_MASK);
    1106     Assert(pShwPdpt);
    1107 
    1108     /* Fetch the pgm pool shadow descriptor. */
    1109     PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & SHW_PDPE_PG_MASK);
    1110     Assert(pShwPde);
    1111 
    1112     Assert(pPml4eDst->n.u1Present && (pPml4eDst->u & SHW_PDPT_MASK));
    1113     RTGCPHYS GCPhysPdpt = pPml4eSrc->u & X86_PML4E_PG_MASK;
    1114 
    1115     if (    !pPml4eSrc->n.u1Present
    1116         ||  pShwPdpt->GCPhys != GCPhysPdpt)
    1117     {
    1118         LogFlow(("InvalidatePage: Out-of-sync PML4E (P/GCPhys) at %RGv GCPhys=%RGp vs %RGp Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
    1119                  GCPtrPage, pShwPdpt->GCPhys, GCPhysPdpt, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
    1120         pgmPoolFreeByPage(pPool, pShwPdpt, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
    1121         ASMAtomicWriteSize(pPml4eDst, 0);
    1122         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
    1123         PGM_INVL_VCPU_TLBS(pVCpu);
    1124         return VINF_SUCCESS;
    1125     }
    1126     if (   pPml4eSrc->n.u1User != pPml4eDst->n.u1User
    1127         || (!pPml4eSrc->n.u1Write && pPml4eDst->n.u1Write))
    1128     {
    1129         /*
    1130          * Mark not present so we can resync the PML4E when it's used.
    1131          */
    1132         LogFlow(("InvalidatePage: Out-of-sync PML4E at %RGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
    1133                  GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
    1134         pgmPoolFreeByPage(pPool, pShwPdpt, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
    1135         ASMAtomicWriteSize(pPml4eDst, 0);
    1136         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
    1137         PGM_INVL_VCPU_TLBS(pVCpu);
    1138     }
    1139     else if (!pPml4eSrc->n.u1Accessed)
    1140     {
    1141         /*
    1142          * Mark not present so we can set the accessed bit.
    1143          */
    1144         LogFlow(("InvalidatePage: Out-of-sync PML4E (A) at %RGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
    1145                  GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
    1146         pgmPoolFreeByPage(pPool, pShwPdpt, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
    1147         ASMAtomicWriteSize(pPml4eDst, 0);
    1148         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
    1149         PGM_INVL_VCPU_TLBS(pVCpu);
    1150     }
    1151 
    1152     /* Check if the PDPT entry has changed. */
    1153     Assert(pPdpeDst->n.u1Present && pPdpeDst->u & SHW_PDPT_MASK);
    1154     RTGCPHYS GCPhysPd = PdpeSrc.u & GST_PDPE_PG_MASK;
    1155     if (    !PdpeSrc.n.u1Present
    1156         ||  pShwPde->GCPhys != GCPhysPd)
    1157     {
    1158         LogFlow(("InvalidatePage: Out-of-sync PDPE (P/GCPhys) at %RGv GCPhys=%RGp vs %RGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
    1159                     GCPtrPage, pShwPde->GCPhys, GCPhysPd, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
    1160         pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
    1161         ASMAtomicWriteSize(pPdpeDst, 0);
    1162         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
    1163         PGM_INVL_VCPU_TLBS(pVCpu);
    1164         return VINF_SUCCESS;
    1165     }
    1166     if (   PdpeSrc.lm.u1User != pPdpeDst->lm.u1User
    1167         || (!PdpeSrc.lm.u1Write && pPdpeDst->lm.u1Write))
    1168     {
    1169         /*
    1170          * Mark not present so we can resync the PDPTE when it's used.
    1171          */
    1172         LogFlow(("InvalidatePage: Out-of-sync PDPE at %RGv PdpeSrc=%RX64 PdpeDst=%RX64\n",
    1173                  GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
    1174         pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
    1175         ASMAtomicWriteSize(pPdpeDst, 0);
    1176         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
    1177         PGM_INVL_VCPU_TLBS(pVCpu);
    1178     }
    1179     else if (!PdpeSrc.lm.u1Accessed)
    1180     {
    1181         /*
    1182          * Mark not present so we can set the accessed bit.
    1183          */
    1184         LogFlow(("InvalidatePage: Out-of-sync PDPE (A) at %RGv PdpeSrc=%RX64 PdpeDst=%RX64\n",
    1185                  GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
    1186         pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
    1187         ASMAtomicWriteSize(pPdpeDst, 0);
    1188         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
    1189         PGM_INVL_VCPU_TLBS(pVCpu);
    1190     }
    1191 # endif /* PGM_GST_TYPE == PGM_TYPE_AMD64 */
    1192 
    11931099    /*
    11941100     * Deal with the Guest PDE.
     
    11971103    if (PdeSrc.n.u1Present)
    11981104    {
     1105        Assert(     PdeSrc.n.u1User == PdeDst.n.u1User
     1106               &&   (PdeSrc.n.u1Write || !PdeDst.n.u1Write));
    11991107# ifndef PGM_WITHOUT_MAPPING
    12001108        if (PdeDst.u & PGM_PDFLAGS_MAPPING)
     
    12111119        else
    12121120# endif /* !PGM_WITHOUT_MAPPING */
    1213         if (   PdeSrc.n.u1User != PdeDst.n.u1User
    1214             || (!PdeSrc.n.u1Write && PdeDst.n.u1Write))
    1215         {
    1216             /*
    1217              * Mark not present so we can resync the PDE when it's used.
    1218              */
    1219             LogFlow(("InvalidatePage: Out-of-sync at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
    1220                      GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
    1221             pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
    1222             ASMAtomicWriteSize(pPdeDst, 0);
    1223             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
    1224             PGM_INVL_VCPU_TLBS(pVCpu);
    1225         }
    1226         else if (!PdeSrc.n.u1Accessed)
    1227         {
    1228             /*
    1229              * Mark not present so we can set the accessed bit.
    1230              */
    1231             LogFlow(("InvalidatePage: Out-of-sync (A) at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
    1232                      GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
    1233             pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
    1234             ASMAtomicWriteSize(pPdeDst, 0);
    1235             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
    1236             PGM_INVL_VCPU_TLBS(pVCpu);
    1237         }
    1238         else if (!fIsBigPage)
     1121        if (!fIsBigPage)
    12391122        {
    12401123            /*
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r26013 r26018  
    17121712
    17131713                TRPMResetTrap(pVCpu);
    1714 
    17151714                STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
     1715
     1716                /* Check if a sync operation is pending. */
     1717                if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
     1718                {
     1719                    rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
     1720                    AssertRC(rc);
     1721                    if (rc != VINF_SUCCESS)
     1722                    {
     1723                        Log(("Pending pool sync is forcing us back to ring 3; rc=%d\n", rc));
     1724                        break;
     1725                    }
     1726                }                   
     1727
    17161728                goto ResumeExecution;
    17171729            }
     
    18851897            TRPMResetTrap(pVCpu);
    18861898
     1899            Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
     1900
    18871901            STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
    18881902            goto ResumeExecution;
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r26014 r26018  
    28982898                rc = PGMTrap0eHandler(pVCpu, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)exitQualification);
    28992899                Log2(("PGMTrap0eHandler %RGv returned %Rrc\n", (RTGCPTR)pCtx->rip, rc));
     2900
    29002901                if (rc == VINF_SUCCESS)
    29012902                {   /* We've successfully synced our shadow pages, so let's just continue execution. */
     
    29042905
    29052906                    TRPMResetTrap(pVCpu);
    2906 
    29072907                    STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     2908
     2909                    /* Check if a sync operation is pending. */
     2910                    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
     2911                    {
     2912                        rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
     2913                        AssertRC(rc);
     2914                        if (rc != VINF_SUCCESS)
     2915                        {
     2916                            Log(("Pending pool sync is forcing us back to ring 3; rc=%d\n", rc));
     2917                            break;
     2918                        }
     2919                    }                   
    29082920                    goto ResumeExecution;
    29092921                }
     
    33953407            TRPMResetTrap(pVCpu);
    33963408
     3409            Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
    33973410            goto ResumeExecution;
    33983411        }
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette