VirtualBox

Changeset 19772 in vbox


Timestamp: May 18, 2009 10:05:40 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 47404
Message: Backed out 47401-3 for now

Location: trunk/src/VBox/VMM
Files: 6 edited

Legend:
    lines prefixed with - were removed in r19772
    lines prefixed with + were added in r19772
    all other lines are unmodified context
  • trunk/src/VBox/VMM/DBGFSym.cpp (r19649 → r19772)

  *******************************************************************************/
  #define LOG_GROUP LOG_GROUP_DBGF
- #if defined(RT_OS_WINDOWS) && 0 //defined(DEBUG_bird) // enable this if you want to debug win32 guests, the hypervisor or EFI.
+ #if defined(RT_OS_WINDOWS) && 1 //defined(DEBUG_bird) // enable this if you want to debug win32 guests, the hypervisor or EFI.
  # include <Windows.h>
  # define _IMAGEHLP64
  • trunk/src/VBox/VMM/PGM.cpp (r19769 → r19772)

          PGM_REG_COUNTER(&pPGM->StatRZDirtyPageSkipped,            "/PGM/CPU%d/RZ/DirtyPage/Skipped",          "The number of pages already dirty or readonly.");
          PGM_REG_COUNTER(&pPGM->StatRZDirtyPageTrap,               "/PGM/CPU%d/RZ/DirtyPage/Trap",             "The number of traps generated for dirty bit tracking.");
-         PGM_REG_COUNTER(&pPGM->StatRZDirtyPageStale,              "/PGM/CPU%d/RZ/DirtyPage/Stale",            "The number of traps generated for dirty bit tracking (stale tlb entries).");
          PGM_REG_COUNTER(&pPGM->StatRZDirtiedPage,                 "/PGM/CPU%d/RZ/DirtyPage/SetDirty",         "The number of pages marked dirty because of write accesses.");
          PGM_REG_COUNTER(&pPGM->StatRZDirtyTrackRealPF,            "/PGM/CPU%d/RZ/DirtyPage/RealPF",           "The number of real page faults during dirty bit tracking.");
…
          PGM_REG_COUNTER(&pPGM->StatR3DirtyPageSkipped,            "/PGM/CPU%d/R3/DirtyPage/Skipped",          "The number of pages already dirty or readonly.");
          PGM_REG_COUNTER(&pPGM->StatR3DirtyPageTrap,               "/PGM/CPU%d/R3/DirtyPage/Trap",             "The number of traps generated for dirty bit tracking.");
-         PGM_REG_COUNTER(&pPGM->StatR3DirtyPageStale,              "/PGM/CPU%d/R3/DirtyPage/Stale",            "The number of traps generated for dirty bit tracking (stale tlb entries).");
          PGM_REG_COUNTER(&pPGM->StatR3DirtiedPage,                 "/PGM/CPU%d/R3/DirtyPage/SetDirty",         "The number of pages marked dirty because of write accesses.");
          PGM_REG_COUNTER(&pPGM->StatR3DirtyTrackRealPF,            "/PGM/CPU%d/R3/DirtyPage/RealPF",           "The number of real page faults during dirty bit tracking.");
  • trunk/src/VBox/VMM/PGMInternal.h (r19769 → r19772)

      STAMCOUNTER StatRZDirtyPageSkipped;             /**< RC/R0: The number of pages already dirty or readonly. */
      STAMCOUNTER StatRZDirtyPageTrap;                /**< RC/R0: The number of traps generated for dirty bit tracking. */
-     STAMCOUNTER StatRZDirtyPageStale;               /**< RC/R0: The number of traps generated for dirty bit tracking. (stale tlb entries) */
      STAMCOUNTER StatRZDirtyTrackRealPF;             /**< RC/R0: The number of real page faults during dirty bit tracking. */
      STAMCOUNTER StatRZDirtiedPage;                  /**< RC/R0: The number of pages marked dirty because of write accesses. */
…
      STAMCOUNTER StatR3DirtyPageSkipped;             /**< R3: The number of pages already dirty or readonly. */
      STAMCOUNTER StatR3DirtyPageTrap;                /**< R3: The number of traps generated for dirty bit tracking. */
-     STAMCOUNTER StatR3DirtyPageStale;               /**< R3: The number of traps generated for dirty bit tracking (stale TLB entries). */
      STAMCOUNTER StatR3DirtyTrackRealPF;             /**< R3: The number of real page faults during dirty bit tracking. */
      STAMCOUNTER StatR3DirtiedPage;                  /**< R3: The number of pages marked dirty because of write accesses. */
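The PGM.cpp and PGMInternal.h hunks above are the two halves of one statistics counter: the header declares the STAMCOUNTER field and PGM.cpp registers it under a hierarchical sample name, so backing the counter out means removing both. A minimal standalone sketch of that declare-and-register pattern (the registry function and struct below are simplified stand-ins, not the real STAM/PGM API):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint64_t c; } STAMCOUNTER;       /* simplified counter type */

    typedef struct
    {
        /* Mirrors the PGMInternal.h half: one field per sample. */
        STAMCOUNTER StatRZDirtyPageTrap;
    } PGMCPUSTATS;

    /* Stand-in for the PGM_REG_COUNTER half in PGM.cpp: binds a field to a
     * hierarchical name and a description in the statistics tree. */
    static void regCounter(STAMCOUNTER *pCtr, const char *pszName, const char *pszDesc)
    {
        pCtr->c = 0;
        printf("registered %s (%s)\n", pszName, pszDesc);
    }

    int main(void)
    {
        PGMCPUSTATS Stats;
        regCounter(&Stats.StatRZDirtyPageTrap, "/PGM/CPU0/RZ/DirtyPage/Trap",
                   "The number of traps generated for dirty bit tracking.");
        Stats.StatRZDirtyPageTrap.c++;                /* STAM_COUNTER_INC analogue */
        return 0;
    }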
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r19771 → r19772)

  #  endif /* !PGM_WITH_PAGING */

-     /* Fetch the guest PDE */
+
+ #  if PGM_SHW_TYPE == PGM_TYPE_32BIT
+     const unsigned  iPDDst = pvFault >> SHW_PD_SHIFT;
+     PX86PD          pPDDst = pgmShwGet32BitPDPtr(&pVCpu->pgm.s);
+
+ #  elif PGM_SHW_TYPE == PGM_TYPE_PAE
+     const unsigned  iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK;   /* pPDDst index, not used with the pool. */
+
+     PX86PDPAE       pPDDst;
+ #    if PGM_GST_TYPE != PGM_TYPE_PAE
+     X86PDPE         PdpeSrc;
+
+     /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
+     PdpeSrc.u  = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
+ #    endif
+     rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, &PdpeSrc, &pPDDst);
+     if (rc != VINF_SUCCESS)
+     {
+         AssertRC(rc);
+         return rc;
+     }
+     Assert(pPDDst);
+
+ #  elif PGM_SHW_TYPE == PGM_TYPE_AMD64
+     const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
+     PX86PDPAE       pPDDst;
+ #   if PGM_GST_TYPE == PGM_TYPE_PROT
+     /* AMD-V nested paging */
+     X86PML4E        Pml4eSrc;
+     X86PDPE         PdpeSrc;
+     PX86PML4E       pPml4eSrc = &Pml4eSrc;
+
+     /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
+     Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
+     PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
+ #   endif
+
+     rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
+     if (rc != VINF_SUCCESS)
+     {
+         AssertRC(rc);
+         return rc;
+     }
+     Assert(pPDDst);
+
+ #  elif PGM_SHW_TYPE == PGM_TYPE_EPT
+     const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
+     PEPTPD          pPDDst;
+
+     rc = pgmShwGetEPTPDPtr(pVCpu, pvFault, NULL, &pPDDst);
+     if (rc != VINF_SUCCESS)
+     {
+         AssertRC(rc);
+         return rc;
+     }
+     Assert(pPDDst);
+ #  endif
+
+ #  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+     /*
+      * If we successfully correct the write protection fault due to dirty bit
+      * tracking, or this page fault is a genuine one, then return immediately.
+      */
+     STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
+     rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
+     STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
+     if (    rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
+         ||  rc == VINF_EM_RAW_GUEST_TRAP)
+     {
+         STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
+                      = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVCpu->pgm.s.StatRZTrap0eTime2DirtyAndAccessed : &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
+         LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
+         return rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? VINF_SUCCESS : rc;
+     }
+
+     STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0ePD[iPDSrc]);
+ #  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
+
+     /*
+      * A common case is the not-present error caused by lazy page table syncing.
+      *
+      * It is IMPORTANT that we weed out any access to non-present shadow PDEs here
+      * so we can safely assume that the shadow PT is present when calling SyncPage later.
+      *
+      * On failure, we ASSUME that SyncPT is out of memory or detected some kind
+      * of mapping conflict and defer to SyncCR3 in R3.
+      * (Again, we do NOT support access handlers for non-present guest pages.)
+      *
+      */
  #  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
      GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
…
      PdeSrc.n.u1User     = 1;
  #  endif
-
-     pgmLock(pVM);
-     {   /* Force the shadow pointers to go out of scope after releasing the lock. */
- #  if PGM_SHW_TYPE == PGM_TYPE_32BIT
-         const unsigned  iPDDst = pvFault >> SHW_PD_SHIFT;
-         PX86PD          pPDDst = pgmShwGet32BitPDPtr(&pVCpu->pgm.s);
-
- #  elif PGM_SHW_TYPE == PGM_TYPE_PAE
-         const unsigned  iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK;   /* pPDDst index, not used with the pool. */
-
-         PX86PDPAE       pPDDst;
- #    if PGM_GST_TYPE != PGM_TYPE_PAE
-         X86PDPE         PdpeSrc;
-
-         /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
-         PdpeSrc.u  = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
- #    endif
-         rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, &PdpeSrc, &pPDDst);
-         if (rc != VINF_SUCCESS)
+     if (    !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
+         &&  !pPDDst->a[iPDDst].n.u1Present
+         &&  PdeSrc.n.u1Present
+        )
+
+     {
+         STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; });
+         STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
+         LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
+         pgmLock(pVM);
+         rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, pvFault);
+         pgmUnlock(pVM);
+         if (RT_SUCCESS(rc))
          {
-             pgmUnlock(pVM);
-             AssertRC(rc);
+             STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
              return rc;
          }
-         Assert(pPDDst);
-
- #  elif PGM_SHW_TYPE == PGM_TYPE_AMD64
-         const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
-         PX86PDPAE       pPDDst;
- #   if PGM_GST_TYPE == PGM_TYPE_PROT
-         /* AMD-V nested paging */
-         X86PML4E        Pml4eSrc;
-         X86PDPE         PdpeSrc;
-         PX86PML4E       pPml4eSrc = &Pml4eSrc;
-
-         /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
-         Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
-         PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
- #   endif
-
-         rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
-         if (rc != VINF_SUCCESS)
-         {
-             pgmUnlock(pVM);
-             AssertRC(rc);
-             return rc;
-         }
-         Assert(pPDDst);
-
- #  elif PGM_SHW_TYPE == PGM_TYPE_EPT
-         const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
-         PEPTPD          pPDDst;
-
-         rc = pgmShwGetEPTPDPtr(pVCpu, pvFault, NULL, &pPDDst);
-         if (rc != VINF_SUCCESS)
-         {
-             pgmUnlock(pVM);
-             AssertRC(rc);
-             return rc;
-         }
-         Assert(pPDDst);
- #  endif
-
- #  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-         /*
-          * If we successfully correct the write protection fault due to dirty bit
-          * tracking, or this page fault is a genuine one, then return immediately.
-          */
-         STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
-         rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
-         STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
-         if (    rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
-             ||  rc == VINF_EM_RAW_GUEST_TRAP)
-         {
-             STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
-                         = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVCpu->pgm.s.StatRZTrap0eTime2DirtyAndAccessed : &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
-             LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
-             pgmUnlock(pVM);
-             return rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? VINF_SUCCESS : rc;
-         }
-
-         STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0ePD[iPDSrc]);
- #  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
-
-         /*
-          * A common case is the not-present error caused by lazy page table syncing.
-          *
-          * It is IMPORTANT that we weed out any access to non-present shadow PDEs here
-          * so we can safely assume that the shadow PT is present when calling SyncPage later.
-          *
-          * On failure, we ASSUME that SyncPT is out of memory or detected some kind
-          * of mapping conflict and defer to SyncCR3 in R3.
-          * (Again, we do NOT support access handlers for non-present guest pages.)
-          *
-          */
-         if (    !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
-             &&  !pPDDst->a[iPDDst].n.u1Present
-             &&  PdeSrc.n.u1Present
-         )
-         {
-             STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; });
-             STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
-             LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
-             rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, pvFault);
-             pgmUnlock(pVM);
-             if (RT_SUCCESS(rc))
-             {
-                 STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
-                 return rc;
-             }
-             Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
-             VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
-             STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
-             return VINF_PGM_SYNC_CR3;
-         }
-         pgmUnlock(pVM);
+         Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
+         VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
+         STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
+         return VINF_PGM_SYNC_CR3;
      }

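The hunk above restores a Trap0e layout in which the shadow page directory is resolved before the PGM lock is taken, and the lock is held only around the SyncPT call; the backed-out revision had instead held the lock across the whole block. A standalone sketch of the restored not-present fast path, with simplified stand-ins for the real PGM types and helpers:

    /* A model of the restored fast path: a not-present fault with an absent
     * shadow PDE but a present guest PDE is resolved by syncing the page
     * table, and the lock is held only around that sync.  All types and
     * helpers are simplified stand-ins for the real PGM code. */
    #include <stdbool.h>
    #include <stdio.h>

    #define X86_TRAP_PF_P 0x01  /* error-code bit: set on protection faults, clear on not-present */

    typedef struct { bool fPresent; } PDE;

    static void lockPgm(void)   { /* stand-in for pgmLock(pVM) */ }
    static void unlockPgm(void) { /* stand-in for pgmUnlock(pVM) */ }

    /* Stand-in for PGM_BTH_NAME(SyncPT): instantiate the shadow page table. */
    static bool syncPageTable(PDE *pShwPde, const PDE *pGstPde)
    {
        pShwPde->fPresent = pGstPde->fPresent;
        return true;
    }

    /* Returns 0 on success, 1 for a genuine guest fault, -1 to defer to a CR3 resync. */
    static int trap0eNotPresent(unsigned uErr, PDE *pShwPde, const PDE *pGstPde)
    {
        if (   !(uErr & X86_TRAP_PF_P)  /* not-present, not a protection violation */
            && !pShwPde->fPresent       /* shadow PDE not synced yet... */
            && pGstPde->fPresent)       /* ...but the guest mapping exists */
        {
            lockPgm();
            bool fOk = syncPageTable(pShwPde, pGstPde);
            unlockPgm();
            return fOk ? 0 : -1;
        }
        return 1;
    }

    int main(void)
    {
        PDE Shw = { false }, Gst = { true };
        printf("rc=%d, shadow present=%d\n", trap0eNotPresent(0, &Shw, &Gst), Shw.fPresent);
        return 0;
    }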
…
                   GCPtrPage, pShwPdpt->GCPhys, GCPhysPdpt, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
          pgmPoolFreeByPage(pPool, pShwPdpt, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
-         ASMAtomicWriteSize(pPml4eDst, 0);
+         pPml4eDst->u = 0;
          STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
          PGM_INVL_GUEST_TLBS();
…
                   GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
          pgmPoolFreeByPage(pPool, pShwPdpt, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
-         ASMAtomicWriteSize(pPml4eDst, 0);
+         pPml4eDst->u = 0;
          STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
          PGM_INVL_GUEST_TLBS();
…
                   GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
          pgmPoolFreeByPage(pPool, pShwPdpt, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
-         ASMAtomicWriteSize(pPml4eDst, 0);
+         pPml4eDst->u = 0;
          STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
          PGM_INVL_GUEST_TLBS();
…
                      GCPtrPage, pShwPde->GCPhys, GCPhysPd, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
          pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
-         ASMAtomicWriteSize(pPdpeDst, 0);
+         pPdpeDst->u = 0;
          STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
          PGM_INVL_GUEST_TLBS();
…
                   GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
          pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
-         ASMAtomicWriteSize(pPdpeDst, 0);
+         pPdpeDst->u = 0;
          STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
          PGM_INVL_GUEST_TLBS();
…
                   GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
          pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
-         ASMAtomicWriteSize(pPdpeDst, 0);
+         pPdpeDst->u = 0;
          STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
          PGM_INVL_GUEST_TLBS();
…
                       GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
              pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
-             ASMAtomicWriteSize(pPdeDst, 0);
+             pPdeDst->u = 0;
              STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
              PGM_INVL_GUEST_TLBS();
…
                       GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
              pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
-             ASMAtomicWriteSize(pPdeDst, 0);
+             pPdeDst->u = 0;
              STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
              PGM_INVL_GUEST_TLBS();
…
                           GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, iPDDst));
                  pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
-                 ASMAtomicWriteSize(pPdeDst, 0);
+                 pPdeDst->u = 0;
                  STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
                  PGM_INVL_GUEST_TLBS();
…
                       GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
              pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
-             ASMAtomicWriteSize(pPdeDst, 0);
+             pPdeDst->u = 0;
              STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage4MBPages));
              PGM_INVL_BIG_PG(GCPtrPage);
…
          {
              pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
-             ASMAtomicWriteSize(pPdeDst, 0);
+             pPdeDst->u = 0;
              STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
              PGM_INVL_PG(GCPtrPage);
…
       */
      pgmPoolFreeByPage(pPool, pShwPage, pShwPde->idx, iPDDst);
-     ASMAtomicWriteSize(pPdeDst, 0);
+
+     pPdeDst->u = 0;

  # if defined(IN_RC)

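Each of the small hunks above reverts the same one-liner: the backed-out revision zeroed shadow entries through IPRT's ASMAtomicWriteSize, while the restored code uses a plain assignment. A hedged C11 sketch of the difference (a bare uint64_t stands in for the real PML4E/PDPE/PDE types, and whether the plain store can actually tear depends on the host architecture and compiler):

    /* Contrast of the two stores seen above, modeled with C11 atomics. */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Backed-out variant (ASMAtomicWriteSize analogue): one indivisible
     * 64-bit store that another CPU can never observe half-written. */
    static void clearEntryAtomic(_Atomic uint64_t *pEntry)
    {
        atomic_store_explicit(pEntry, 0, memory_order_seq_cst);
    }

    /* Restored variant: a plain store; on a 32-bit host the compiler is free
     * to split the 64-bit write, so a concurrent reader could see a torn entry. */
    static void clearEntryPlain(uint64_t *pEntry)
    {
        *pEntry = 0;
    }

    int main(void)
    {
        _Atomic uint64_t a = UINT64_MAX;
        uint64_t         p = UINT64_MAX;
        clearEntryAtomic(&a);
        clearEntryPlain(&p);
        printf("%llu %llu\n", (unsigned long long)atomic_load(&a), (unsigned long long)p);
        return 0;
    }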
…
      PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

-     Assert(PGMIsLockOwner(pVM));
-
      STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
      LogFlow(("CheckPageFault: GCPtrPage=%RGv uErr=%#x PdeSrc=%08x\n", GCPtrPage, uErr, pPdeSrc->u));
…
              pPdeSrc->b.u1Dirty = 1;

-             if (pPdeDst->n.u1Present)
+             if (pPdeDst->n.u1Present && (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY))
              {
-                 if (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY)
-                 {
-                     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
-                     Assert(pPdeSrc->b.u1Write);
-
-                     /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
-                      *       fault again and take this path to only invalidate the entry.
-                      */
-                     pPdeDst->n.u1Write      = 1;
-                     pPdeDst->n.u1Accessed   = 1;
-                     pPdeDst->au32[0]       &= ~PGM_PDFLAGS_TRACK_DIRTY;
-                 }
-                 else
-                 {
-                     /* Stale TLB entry. */
-                     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale));
-                 }
+                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
+
+                 Assert(pPdeSrc->b.u1Write);
+
+                 pPdeDst->n.u1Write      = 1;
+                 pPdeDst->n.u1Accessed   = 1;
+                 pPdeDst->au32[0]       &= ~PGM_PDFLAGS_TRACK_DIRTY;
                  PGM_INVL_BIG_PG(GCPtrPage);
                  STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
…
                      PSHWPT      pPTDst   = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
                      PSHWPTE     pPteDst  = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
-                     if (pPteDst->n.u1Present)    /** @todo Optimize accessed bit emulation? */
+                     if (    pPteDst->n.u1Present    /** @todo Optimize accessed bit emulation? */
+                         &&  (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY))
                      {
-                         if (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY)
-                         {
-                             LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
+                         LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
  #  ifdef VBOX_STRICT
-                             PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
-                             if (pPage)
-                                 AssertMsg(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage),
-                                         ("Unexpected dirty bit tracking on monitored page %RGv (phys %RGp)!!!!!!\n", GCPtrPage, pPteSrc->u & X86_PTE_PAE_PG_MASK));
+                         PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
+                         if (pPage)
+                             AssertMsg(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage),
+                                       ("Unexpected dirty bit tracking on monitored page %RGv (phys %RGp)!!!!!!\n", GCPtrPage, pPteSrc->u & X86_PTE_PAE_PG_MASK));
  #  endif
-                             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
-
-                             Assert(pPteSrc->n.u1Write);
-
-                             /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
-                              *       fault again and take this path to only invalidate the entry.
-                              */
-                             pPteDst->n.u1Write    = 1;
-                             pPteDst->n.u1Dirty    = 1;
-                             pPteDst->n.u1Accessed = 1;
-                             pPteDst->au32[0]     &= ~PGM_PTFLAGS_TRACK_DIRTY;
-                         }
-                         else
-                         {
-                             /* Stale TLB entry. */
-                             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale));
-                         }
+                         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
+
+                         Assert(pPteSrc->n.u1Write);
+
+                         pPteDst->n.u1Write    = 1;
+                         pPteDst->n.u1Dirty    = 1;
+                         pPteDst->n.u1Accessed = 1;
+                         pPteDst->au32[0]     &= ~PGM_PTFLAGS_TRACK_DIRTY;
                          PGM_INVL_PG(GCPtrPage);

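The two hunks above restore the pre-47401 shape of the dirty-bit check, folding the PGM_PDFLAGS_TRACK_DIRTY / PGM_PTFLAGS_TRACK_DIRTY test into the outer if and dropping the separate stale-TLB branch and its counter. For orientation, a standalone sketch of write-protection based dirty-bit emulation (the bit layout and helpers are simplified stand-ins, not the real PGM definitions):

    /* The shadow PTE stays read-only with a TRACK_DIRTY marker until the
     * guest's first write; that write faults, we record the dirty bit in the
     * guest PTE, make the shadow PTE writable, drop the marker, and flush the
     * stale TLB entry so the new permissions take effect. */
    #include <stdint.h>
    #include <stdio.h>

    #define PTE_P            0x01u
    #define PTE_W            0x02u
    #define PTE_A            0x20u
    #define PTE_D            0x40u
    #define PTE_TRACK_DIRTY  0x200u  /* an available/software bit, stand-in only */

    static void invalidatePage(void) { /* stand-in for INVLPG / PGM_INVL_PG */ }

    /* Returns 1 if the write fault was a dirty-tracking fault we fixed up. */
    static int handleWriteFault(uint32_t *pPteShw, uint32_t *pPteGst)
    {
        if ((*pPteShw & PTE_P) && (*pPteShw & PTE_TRACK_DIRTY))
        {
            *pPteGst |= PTE_D | PTE_A;           /* record dirty in the guest PTE */
            *pPteShw |= PTE_W | PTE_D | PTE_A;   /* let further writes go through */
            *pPteShw &= ~PTE_TRACK_DIRTY;
            invalidatePage();                    /* drop the read-only TLB entry */
            return 1;
        }
        return 0;                                /* genuine protection fault */
    }

    int main(void)
    {
        uint32_t Shw = PTE_P | PTE_TRACK_DIRTY, Gst = PTE_P | PTE_W;
        printf("fixed=%d shw=%#x gst=%#x\n", handleWriteFault(&Shw, &Gst), Shw, Gst);
        return 0;
    }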
…
      int             rc = VINF_SUCCESS;

-     pgmLock(pVM);
-
      /*
       * First check if the shadow pd is present.
…
      if (rc != VINF_SUCCESS)
      {
-         pgmUnlock(pVM);
          AssertRC(rc);
          return rc;
…
      if (rc != VINF_SUCCESS)
      {
-         pgmUnlock(pVM);
          AssertRC(rc);
          return rc;
…
      if (!pPdeDst->n.u1Present)
      {
+         pgmLock(pVM);
          rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage);
+         pgmUnlock(pVM);
+         AssertRC(rc);
          if (rc != VINF_SUCCESS)
          {
…
              PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
  # endif
-             pgmUnlock(pVM);
-             AssertRC(rc);
              return rc;
          }
…
      {
          GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
- # else
+ #else
      {
          GSTPDE PdeSrc;
…
          PdeSrc.n.u1User     = 1;

- # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
+ #endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
          Assert(rc != VINF_EM_RAW_GUEST_TRAP);
          if (uErr & X86_TRAP_PF_US)
…
      PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
  # endif
-     pgmUnlock(pVM);
      return rc;

  • trunk/src/VBox/VMM/VMMAll/TMAll.cpp (r19709 → r19772)

                      Assert(!pTimer->offPrev);
                      Assert(!pTimer->offNext);
+ /*
                      AssertMsg(      pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
                                ||    pTimer->CTX_SUFF(pVM)->tm.s.fVirtualSyncTicking
                                ||    u64Expire >= pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync,
                                ("%RU64 < %RU64 %s\n", u64Expire, pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
+ */
                      pTimer->u64Expire = u64Expire;
                      TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r19746 → r19772)

      else
      {
-         LogFlow(("INJ-EI: %x at %RGv\n", iGate, (RTGCPTR)pCtx->rip));
+         Log(("CPU%d: INJ-EI: %x at %RGv\n", pVCpu->idCpu, iGate, (RTGCPTR)pCtx->rip));
          Assert(VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW || !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
          Assert(VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW || pCtx->eflags.u32 & X86_EFL_IF);
…

      u32TrapMask = HWACCM_VMX_TRAP_MASK;
- #ifndef DEBUG
+ //#ifndef DEBUG
      if (pVM->hwaccm.s.fNestedPaging)
          u32TrapMask &= ~RT_BIT(X86_XCPT_PF);   /* no longer need to intercept #PF. */
- #endif
+ //#endif

      /* Also catch floating point exceptions as we need to report them to the guest in a different way. */
…
              errCode |= X86_TRAP_PF_P;

-         Log(("EPT Page fault %x at %RGp error code %x\n", (uint32_t)exitQualification, GCPhys, errCode));
+         LogFlow(("EPT Page fault %x at %RGp error code %x\n", (uint32_t)exitQualification, GCPhys, errCode));

          /* GCPhys contains the guest physical address of the page fault. */