VirtualBox

Changeset 16232 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp:
Jan 26, 2009 2:09:56 PM
Author:
vboxsync
Message:
More shadow paging updates (disabled)
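The "(disabled)" in the message means the new code paths are compiled out by default: as the diffs below show, they only become active when VBOX_WITH_PGMPOOL_PAGING_ONLY is defined at build time. A minimal standalone sketch of that compile-time-switch pattern (the function and strings are illustrative; only the macro name comes from the changeset):

    #include <stdio.h>

    /* Sketch of the pattern used in this changeset: new logic is committed
     * but stays dormant until the feature macro is defined at build time,
     * e.g. with -DVBOX_WITH_PGMPOOL_PAGING_ONLY. The function below is a
     * hypothetical stand-in, not a real PGM entry point. */
    static void syncShadowPd(void)
    {
    #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
        puts("new pool-based shadow paging path");
    #else
        puts("existing shadow paging path");
    #endif
    }

    int main(void)
    {
        syncShadowPd();
        return 0;
    }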

Location:
trunk/src/VBox/VMM/VMMAll
Files:
3 edited

Legend:

Lines prefixed with + were added, lines prefixed with - were removed; unprefixed lines are unchanged context.
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

r16203 → r16232

 DECLINLINE(int) pgmShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
 DECLINLINE(int) pgmShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
-DECLINLINE(int) pgmShwSyncPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
 DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
 DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGM pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
+DECLINLINE(int) pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
 #endif
     
 
 /**
- * Syncs the SHADOW page directory pointer for the specified address.
- *
- * Allocates backing pages in case the PDPT entry is missing.
- *
- * @returns VBox status.
- * @param   pVM         VM handle.
- * @param   GCPtr       The address.
- * @param   pGstPdpe    Guest PDPT entry
- * @param   ppPD        Receives address of page directory
- * @remarks Unused.
- */
-DECLINLINE(int) pgmShwSyncPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
-{
-    PPGM           pPGM   = &pVM->pgm.s;
-    PPGMPOOL       pPool  = pPGM->CTX_SUFF(pPool);
-    PPGMPOOLPAGE   pShwPage;
-    int            rc;
-
-    Assert(!HWACCMIsNestedPagingActive(pVM));
-
-    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
-    PX86PDPT  pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
-    PX86PDPE  pPdpe = &pPdpt->a[iPdPt];
-
-    /* Allocate page directory if not present. */
-    if (    !pPdpe->n.u1Present
-        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
-    {
-        PX86PDPE pPdptGst = pgmGstGetPaePDPEPtr(pPGM, GCPtr);
-
-        Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
-        /* Create a reference back to the PDPT by using the index in its shadow page. */
-        rc = pgmPoolAlloc(pVM, pPdptGst->u & X86_PDPE_PG_MASK, PGMPOOLKIND_PAE_PD_FOR_PAE_PD, PGMPOOL_IDX_PDPT, iPdPt, &pShwPage);
-        if (rc == VERR_PGM_POOL_FLUSHED)
-        {
-            Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
-            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
-            return VINF_PGM_SYNC_CR3;
-        }
-        AssertRCReturn(rc, rc);
-    }
-    else
-    {
-        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
-        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
-    }
-    /* The PD was cached or created; hook it up now. */
-    pPdpe->u |= pShwPage->Core.Key
-             |  (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
-
-    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
-    return VINF_SUCCESS;
-}
-
-
-/**
  * Gets the SHADOW page directory pointer for the specified address.
  *
     
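Both the function deleted above and its replacement added below (pgmShwSyncPaePDPtr) share the same allocate-or-look-up shape: if the shadow PDPT entry is empty, a page-directory page is taken from the pool; otherwise the cached page is fetched; either way it is then hooked into the entry. A minimal standalone sketch of that shape, using simplified stand-ins rather than the real PGM pool API:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Page { int key; } Page;   /* stand-in for PGMPOOLPAGE */

    static Page *poolAlloc(int key)          /* stand-in for pgmPoolAlloc */
    {
        Page *p = malloc(sizeof *p);
        if (p)
            p->key = key;
        return p;
    }

    static Page *shadowPdpt[4];              /* 4-entry PAE shadow PDPT */

    static Page *syncPdPtr(unsigned iPdPt)
    {
        if (!shadowPdpt[iPdPt])              /* not present: allocate    */
            shadowPdpt[iPdPt] = poolAlloc((int)iPdPt);
        return shadowPdpt[iPdPt];            /* cached or created: hand out */
    }

    int main(void)
    {
        Page *p1 = syncPdPtr(2);
        Page *p2 = syncPdPtr(2);             /* second call reuses the page */
        printf("same page: %s\n", p1 == p2 ? "yes" : "no");
        return 0;
    }

The second call for the same index returns the cached page, which is what the "The PD was cached or created; hook it up now" comment refers to.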
 
 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+
+/**
+ * Gets the shadow page directory for the specified address, PAE.
+ *
+ * @returns Pointer to the shadow PD.
+ * @param   pVM         VM handle.
+ * @param   GCPtr       The address.
+ * @param   pGstPdpe    Guest PDPT entry
+ * @param   ppPD        Receives address of page directory
+ */
+DECLINLINE(int) pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
+{
+    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
+    PX86PDPT       pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
+    PX86PDPE       pPdpe = &pPdpt->a[iPdPt];
+    PPGMPOOL       pPool         = pVM->pgm.s.CTX_SUFF(pPool);
+    bool           fNestedPaging = HWACCMIsNestedPagingActive(pVM);
+    PPGMPOOLPAGE   pShwPage;
+    int            rc;
+
+    /* Allocate page directory if not present. */
+    if (    !pPdpe->n.u1Present
+        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
+    {
+        if (!fNestedPaging)
+        {
+            Assert(pGstPdpe);
+            Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
+            /* Create a reference back to the PDPT by using the index in its shadow page. */
+            rc = pgmPoolAlloc(pVM, pGstPdpe->u & X86_PDPE_PG_MASK, PGMPOOLKIND_PAE_PD_FOR_PAE_PD, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
+        }
+        else
+        {
+            /* AMD-V nested paging. (Intel EPT never comes here) */
+            RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
+
+            rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_PAE_PD_PHYS_PROT, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
+        }
+
+        if (rc == VERR_PGM_POOL_FLUSHED)
+        {
+            Log(("pgmShwSyncPaePDPtr: PGM pool flushed -> signal sync cr3\n"));
+            Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
+            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
+            return VINF_PGM_SYNC_CR3;
+        }
+        AssertRCReturn(rc, rc);
+    }
+    else
+    {
+        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
+        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
+    }
+    /* The PD was cached or created; hook it up now. */
+    pPdpe->u |= pShwPage->Core.Key
+             | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
+
+    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
+    return VINF_SUCCESS;
+}
+
 /**
  * Gets the pointer to the shadow page directory entry for an address, PAE.
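The first thing the new function does is derive the PDPT index from the guest-context address: iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE. Under PAE, a 32-bit linear address splits into a 2-bit PDPT index, a 9-bit PD index, a 9-bit PT index, and a 12-bit page offset. A standalone sketch of that split, assuming the VBox constants match the architectural values (shift 30, mask 0x3):

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed to match the architectural PAE layout (4-entry PDPT
     * indexed by bits 31:30); not copied from the VBox headers. */
    #define X86_PDPT_SHIFT      30
    #define X86_PDPT_MASK_PAE   0x3

    int main(void)
    {
        uint32_t GCPtr = 0xC0501234;  /* example guest-context address */

        unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE; /* bits 31:30 -> PDPT entry  */
        unsigned iPd   = (GCPtr >> 21) & 0x1FF;                         /* bits 29:21 -> PD entry    */
        unsigned iPt   = (GCPtr >> 12) & 0x1FF;                         /* bits 20:12 -> PT entry    */
        unsigned off   = GCPtr & 0xFFF;                                 /* bits 11:0  -> page offset */

        printf("PDPT=%u PD=%u PT=%u offset=0x%x\n", iPdPt, iPd, iPt, off);
        return 0;
    }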
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

r16203 → r16232

 #    if PGM_GST_TYPE == PGM_TYPE_PAE
     unsigned        iPDSrc;
+#     ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+    X86PDPE         PdpeSrc;
+    PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, pvFault, &iPDSrc, &PdpeSrc);
+#     else
     PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, pvFault, &iPDSrc, NULL);
+#     endif
 
 #    elif PGM_GST_TYPE == PGM_TYPE_AMD64

     Assert(pPml4eSrc);
 #    endif
-    /* Quick check for a valid guest trap. */
+
+    /* Quick check for a valid guest trap. (PAE & AMD64) */
     if (!pPDSrc)
     {

 #  elif PGM_SHW_TYPE == PGM_TYPE_PAE
     const unsigned  iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK;   /* pPDDst index, not used with the pool. */
+
+#   ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+#    if PGM_GST_TYPE != PGM_TYPE_PAE
+    PX86PDPAE       pPDDst;
+    X86PDPE         PdpeSrc;
+
+    /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
+    PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
+#    endif
+    rc = pgmShwSyncPaePDPtr(pVM, pvFault, &PdpeSrc, &pPDDst);
+    if (rc != VINF_SUCCESS)
+    {
+        AssertRC(rc);
+        return rc;
+    }
+    Assert(pPDDst);
+
+#   else
     PX86PDPAE       pPDDst = pgmShwGetPaePDPtr(&pVM->pgm.s, pvFault);
 

     if (!pPdptDst->a[iPdpt].n.u1Present)
         pPdptDst->a[iPdpt].n.u1Present = 1;
+#   endif
 
 #  elif PGM_SHW_TYPE == PGM_TYPE_AMD64
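When the guest is not a PAE guest there is no guest PDPT entry to mirror, so the hunk above fabricates a fully permissive one (present, writable, user-accessible, accessed) and leaves real access control to the page-table level. A standalone sketch, assuming the architectural bit positions for the X86_PDPE_* flags (the macro values below are assumptions, not copied from the VBox headers):

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed architectural bit positions for the flags named in the diff. */
    #define X86_PDPE_P   (UINT64_C(1) << 0)  /* present              */
    #define X86_PDPE_RW  (UINT64_C(1) << 1)  /* writable             */
    #define X86_PDPE_US  (UINT64_C(1) << 2)  /* user-mode accessible */
    #define X86_PDPE_A   (UINT64_C(1) << 5)  /* accessed             */

    int main(void)
    {
        /* Fake PDPT entry: everything allowed; real checks happen per PTE. */
        uint64_t PdpeSrc = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
        printf("fake PDPE = 0x%llx\n", (unsigned long long)PdpeSrc);
        return 0;
    }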
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

r16203 → r16232

                     Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s));
                     VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
+                    STAM_COUNTER_INC(&(pPool->CTX_SUFF(pVM)->pgm.s.StatRZGuestCR3WriteConflict));
                     LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
                 }

                     {
                         Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s));
+                        STAM_COUNTER_INC(&(pPool->CTX_SUFF(pVM)->pgm.s.StatRZGuestCR3WriteConflict));
                         VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
                         LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));

                     Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s));
                     VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
+                    STAM_COUNTER_INC(&(pPool->CTX_SUFF(pVM)->pgm.s.StatRZGuestCR3WriteConflict));
                     LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
                 }

                         Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s));
                         VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
+                        STAM_COUNTER_INC(&(pPool->CTX_SUFF(pVM)->pgm.s.StatRZGuestCR3WriteConflict));
                         LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
                     }

                     {
                         Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s));
+                        STAM_COUNTER_INC(&(pPool->CTX_SUFF(pVM)->pgm.s.StatRZGuestCR3WriteConflict));
                         VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
                         LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));

                         {
                             Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s));
+                            STAM_COUNTER_INC(&(pPool->CTX_SUFF(pVM)->pgm.s.StatRZGuestCR3WriteConflict));
                             VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
                             LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
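All six hunks in PGMAllPool.cpp make the same bookkeeping change: each site that detects a guest CR3 write conflict now increments the StatRZGuestCR3WriteConflict statistics counter alongside the existing VM_FF_SET and LogFlow calls. A minimal sketch of that count-at-every-detection-site pattern; the counter type and macro here are illustrative stand-ins, not VBox's actual STAM API:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative stand-ins: the real STAM_COUNTER_INC operates on
     * registered STAMCOUNTER objects inside the VM state. */
    typedef struct { uint64_t c; } COUNTER;
    #define COUNTER_INC(pCnt) do { (pCnt)->c++; } while (0)

    static COUNTER g_StatGuestCR3WriteConflict;

    static void onConflictDetected(unsigned iShw)
    {
        /* Mirror of the changeset's pattern: count, then log. */
        COUNTER_INC(&g_StatGuestCR3WriteConflict);
        printf("Detected conflict at iShw=%#x!\n", iShw);
    }

    int main(void)
    {
        onConflictDetected(0x3);
        onConflictDetected(0x7);
        printf("conflicts seen: %llu\n",
               (unsigned long long)g_StatGuestCR3WriteConflict.c);
        return 0;
    }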