VirtualBox

Changeset 9682 in vbox


Ignore:
Timestamp:
Jun 13, 2008 1:27:40 PM (17 years ago)
Author:
vboxsync
Message:

Long mode paging updates

Location:
trunk/src/VBox/VMM/VMMAll
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r9596 r9682  
    723 723 #ifndef IN_GC
    724 724 /**
    725  * Gets the SHADOW page directory pointer for the specified address. Allocates
    726  * backing pages in case the PDPT or page dirctory is missing.
     725 * Syncs the SHADOW page directory pointer for the specified address. Allocates
     726 * backing pages in case the PDPT or PML4 entry is missing.
    727 727 *
    728 728 * @returns VBox status.
    729 729 * @param   pVM         VM handle.
    730 730 * @param   GCPtr       The address.
     731 * @param   pGstPml4e   Guest PML4 entry
     732 * @param   pGstPdpe    Guest PDPT entry
    731 733 * @param   ppPD        Receives address of page directory
    732 734 */
    733 PGMDECL(int) PGMShwGetAllocLongModePDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PX86PDPAE *ppPD)
     735 PGMDECL(int) PGMShwSyncLongModePDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
    734 736 {
    735 737     PPGM           pPGM   = &pVM->pgm.s;
     
    756 758
    757 759        /* The PDPT was cached or created; hook it up now. */
    758         pPml4e->u |= pShwPage->Core.Key;
     760        pPml4e->u |=   pShwPage->Core.Key
     761                    | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
    759 762    }
    760 763    else
    761 764    {
     765        AssertMsg((pGstPml4e->u & (X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX)) == (pPml4e->u & (X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX)), ("pGstMpl4e.u=%RX64 pPml4e->u=%RX64\n", pGstPml4e->u, pPml4e->u));
     766
    762 767        pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
    763 768        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
     
    784 789
    785 790        /* The PD was cached or created; hook it up now. */
    786         pPdpe->u |= pShwPage->Core.Key;
     791        pPdpe->u |=    pShwPage->Core.Key
     792                    | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
    787 793    }
    788 794    else
    789 795    {
     796        AssertMsg((pGstPdpe->u & (X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX)) == (pPdpe->u & (X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX)), ("pGstPdpe.u=%RX64 pPdpe->u=%RX64\n", pGstPdpe->u, pPdpe->u));
     797
    790 798        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
    791 799        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r9620 r9682  
    110 110 #   elif PGM_GST_TYPE == PGM_TYPE_AMD64
    111 111     unsigned     iPDSrc;
    112     PX86PML4E    pPml4e;
    113     X86PDPE      Pdpe;
     112    PX86PML4E    pPml4eSrc;
     113    X86PDPE      PdpeSrc;
    114 114     PGSTPD       pPDSrc;
    115 115
    116     pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, pvFault, &pPml4e, &Pdpe, &iPDSrc);
    117     Assert(pPml4e);
     116    pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, pvFault, &pPml4eSrc, &PdpeSrc, &iPDSrc);
     117    Assert(pPml4eSrc);
    118 118 #   endif
    119 119     /* Quick check for a valid guest trap. */
     
    149 149     const unsigned  iPDDst = (((RTGCUINTPTR)pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
    150 150     PX86PDPAE       pPDDst;
    151 
    152     rc = PGMShwGetAllocLongModePDPtr(pVM, (RTGCUINTPTR)pvFault, &pPDDst);
     151 #  if PGM_GST_TYPE == PGM_TYPE_PROT
     152    /* AMD-V nested paging */
     153    X86PML4E     Pml4eSrc;
     154    X86PDPE      PdpeSrc;
     155    PX86PML4E    pPml4eSrc = &Pml4eSrc;
     156
     157    /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
     158    Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
     159    PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
     160 #  endif
     161
     162    rc = PGMShwSyncLongModePDPtr(pVM, (RTGCUINTPTR)pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
    153 163    if (rc != VINF_SUCCESS)
    154 164    {
     
    1455 1465    PX86PDPAE       pPDDst;
    1456 1466    X86PDEPAE       PdeDst;
    1457 
    1458     int rc = PGMShwGetAllocLongModePDPtr(pVM, GCPtrPage, &pPDDst);
    1459     if (rc != VINF_SUCCESS)
    1460     {
    1461         AssertMsg(rc == VINF_PGM_SYNC_CR3, ("Unexpected rc=%Vrc\n", rc));
    1462         return rc;
    1463     }
    1464     Assert(pPDDst);
     1467    PX86PDPT        pPdpt;
     1468
     1469    int rc = PGMShwGetLongModePDPtr(pVM, GCPtrPage, &pPdpt, &pPDDst);
     1470    AssertRCReturn(rc, rc);
     1471    Assert(pPDDst && pPdpt);
    1465 1472    PdeDst = pPDDst->a[iPDDst];
    1466 1473 # endif
     
    2148 2155    const unsigned  iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
    2149 2156    PX86PDPAE       pPDDst;
    2150     rc = PGMShwGetAllocLongModePDPtr(pVM, GCPtrPage, &pPDDst);
     2157    PX86PDPT        pPdpt;
     2158    rc = PGMShwGetLongModePDPtr(pVM, GCPtrPage, &pPdpt, &pPDDst);
    2151 2159    if (rc != VINF_SUCCESS)
    2152 2160    {
     
    2595 2603 #  elif PGM_GST_TYPE == PGM_TYPE_AMD64
    2596 2604    unsigned        iPDSrc;
    2597     PX86PML4E       pPml4e;
    2598     X86PDPE         Pdpe;
    2599     PGSTPD          pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4e, &Pdpe, &iPDSrc);
     2605    PX86PML4E       pPml4eSrc;
     2606    X86PDPE         PdpeSrc;
     2607    PGSTPD          pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
    2600 2608    if (!pPDSrc)
    2601 2609        return VINF_SUCCESS; /* not present */
     
    2625 2633        X86PDEPAE       PdeDst;
    2626 2634
    2627         int rc = PGMShwGetAllocLongModePDPtr(pVM, GCPtrPage, &pPDDst);
     2635 #  if PGM_GST_TYPE == PGM_TYPE_PROT
     2636        /* AMD-V nested paging */
     2637        X86PML4E     Pml4eSrc;
     2638        X86PDPE      PdpeSrc;
     2639        PX86PML4E    pPml4eSrc = &Pml4eSrc;
     2640
     2641        /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
     2642        Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
     2643        PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
     2644 #  endif
     2645
     2646        int rc = PGMShwSyncLongModePDPtr(pVM, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
    2628 2647        if (rc != VINF_SUCCESS)
    2629 2648        {
     
    2706 2725 #  elif PGM_GST_TYPE == PGM_TYPE_AMD64
    2707 2726    unsigned        iPDSrc;
    2708     PX86PML4E       pPml4e;
    2709     X86PDPE         Pdpe;
    2710     PGSTPD          pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4e, &Pdpe, &iPDSrc);
     2727    PX86PML4E       pPml4eSrc;
     2728    X86PDPE         PdpeSrc;
     2729    PGSTPD          pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
    2711 2730    if (!pPDSrc)
    2712 2731    {
     
    2733 2752    PX86PDEPAE      pPdeDst;
    2734 2753
    2735     rc = PGMShwGetAllocLongModePDPtr(pVM, GCPtrPage, &pPDDst);
     2754 #  if PGM_GST_TYPE == PGM_TYPE_PROT
     2755    /* AMD-V nested paging */
     2756    X86PML4E     Pml4eSrc;
     2757    X86PDPE      PdpeSrc;
     2758    PX86PML4E    pPml4eSrc = &Pml4eSrc;
     2759
     2760    /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
     2761    Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
     2762    PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
     2763 #  endif
     2764
     2765    rc = PGMShwSyncLongModePDPtr(pVM, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
    2736 2766    if (rc != VINF_SUCCESS)
    2737 2767    {
     
    2950 2980    for (uint64_t iPML4E = 0; iPML4E < X86_PG_PAE_ENTRIES; iPML4E++)
    2951 2981    {
    2952         /* Shadow PML4E present? */
    2953         if (pVM->pgm.s.CTXMID(p,PaePML4)->a[iPML4E].n.u1Present)
     2982        /* Guest PML4E not present (anymore). */
     2983        if (!pVM->pgm.s.CTXSUFF(pGstPaePML4)->a[iPML4E].n.u1Present)
    2954 2984        {
    2955 2985            /** @todo this is not efficient; figure out if we can reuse the existing cached version */
    2956             /* Guest PML4E not present (anymore). */
    2957             if (!pVM->pgm.s.CTXSUFF(pGstPaePML4)->a[iPML4E].n.u1Present)
     2986            /* Shadow PML4E present? */
     2987            if (pVM->pgm.s.CTXMID(p,PaePML4)->a[iPML4E].n.u1Present)
    2958 2988            {
    2959 2989                /* Shadow PML4 present, so free all pdpt & pd entries. */
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette